code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from django.views.generic.detail import DetailView
from ratings.handlers import ratings
class VotedByView(DetailView):
    """
    Can be used to render a list of users that voted a given object.
    For example, you can add in your *urls.py* a view displaying all
    users that voted a single active article::

        from ratings.views.generic import VotedByView

        urlpatterns = patterns('',
            url(r'^(?P<slug>[-\w]+)/votes/$', VotedByView.as_view(
                queryset=Article.objects.filter(is_active=True)),
                name="article_voted_by"),
        )

    Two context variables will be present in the template:

        - *object*: the voted article
        - *votes*: all the Vote instances for that article

    The default template suffix is ``'_voted_by'``, and so the template
    used in our example is ``article_voted_by.html``.
    """
    # Relation followed with select_related() when fetching votes
    # (set to a falsy value to disable the optimization).
    select_related = 'user'
    # Name of the template context variable holding the votes queryset.
    context_votes_name = 'votes'
    template_name_suffix = '_voted_by'

    def get_context_votes_name(self, obj):
        """
        Get the name to use for the votes.
        """
        return self.context_votes_name

    def get_votes(self, obj, request):
        """
        Return a queryset of votes given to *obj*.
        """
        queryset = self.handler.get_votes_for(obj)
        if self.select_related:
            queryset = queryset.select_related(self.select_related)
        return queryset

    def get(self, request, **kwargs):
        self.object = self.get_object()
        self.handler = ratings.get_handler(self.object)
        self.votes = self.get_votes(self.object, request)
        kwargs = {
            'object': self.object,
            self.get_context_votes_name(self.object): self.votes,
        }
        context = self.get_context_data(**kwargs)
        response = self.render_to_response(context)
        # FIXME: try to avoid this workaround.
        # TemplateResponse objects render lazily; force rendering here so
        # callers always receive a fully rendered response.
        if hasattr(response, 'render') and callable(response.render):
            response.render()
        return response
from django.core.urlresolvers import reverse
from menuproxy.utils import DoesNotDefined
class MenuProxy(object):
    """Base class describing how menu data is obtained from a model.

    ``model`` is the Django model the menu is built from; the four
    filter/exclude dicts are keyword arguments passed to the queryset
    ``filter()``/``exclude()`` calls for children and ancestors lookups.
    """

    def __init__(self, model=None, children_filter=None, children_exclude=None,
        ancestors_filter=None, ancestors_exclude=None, **other):
        # None sentinels instead of mutable ``{}`` defaults, so instances
        # never share (and can never corrupt) a single default dict.
        self.model = model
        self.children_filter = children_filter if children_filter is not None else {}
        self.children_exclude = children_exclude if children_exclude is not None else {}
        self.ancestors_filter = ancestors_filter if ancestors_filter is not None else {}
        self.ancestors_exclude = ancestors_exclude if ancestors_exclude is not None else {}

    def title(self, object):
        """Return the title of the element."""
        assert object is not DoesNotDefined, DoesNotDefined
        if object is None:
            return None
        if not hasattr(object, 'title'):
            # Fall back to the object's text representation (Python 2).
            return unicode(object)
        if callable(object.title):
            return object.title()
        else:
            return object.title

    def url(self, object):
        """Return the url of the element."""
        assert object is not DoesNotDefined, DoesNotDefined
        if object is None:
            return None
        return object.get_absolute_url()

    def ancestors(self, object):
        """Return the list of ancestor elements, starting from the top level."""
        if object is None or object is DoesNotDefined:
            return None
        return object.get_ancestors().filter(**self.ancestors_filter).exclude(**self.ancestors_exclude)

    def children(self, object):
        """Return the list of child elements.

        If ``object`` is None (or DoesNotDefined), return the top-level
        elements.
        """
        if object is DoesNotDefined:
            object = None
        return self.model.objects.filter(parent=object).filter(**self.children_filter).exclude(**self.children_exclude)

    def lasy_children(self, object):
        """Return the list of lazily-loaded child elements.

        Only used for elements that do not contain the selected element
        among their descendants; the base implementation loads nothing.
        """
        return None
class FlatProxy(MenuProxy):
    """Proxy for a non-tree model: every element is shown at the top level."""

    def ancestors(self, object):
        """A flat model has no ancestor chain."""
        return None

    def children(self, object):
        """Return every element when asked for the top level, nothing otherwise.

        ``object`` is None (or DoesNotDefined) when the top level is
        requested; real elements never have children in a flat model.
        """
        if object is not None and object is not DoesNotDefined:
            return None
        return self.model.objects.filter(**self.children_filter).exclude(**self.children_exclude)
class EmptyProxy(MenuProxy):
    """Proxy that always reports no ancestors and no children."""

    def ancestors(self, object):
        """Always None: there is no ancestor chain to walk."""
        return None

    def children(self, object):
        """Always None: this proxy exposes no elements at any level."""
        return None
class StaticUrlProxy(MenuProxy):
    """Proxy returning a fixed url/title, or one resolved from a dotted path.

    Either the literal ``url_text``/``title_text`` is used, or the dotted
    path in ``get_url``/``get_title`` is imported (and called, if callable)
    to produce the value.
    """

    def __init__(self, url_text=None, get_url=None,
        title_text=None, get_title=None, **other):
        self.url_text = url_text
        self.get_url = get_url
        self.title_text = title_text
        self.get_title = get_title

    def _resolve(self, dotted_path):
        """Import ``dotted_path`` and return its value, calling it if callable.

        Shared by :meth:`title` and :meth:`url`, which previously duplicated
        this import-and-call logic.
        """
        # Lazy import: keeps module import light, mirroring the original code.
        from importpath import importpath
        value = importpath(dotted_path)
        if callable(value):
            return value()
        return value

    def title(self, object):
        """Return the title of the element."""
        if self.get_title is None:
            return self.title_text
        return self._resolve(self.get_title)

    def url(self, object):
        """Return the url of the element."""
        if self.get_url is None:
            return self.url_text
        return self._resolve(self.get_url)

    def ancestors(self, object):
        """Static items have no ancestor chain."""
        return None

    def children(self, object):
        """Static items have no children at any level."""
        return None
class ReverseProxy(StaticUrlProxy):
    """Proxy resolving its url through Django's ``reverse`` by view name."""

    def __init__(self, viewname, title_text=None, get_title=None,
        args=None, kwargs=None, prefix=None, **other):
        self.viewname = viewname
        # Title handling is inherited from StaticUrlProxy.title().
        self.title_text = title_text
        self.get_title = get_title
        self.args = args
        self.kwargs = kwargs
        self.prefix = prefix

    def url(self, object):
        """Return the url reversed from the configured view name."""
        return reverse(viewname=self.viewname, args=self.args, kwargs=self.kwargs,
            prefix=self.prefix)
from django import conf
from django.core.cache import cache
from importpath import importpath
# Ways a rule's content can be attached to its ``inside`` parent rule.
METHODS = (
    'replace', # the ``point`` object is replaced by ``object``
    'insert', # ``object`` is appended to the children of the ``inside`` rule
    'children', # the children of ``object`` are appended to the children of the ``inside`` rule
)
def get_title(menu_proxy, object):
    """Correct value returned by menu_proxy.title function"""
    result = menu_proxy.title(object)
    if result is None:
        return u''
    # Coerce any title object to text (Python 2 ``unicode``).
    return unicode(result)
def get_url(menu_proxy, object):
    """Correct value returned by menu_proxy.url function"""
    result = menu_proxy.url(object)
    if result is None:
        return u''
    # Coerce any url object to text (Python 2 ``unicode``).
    return unicode(result)
def get_ancestors(menu_proxy, object):
    """Normalize menu_proxy.ancestors(): None becomes [], any iterable a list."""
    ancestors = menu_proxy.ancestors(object)
    return [] if ancestors is None else list(ancestors)
def get_children(menu_proxy, object, lasy):
    """
    Call ``children`` or ``lasy_children`` on ``menu_proxy`` with ``object``
    and normalize the result: None becomes [], any iterable becomes a list.
    """
    getter = menu_proxy.lasy_children if lasy else menu_proxy.children
    found = getter(object)
    if found is None:
        return []
    return list(found)
class DoesNotDefined(object):
    """
    Sentinel class used to indicate that a value was not present in a rule.
    """
    pass
def try_to_import(value, exception_text):
    """
    Import the dotted path in ``value`` unless it is None or DoesNotDefined;
    those two sentinel values are passed through untouched.
    """
    if value is DoesNotDefined or value is None:
        return value
    return importpath(value, exception_text)
def get_rules():
    """Return dictionary of rules with settings.

    The result is cached under 'menuproxy.rules'; an implicit root rule is
    stored under the None key, and each rule's ``sequence`` lists the rules
    mounted inside it (including itself).
    """
    rules = cache.get('menuproxy.rules', None)
    if rules is not None:
        return rules
    rules = {}
    # Maps rule name -> names of rules that belong to its sequence.
    sequence = {None: []}
    def add_to_sequence(rule, value):
        if rule not in sequence:
            sequence[rule] = []
        sequence[rule].append(value)
    rules[None] = MenuRule(name=None, method='replace', proxy=None, rules=rules)
    for kwargs in getattr(conf.settings, 'MENU_PROXY_RULES', []):
        rule = MenuRule(rules=rules, **kwargs)
        rules[rule.name] = rule
        # Each rule appears both in its own sequence and in its parent's.
        add_to_sequence(rule.name, rule.name)
        add_to_sequence(rule.inside, rule.name)
    # Python 2 dict iteration; resolves names to MenuRule instances.
    for name, rule in rules.iteritems():
        rule.sequence = [rules[item] for item in sequence[name]]
    cache.set('menuproxy.rules', rules)
    return rules
def get_front_page(rules):
    """If MENU_PROXY_FRONT_PAGED is True and there is front page return MenuItem for it"""
    # DoesNotDefined doubles as the cache-miss marker because None is a
    # valid cached value (meaning "no front page configured").
    front_page = cache.get('menuproxy.front_page', DoesNotDefined)
    if front_page is not DoesNotDefined:
        return front_page
    front_page = None
    if getattr(conf.settings, 'MENU_PROXY_FRONT_PAGED', True):
        root = MenuItem(None, DoesNotDefined)
        children = root.children(False)
        if children:
            # The first top-level item is treated as the site front page.
            front_page = children[0]
    cache.set('menuproxy.front_page', front_page)
    return front_page
class MenuRule(object):
    """One menu-building rule from MENU_PROXY_RULES.

    Holds the proxy instance, the mount ``point``/``object`` (either fixed
    values or functions resolving them per request direction), and the name
    of the parent rule (``inside``).
    """
    def __init__(self, name, method, proxy, rules, inside=None,
        model=DoesNotDefined, point=DoesNotDefined, object=DoesNotDefined,
        point_function=DoesNotDefined, object_function=DoesNotDefined, **other):
        self.name = name
        self.method = method
        assert self.method in METHODS, 'menuproxy does`t support method: %s' % self.method
        self.inside = inside
        self.model = try_to_import(model, 'model class')
        self.point = try_to_import(point, 'mount point')
        # A callable mount point is resolved once, at rule-construction time.
        if callable(self.point) and self.point is not DoesNotDefined:
            self.point = self.point()
        # point_function is only consulted when no fixed point was given.
        if self.point is DoesNotDefined:
            self.point_function = try_to_import(point_function, 'mount point function')
        else:
            self.point_function = DoesNotDefined
        self.object = try_to_import(object, 'mount object')
        if callable(self.object) and self.object is not DoesNotDefined:
            self.object = self.object()
        if self.object is DoesNotDefined:
            self.object_function = try_to_import(object_function, 'mount object function')
        else:
            self.object_function = DoesNotDefined
        self.proxy = try_to_import(proxy, 'MenuProxy class')
        # Pass every resolved rule attribute on to the proxy constructor.
        other.update(self.__dict__)
        if callable(self.proxy) and self.proxy is not DoesNotDefined:
            self.proxy = self.proxy(**other)
        self.rules = rules
        # Filled in later by get_rules() once all rules exist.
        self.sequence = []
    def _get_point(self, object, forward):
        # Fixed point wins; otherwise delegate to the per-call function.
        if self.point is not DoesNotDefined:
            return self.point
        elif self.point_function is not DoesNotDefined:
            return self.point_function(object, forward)
        else:
            return DoesNotDefined
    def _get_object(self, object, forward):
        if self.object is not DoesNotDefined:
            return self.object
        elif self.object_function is not DoesNotDefined:
            return self.object_function(object, forward)
        else:
            return DoesNotDefined
    def forward_point(self, object):
        """Mount point when walking down (parent rule -> this rule)."""
        return self._get_point(object, True)
    def backward_point(self, object):
        """Mount point when walking up (this rule -> parent rule)."""
        return self._get_point(object, False)
    def forward_object(self, object):
        """Mount object when walking down (parent rule -> this rule)."""
        return self._get_object(object, True)
    def backward_object(self, object):
        """Mount object when walking up (this rule -> parent rule)."""
        return self._get_object(object, False)
class MenuItem(object):
    """Objects of this class are sent to templates; they walk nested rules.

    title/url/ancestors/children results are memoized on the instance via
    ``_title``/``_url``/``_ancestors``/``_children``(_lasy) attributes.
    """
    active = False
    current = False

    def __init__(self, name=None, object=None):
        if isinstance(object, MenuItem):
            # Copy constructor: share rules and identity with the source item.
            self.rules = object.rules
            self.name, self.object = object.name, object.object
        else:
            self.rules = get_rules()
            # A 'replace' rule mounted at this point substitutes its own
            # object for ours; the for/else keeps (name, object) unchanged
            # when no replacement matches.
            for rule in self.rules[name].sequence:
                if rule.name != name and rule.method == 'replace':
                    point = rule.forward_point(object)
                    if point is DoesNotDefined or point == object:
                        self.name, self.object = rule.name, rule.forward_object(object)
                        break
            else:
                self.name, self.object = name, object
        self.front_paged_ancestors = False

    def title(self):
        """Returns title for object"""
        if hasattr(self, '_title'):
            return getattr(self, '_title')
        title = get_title(self.rules[self.name].proxy, self.object)
        setattr(self, '_title', title)
        return title

    def url(self):
        """Returns url for object"""
        if hasattr(self, '_url'):
            return getattr(self, '_url')
        url = get_url(self.rules[self.name].proxy, self.object)
        setattr(self, '_url', url)
        return url

    def ancestors(self):
        """Returns ancestors for object, started from top level"""
        if hasattr(self, '_ancestors'):
            return getattr(self, '_ancestors')
        ancestors = []
        name = self.name
        object = self.object
        # Walk up through nested rules until the root rule (name is None).
        while True:
            items = get_ancestors(self.rules[name].proxy, object)
            until = self.rules[name].backward_object(object)
            items.reverse()
            for item in items:
                ancestors.insert(0, MenuItem(name, item))
                # Stop once the rule's own mount object is reached.
                if item == until:
                    break
            method, object, name = self.rules[name].method, self.rules[name].backward_point(object), self.rules[name].inside
            if name is None:
                break
            if method != 'replace':
                # Non-replace mounts contribute their mount point as an
                # ancestor of everything below them.
                ancestors.insert(0, MenuItem(name, object))
        front_page = get_front_page(self.rules)
        if front_page is not None:
            if not ancestors or ancestors[0].object != front_page.object:
                if (front_page.name, front_page.object) != (self.name, self.object):
                    # Remember that the chain was prefixed artificially.
                    self.front_paged_ancestors = True
                ancestors.insert(0, front_page)
        setattr(self, '_ancestors', ancestors)
        return ancestors

    def ancestors_for_menu(self):
        """
        Returns ancestors for show_menu tags.
        Ancestors will not contain front page and will contain object itself.
        """
        ancestors = self.ancestors()
        if self.front_paged_ancestors:
            ancestors = ancestors[1:]
        else:
            # Copy so appending below never mutates the memoized list.
            ancestors = ancestors[:]
        ancestors.append(self)
        return ancestors

    def children(self, lasy=False):
        """Returns children for object"""
        # Lazy and eager results are memoized under separate attributes.
        if lasy:
            field_name = '_children_lasy'
        else:
            field_name = '_children'
        if hasattr(self, field_name):
            return getattr(self, field_name)
        children = []
        for rule in self.rules[self.name].sequence:
            point = rule.forward_point(self.object)
            if rule.name == self.name:
                # Own rule: real model children of this object.
                children += [MenuItem(self.name, item) for item in get_children(
                    self.rules[self.name].proxy, self.object, lasy)
                ]
            elif point is DoesNotDefined or point == self.object:
                object = rule.forward_object(self.object)
                if rule.method == 'insert' and not lasy:
                    children += [MenuItem(rule.name, object)]
                elif rule.method == 'children':
                    children += [MenuItem(rule.name, item) for item in get_children(
                        rule.proxy, object, lasy)
                    ]
        setattr(self, field_name, children)
        return children
"""A collection of functions for Page CMS"""
from pages import settings
from pages.http import get_request_mock
from django.template import TemplateDoesNotExist
from django.template import loader, Context
from django.core.cache import cache
import re
def get_placeholders(template_name):
    """Collect every PlaceholderNode declared in the given template.

    :param template_name: the name of the template file
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        return []
    placeholders = []
    block_stack = []
    _placeholders_recursif(template.nodelist, placeholders, block_stack)
    return placeholders
def _placeholders_recursif(nodelist, plist, blist):
    """Recursively search a template node list for PlaceholderNode nodes.

    ``plist`` accumulates the placeholders found; ``blist`` is the stack of
    BlockNodes currently being traversed, used to resolve block overrides.
    """
    # I needed to do this lazy import to compile the documentation
    from django.template.loader_tags import BlockNode
    for node in nodelist:
        # extends node?
        if hasattr(node, 'parent_name'):
            _placeholders_recursif(node.get_parent(Context()).nodelist,
                plist, blist)
        # include node?
        elif hasattr(node, 'template'):
            _placeholders_recursif(node.template.nodelist, plist, blist)
        # Is it a placeholder? (duck-typed: no direct import of the node class)
        if hasattr(node, 'page') and hasattr(node, 'parsed') and \
            hasattr(node, 'as_varname') and hasattr(node, 'name'):
            already_in_plist = False
            for placeholder in plist:
                if placeholder.name == node.name:
                    already_in_plist = True
            if not already_in_plist:
                if len(blist):
                    # Remember the innermost enclosing block.
                    node.found_in_block = blist[len(blist) - 1]
                plist.append(node)
            node.render(Context())
        for key in ('nodelist', 'nodelist_true', 'nodelist_false'):
            if isinstance(node, BlockNode):
                # delete placeholders found in a block of the same name:
                # a child template's block overrides the parent's.
                offset = 0
                _plist = [(i, v) for i, v in enumerate(plist)]
                for index, pl in _plist:
                    if pl.found_in_block and \
                        pl.found_in_block.name == node.name \
                        and pl.found_in_block != node:
                        del plist[index - offset]
                        offset += 1
                blist.append(node)
            if hasattr(node, key):
                try:
                    _placeholders_recursif(getattr(node, key), plist, blist)
                except Exception:
                    # Best-effort: some nodes cannot be introspected outside
                    # a real rendering context; skip them. Narrowed from a
                    # bare ``except:`` so KeyboardInterrupt/SystemExit pass.
                    pass
            if isinstance(node, BlockNode):
                blist.pop()
# Marker separating the human-readable part of a po entry comment from the
# machine-parsed metadata below it. NOTE: the "MODIFIY" typo is load-bearing;
# existing .po files were generated with it, so it must not be corrected.
do_not_msg = "DO NOT MODIFIY BELOW THIS LINE"
# Template for po entry comments: page title, marker, then the metadata
# lines parsed back by import_po_files().
po_comment = """Page %s
%s
placeholder=%s
page_id=%d
content_id=%s"""
def export_po_files(path='poexport', stdout=None):
    """
    Export all the content from the published pages into
    po files. The files will be automatically updated
    with the new content if you run the command again.
    """
    if stdout is None:
        import sys
        stdout = sys.stdout
    if not path.endswith('/'):
        path += '/'
    # Lazy imports: polib is optional and pages.models must not be imported
    # at module load time.
    import polib
    import os
    from pages.models import Page, Content
    source_language = settings.PAGE_DEFAULT_LANGUAGE
    source_list = []
    for page in Page.objects.published():
        source_list.extend(page.content_by_language(source_language))
    # One po file per non-default language.
    for lang in settings.PAGE_LANGUAGES:
        if lang[0] != settings.PAGE_DEFAULT_LANGUAGE:
            try:
                os.mkdir(path)
            except OSError:
                # Directory already exists.
                pass
            po_path = path + lang[0] + '.po'
            stdout.write("Export language %s.\n" % lang[0])
            po = polib.pofile(po_path)
            po.metadata['Content-Type'] = 'text/plain; charset=utf-8'
            for source_content in source_list:
                page = source_content.page
                try:
                    target_content = Content.objects.get_content_object(
                        page, lang[0], source_content.type)
                    msgstr = target_content.body
                except Content.DoesNotExist:
                    # No translation yet: export with an empty msgstr.
                    target_content = None
                    msgstr = ""
                if source_content.body:
                    if target_content:
                        tc_id = str(target_content.id)
                    else:
                        tc_id = ""
                    entry = polib.POEntry(msgid=source_content.body,
                        msgstr=msgstr)
                    # Metadata comment parsed back by import_po_files().
                    entry.tcomment = po_comment % (page.title(), do_not_msg,
                        source_content.type, page.id, tc_id)
                    if entry not in po:
                        po.append(entry)
            po.save(po_path)
    stdout.write("""Export finished. The files are available """
        """in the %s directory.\n""" % path)
def import_po_files(path='poexport', stdout=None):
    """
    Import all the content updates from the po files into
    the pages.
    """
    # Lazy imports, mirroring export_po_files(); ``os`` is kept for parity.
    import polib
    import os
    from pages.models import Page, Content
    source_language = settings.PAGE_DEFAULT_LANGUAGE
    source_list = []
    pages_to_invalidate = []
    for page in Page.objects.published():
        source_list.extend(page.content_by_language(source_language))
    if stdout is None:
        import sys
        stdout = sys.stdout
    if not path.endswith('/'):
        path += '/'
    for lang in settings.PAGE_LANGUAGES:
        if lang[0] != settings.PAGE_DEFAULT_LANGUAGE:
            stdout.write("Update language %s.\n" % lang[0])
            po_path = path + lang[0] + '.po'
            po = polib.pofile(po_path)
            for entry in po:
                # Parse the metadata written by export_po_files() after the
                # do_not_msg marker: placeholder, page_id, content_id lines.
                meta_data = entry.tcomment.split(do_not_msg)[1].split("\n")
                placeholder_name = meta_data[1].split('=')[1]
                page_id = int(meta_data[2].split('=')[1])
                try:
                    content_id = int(meta_data[3].split('=')[1])
                except ValueError:
                    # Entry had no translation when it was exported.
                    content_id = None
                page = Page.objects.get(id=page_id)
                current_content = Content.objects.get_content(page, lang[0],
                    placeholder_name)
                if current_content != entry.msgstr:
                    stdout.write("Update page %d placeholder %s.\n" % (page_id,
                        placeholder_name))
                    Content.objects.create_content_if_changed(
                        page, lang[0], placeholder_name, entry.msgstr)
                    if page not in pages_to_invalidate:
                        pages_to_invalidate.append(page)
    # Invalidate each touched page's cache exactly once.
    for page in pages_to_invalidate:
        page.invalidate()
    stdout.write("Import finished from %s.\n" % path)
def normalize_url(url):
    """Return a normalized url with trailing and without leading slash.

    >>> normalize_url(None)
    '/'
    >>> normalize_url('/')
    '/'
    >>> normalize_url('/foo/bar')
    '/foo/bar'
    >>> normalize_url('foo/bar')
    '/foo/bar'
    >>> normalize_url('/foo/bar/')
    '/foo/bar'
    """
    # ``not url`` already covers None and the empty string; the old extra
    # ``len(url) == 0`` test was redundant.
    if not url:
        return '/'
    if not url.startswith('/'):
        url = '/' + url
    # Strip a single trailing slash, but never reduce '/' to ''.
    if len(url) > 1 and url.endswith('/'):
        url = url[:-1]
    return url
# Matches link classes of the form ``page_<id>``; group 1 is the page pk.
PAGE_CLASS_ID_REGEX = re.compile('page_([0-9]+)')
def filter_link(content, page, language, content_type):
    """Transform the HTML link href to point to the targeted page
    absolute URL.

    >>> filter_link('<a class="page_1">hello</a>', page, 'en-us', body)
    '<a href="/pages/page-1" class="page_1">hello</a>'
    """
    if not settings.PAGE_LINK_FILTER:
        return content
    if content_type in ('title', 'slug'):
        return content
    from BeautifulSoup import BeautifulSoup
    tree = BeautifulSoup(content)
    tags = tree.findAll('a')
    if len(tags) == 0:
        return content
    for tag in tags:
        tag_class = tag.get('class', False)
        if tag_class:
            # find page link with class 'page_ID'.
            # BUGFIX: search the tag's own class attribute, not the whole
            # ``content`` string — the old code gave every link the first
            # page id found anywhere in the document.
            result = PAGE_CLASS_ID_REGEX.search(tag_class)
            if result:
                try:
                    # TODO: try the cache before fetching the Page object
                    from pages.models import Page
                    target_page = Page.objects.get(pk=int(result.group(1)))
                    tag['href'] = target_page.get_url_path(language)
                except Page.DoesNotExist:
                    # Mark the page as containing a broken link and flag the
                    # anchor so the editor can spot it.
                    cache.set(Page.PAGE_BROKEN_LINK_KEY % page.id, True)
                    tag['class'] = 'pagelink_broken'
    return unicode(tree)
(function($){

/**
 * The bgiframe is chainable and applies the iframe hack to get
 * around zIndex issues in IE6. It will only apply itself in IE6
 * and adds a class to the iframe called 'bgiframe'. The iframe
 * is appeneded as the first child of the matched element(s)
 * with a tabIndex and zIndex of -1.
 *
 * By default the plugin will take borders, sized with pixel units,
 * into account. If a different unit is used for the border's width,
 * then you will need to use the top and left settings as explained below.
 *
 * NOTICE: This plugin has been reported to cause perfromance problems
 * when used on elements that change properties (like width, height and
 * opacity) a lot in IE6. Most of these problems have been caused by
 * the expressions used to calculate the elements width, height and
 * borders. Some have reported it is due to the opacity filter. All
 * these settings can be changed if needed as explained below.
 *
 * @example $('div').bgiframe();
 * @before <div><p>Paragraph</p></div>
 * @result <div><iframe class="bgiframe".../><p>Paragraph</p></div>
 *
 * @param Map settings Optional settings to configure the iframe.
 * @option String|Number top The iframe must be offset to the top
 *      by the width of the top border. This should be a negative
 *      number representing the border-top-width. If a number is
 *      is used here, pixels will be assumed. Otherwise, be sure
 *      to specify a unit. An expression could also be used.
 *      By default the value is "auto" which will use an expression
 *      to get the border-top-width if it is in pixels.
 * @option String|Number left The iframe must be offset to the left
 *      by the width of the left border. This should be a negative
 *      number representing the border-left-width. If a number is
 *      is used here, pixels will be assumed. Otherwise, be sure
 *      to specify a unit. An expression could also be used.
 *      By default the value is "auto" which will use an expression
 *      to get the border-left-width if it is in pixels.
 * @option String|Number width This is the width of the iframe. If
 *      a number is used here, pixels will be assume. Otherwise, be sure
 *      to specify a unit. An experssion could also be used.
 *      By default the value is "auto" which will use an experssion
 *      to get the offsetWidth.
 * @option String|Number height This is the height of the iframe. If
 *      a number is used here, pixels will be assume. Otherwise, be sure
 *      to specify a unit. An experssion could also be used.
 *      By default the value is "auto" which will use an experssion
 *      to get the offsetHeight.
 * @option Boolean opacity This is a boolean representing whether or not
 *      to use opacity. If set to true, the opacity of 0 is applied. If
 *      set to false, the opacity filter is not applied. Default: true.
 * @option String src This setting is provided so that one could change
 *      the src of the iframe to whatever they need.
 *      Default: "javascript:false;"
 *
 * @name bgiframe
 * @type jQuery
 * @cat Plugins/bgiframe
 * @author Brandon Aaron (brandon.aaron@gmail.com || http://brandonaaron.net)
 */
$.fn.bgIframe = $.fn.bgiframe = function(s) {
    // This is only for IE6
    if ( $.browser.msie && /6.0/.test(navigator.userAgent) ) {
        s = $.extend({
            top     : 'auto', // auto == .currentStyle.borderTopWidth
            left    : 'auto', // auto == .currentStyle.borderLeftWidth
            width   : 'auto', // auto == offsetWidth
            height  : 'auto', // auto == offsetHeight
            opacity : true,
            src     : 'javascript:false;'
        }, s || {});
        var prop = function(n){return n&&n.constructor==Number?n+'px':n;},
            // Note: IE6 accepts full markup in document.createElement (an
            // IE-only extension used below), which is why the iframe is
            // built as one HTML string here.
            html = '<iframe class="bgiframe"frameborder="0"tabindex="-1"src="'+s.src+'"'+
                       'style="display:block;position:absolute;z-index:-1;'+
                           (s.opacity !== false?'filter:Alpha(Opacity=\'0\');':'')+
                           'top:'+(s.top=='auto'?'expression(((parseInt(this.parentNode.currentStyle.borderTopWidth)||0)*-1)+\'px\')':prop(s.top))+';'+
                           'left:'+(s.left=='auto'?'expression(((parseInt(this.parentNode.currentStyle.borderLeftWidth)||0)*-1)+\'px\')':prop(s.left))+';'+
                           'width:'+(s.width=='auto'?'expression(this.parentNode.offsetWidth+\'px\')':prop(s.width))+';'+
                           'height:'+(s.height=='auto'?'expression(this.parentNode.offsetHeight+\'px\')':prop(s.height))+';'+
                       '"/>';
        return this.each(function() {
            // Only insert one bgiframe per element.
            if ( $('> iframe.bgiframe', this).length == 0 )
                this.insertBefore( document.createElement(html), this.firstChild );
        });
    }
    return this;
};

})(jQuery);
from django import template
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.utils.datastructures import SortedDict
from attachment.models import AttachmentImage, AttachmentFile
from classytags.arguments import Argument
from classytags.core import Options
from classytags.helpers import AsTag
# Tag-name suffixes: 'attachments' covers both images and files; the other
# two restrict the lookup to a single attachment model.
INTENT_ATTACHMENTS = 'attachments'
INTENT_IMAGES = 'images'
INTENT_FILES = 'files'
INTENTS = [INTENT_ATTACHMENTS, INTENT_IMAGES, INTENT_FILES]
def get_list(intent, object, context):
    """Resolve ``object`` from the template context and return its
    attachments (images and/or files, depending on ``intent``)."""
    result = []
    # ``object`` arrives as a template variable name; rebind it to the
    # resolved model instance.
    object = template.Variable(object).resolve(context)
    if object is None:
        return result
    if intent in [INTENT_ATTACHMENTS, INTENT_IMAGES]:
        result += [attachment for attachment in AttachmentImage.objects.filter(
            content_type=ContentType.objects.get_for_model(object.__class__),
            object_id=object.id)]
    if intent in [INTENT_ATTACHMENTS, INTENT_FILES]:
        result += [attachment for attachment in AttachmentFile.objects.filter(
            content_type=ContentType.objects.get_for_model(object.__class__),
            object_id=object.id)]
    return result

register = template.Library()
class ShowAttachments(template.Node):
    """Template node rendering ``attachment/show.html`` with the attachments
    of one object."""
    def __init__(self, intent, object):
        self.intent = intent
        self.object = object
    def render(self, context):
        attachments = get_list(self.intent, self.object, context)
        # Fall back to a blank HttpRequest when the context carries none.
        return render_to_string('attachment/show.html', {
            'attachments': attachments,
        }, context_instance=template.RequestContext(context.get('request', HttpRequest())))
def show_attachments(parser, token):
    """Show attachments for object.

    Shared compiler for the show_attachments/show_images/show_files tags;
    the intent is recovered from the tag name itself. (Python 2 raise syntax.)
    """
    splited = token.split_contents()
    if len(splited) != 3 or splited[0].split('_')[1] not in INTENTS or splited[1] != 'for':
        raise template.TemplateSyntaxError, "Invalid syntax. Use ``{% show_<attachments|images|files> for <object> %}``"
    return ShowAttachments(splited[0].split('_')[1], splited[2])
# Register show_attachments / show_images / show_files on one compiler.
for intent in INTENTS:
    register.tag('show_%s' % intent, show_attachments)
class GetAttachments(template.Node):
    """Template node storing the attachments of one object into a context
    variable instead of rendering them."""
    def __init__(self, intent, object, variable):
        self.intent = intent
        self.object = object
        self.variable = variable
    def render(self, context):
        # Renders nothing; only injects the list into the context.
        context[self.variable] = get_list(self.intent, self.object, context)
        return u''
def get_attachments(parser, token):
    """Get attachments for object.

    Shared compiler for the get_attachments/get_images/get_files tags;
    the intent is recovered from the tag name itself. (Python 2 raise syntax.)
    """
    splited = token.split_contents()
    if len(splited) != 5 or splited[0].split('_')[1] not in INTENTS or splited[1] != 'for' or splited[3] != 'as':
        raise template.TemplateSyntaxError, "Invalid syntax. Use ``{% get_<attachments|images|files> for <object> as <variable> %}``"
    return GetAttachments(splited[0].split('_')[1] , splited[2], splited[4])
# Register get_attachments / get_images / get_files on one compiler.
for intent in INTENTS:
    register.tag('get_%s' % intent, get_attachments)
class GetImageGroups(AsTag):
    """ Get a dict like {group_name: [group_image_list]} of all the images
    from image_list with group attribute specified.
    """
    name = 'get_image_groups'
    # {% get_image_groups for <image_list> as <varname> %}
    options = Options(
        'for',
        Argument('image_list', resolve=True, required=True),
        'as',
        Argument('varname', resolve=False, required=True),
    )
    def get_value(self, context, image_list):
        # SortedDict keeps group insertion order (pre-Python 2.7 Django).
        result = SortedDict()
        for image in image_list:
            if image.group:
                group_name = image.group
                result.setdefault(group_name, []).append(image)
        return result
register.tag(GetImageGroups)
@register.filter
def ungrouped(value):
    """Return only those images from ``value`` whose ``group`` attribute is
    empty or unset."""
    return [img for img in value if not img.group]
@register.filter
def key(value, arg):
    """Look up ``arg`` in the mapping ``value``; return None when missing."""
    try:
        return value[arg]
    except KeyError:
        return None
import re
from beautifulsoup import BeautifulSoup, NavigableString, Tag, buildTagMap
from trustedhtml import settings
from trustedhtml.signals import rule_done, rule_exception
from trustedhtml.utils import get_cdata, get_style
from urlmethods import urlsplit, urljoin, urlfix, remote_check, local_check
# Disable BeautifulSoup's special quoting of script/textarea contents so the
# sanitizer sees their bodies as regular markup.
BeautifulSoup.QUOTE_TAGS = {}
# Declare which HTML tags are self-closing for the parser.
BeautifulSoup.SELF_CLOSING_TAGS = buildTagMap(None, [
    'area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
    'img', 'input', 'isindex', 'link', 'meta', 'param',
    # 'spacer' deliberately omitted: its semantics are unclear.
])
class TrustedException(ValueError):
    """Root of the trustedhtml exception hierarchy."""
    pass
class EmptyException(TrustedException):
    """
    Raised when a value is empty and the ``allow_empty`` flag is False.
    This exception means that the attribute must be removed.
    """
    pass
class IncorrectException(TrustedException):
    """
    Raised when a value is incorrect.
    This exception means that the attribute must be removed.
    """
    pass
class InvalidException(TrustedException):
    """
    Raised when a value passes the check while the ``invalid`` flag is True.
    This exception means that the whole item must be removed.
    """
    pass
class ElementException(TrustedException):
    """
    Raised when a value fails the check and the ``tag_exception`` flag is True.
    This exception means that the whole TAG must be removed.
    It propagates through all rules up to the element-rule.
    """
    pass
class Rule(object):
"""
Base rule class.
All rules inherit it and overwrite ``core`` or ``__init__`` functions.
"""
    def __init__(
        self, allow_empty=True, default=None, invalid=False,
        element_exception=False, data=None):
        """
        Sets behaviour for the rule:
        ``allow_empty`` if False then the value can`t be empty.
            For example: attribute "width" for tag "img".
        ``default`` if it is not None and validation fails then this value
            will be returned instead.
            For example: attribute "alt" for tag "img".
        ``invalid`` if True then the result of validation is inverted.
            So if validation passes, InvalidException will be raised.
            If validation fails, the source value is returned as correct.
        ``element_exception`` if True and validation fails then
            the whole TAG must be removed.
        ``data`` any extended data, usually used by signals.
        """
        self.allow_empty = allow_empty
        self.default = default
        self.invalid = invalid
        self.element_exception = element_exception
        self.data = data
def validate(self, value, path=None):
"""
Main interface function. Call it to validate specified ``value``.
Returns correct value or raise exception.
``path`` is the list of rules that called this validation.
First element of this list will be first rule.
This function will call ``preprocess``, ``core`` and
``postprocess`` functions.
They can be overwritten by subclasses.
"""
if path is None:
path = []
source = value
try:
try:
value = self.preprocess(value, path)
value = self.core(value, path)
value = self.postprocess(value, path)
if self.invalid:
raise InvalidException(self, value)
except ElementException, exception:
raise exception
except TrustedException, exception:
if self.element_exception:
raise ElementException(*exception.args)
if self.default is not None:
value = self.default
elif self.invalid and exception is not InvalidException:
value = source
else:
raise exception
results = rule_done.send(
sender=self.__class__, rule=self,
path=path, value=value, source=source)
for receiver, response in results:
value = response
except TrustedException, exception:
rule_exception.send(
sender=self.__class__, rule=self,
path=path, value=value, source=source, exception=exception)
raise exception
return value
def core(self, value, path):
"""
This function is called while validation.
Subclasses can overwrite this one to define another validation mechanism.
``value`` is value for validation.
``path`` is the list of rules that called this validation.
First element of this list will be first rule.
Return correct value or raise TrustedException (or subclasses).
"""
return value
def preprocess(self, value, path):
"""
This function is called while validation before ``core``.
It checks ``value`` according to ``allow_empty`` property.
Subclasses can overwrite this one to define another preprocess mechanism.
``value`` is value for validation.
``path`` is the list of rules that called this validation.
First element of this list will be first rule.
Return prepared value for ``core`` function.
"""
if not self.allow_empty and not value:
raise EmptyException(self, value)
return value
def postprocess(self, value, path):
"""
This function is called while validation after ``core``.
It checks ``value`` according to ``allow_empty`` property.
Subclasses can overwrite this one to define another postprocess mechanism.
``value`` is value for validation.
``path`` is the list of rules that called this validation.
First element of this list will be first rule.
Return prepared value after ``core`` function.
"""
if not self.allow_empty and not value:
raise EmptyException(self, value)
return value
class String(Rule):
    """
    Rule that treats any string value as correct.

    Validation returns the value coerced to unicode, optionally with
    leading and trailing whitespace stripped.
    """

    def __init__(self, case_sensitive=False, strip=True, allow_empty=False, **kwargs):
        """
        ``strip`` if True, leading and trailing whitespace is removed.
        ``case_sensitive`` if True, validation is case sensitive.
        This class does not lower-case ``value`` by itself; it only
        provides helper functions for subclasses to do so.
        """
        super(String, self).__init__(allow_empty=allow_empty, **kwargs)
        if self.default is not None:
            self.default = unicode(self.default)
        self.case_sensitive = case_sensitive
        self.strip = strip

    def lower_string(self, value):
        """
        ``value`` is an object with an __unicode__ method, or None.
        Return ``value`` as unicode, lower-cased unless ``case_sensitive``
        is set.  None passes through unchanged.
        """
        if value is None:
            return None
        text = unicode(value)
        if not self.case_sensitive:
            text = text.lower()
        return text

    def lower_list(self, values):
        """
        Apply ``lower_string`` to every item of ``values``.
        ``values`` may also be None, True or False, which pass through.
        """
        if values is None or values is True or values is False:
            return values
        return map(self.lower_string, values)

    def preprocess(self, value, path):
        """Coerce ``value`` to unicode (empty string for None) and strip it."""
        value = super(String, self).preprocess(value, path)
        if value is None:
            value = u''
        else:
            value = unicode(value)
        if self.strip:
            value = value.strip()
        return value
class List(String):
    """
    Rule that accepts only values present in ``values``.

    Validation returns the matching item from ``values``.
    """

    def __init__(self, values, return_defined=True, **kwargs):
        """
        ``values`` is the list of allowed values.
        ``return_defined`` if True, return the value exactly as it was
        defined in ``values`` (relevant for case-insensitive matching).
        """
        super(List, self).__init__(**kwargs)
        self.source_values = values
        self.return_defined = return_defined
        self.values = self.lower_list(self.source_values)

    def core(self, value, path):
        """Accept ``value`` only when it is one of the allowed values."""
        source = super(List, self).core(value, path)
        normalized = self.lower_string(source)
        if normalized not in self.values:
            raise IncorrectException(self, normalized)
        if self.case_sensitive:
            return normalized
        if self.return_defined:
            return self.source_values[self.values.index(normalized)]
        return source
class RegExp(String):
    """
    Rule that accepts values matching the given ``regexp``.

    Validation returns the match expanded with the ``expand`` template.
    """

    def __init__(self, regexp, flags=0, expand=r'\1', **kwargs):
        """
        ``regexp`` regular expression string used to validate the value.
        Append '$' to the expression to avoid silently dropping the tail
        of the string.
        ``flags`` regular expression flags.
        ``expand`` template used to expand the match object into the
        validation result.

        Example:
            regexp: r'([-+]?\d*),(?P<a>\d*)$'
            expand: r'\g<a>;\1'
            value:  '-12,34'
            result: '34;-12'
        (returns group "a", then the first group; the trailing '$'
        prevents skipping the rest of the string).
        """
        super(RegExp, self).__init__(**kwargs)
        self.regexp = regexp
        self.expand = expand
        self.flags = flags
        if not self.case_sensitive:
            self.flags |= re.IGNORECASE
        self.compiled = re.compile(unicode(self.regexp), self.flags)

    def core(self, value, path):
        """Match ``value`` against the pattern and expand the result."""
        value = super(RegExp, self).core(value, path)
        match = self.compiled.match(value)
        if match is None:
            raise IncorrectException(self, value)
        return match.expand(self.expand)
class Uri(String):
    """
    Rule suppose that value is correct if it is an allowed URI.
    Validation will return the correct (rebuilt) URI.
    """
    # URI target kinds; they select which settings supply default sites.
    LINK = 0
    IMAGE = 1
    OBJECT = 2

    def __init__(
        self, type=LINK, allow_sites=None, allow_schemes=None,
        cut_sites=None, cut_schemes=None, verify_sites=None, verify_schemes=None,
        verify_local=None, local_sites=None, local_schemes=None,
        **kwargs):
        """
        ``type`` is one of LINK, IMAGE, OBJECT and indicates what the URI
        points at.  This class does not itself validate the target type,
        but the attribute can be used by other rules or signals; it also
        selects which settings provide the default ``allow_sites``.

        Every list passed to this function can be:
            a list of strings with allowed values;
            True to allow all values;
            False to disable all values;
            None to take the default value (from settings.py).

        ``allow_sites`` is a list with the names of allowed sites.
        You can use your registered sites like this:
        [site.domain for site in django.contrib.sites.models.Site.objects.all()]

        ``allow_schemes`` is a list of allowed schemes for URIs.

        ``cut_sites`` is a list of sites for which the scheme and site
        name will be removed from the url.  Cut operations are applied
        before any other validations.
        ``cut_schemes`` is a list of schemes that can be removed;
        only urls with such a scheme will be cut.

        ``verify_sites`` is a list of sites to be verified: validation
        will try to fetch the url for such sites.
        ``verify_schemes`` is a list of allowed schemes for verification.

        ``verify_local`` specifies the verification mechanism for local urls:
            a string host name used to fetch urls that have no host, e.g.
            django.contrib.sites.models.Site.objects.get_current().domain;
            False to disable any verification of local urls;
            True to verify through django.test.Client — required when the
            server runs in a single thread (and was not forked), because
            requests to itself could not be received in that case.
        ``local_sites`` is a list of sites recognized as local.
        ``local_schemes`` is a list of schemes for local verification.

        This class ignores ``case_sensitive`` and treats it as False.
        """
        super(Uri, self).__init__(case_sensitive=False, **kwargs)
        # Resolve None arguments to project-level defaults.
        if allow_sites is None:
            if type == self.LINK:
                allow_sites = settings.TRUSTEDHTML_LINK_SITES
            elif type == self.IMAGE:
                allow_sites = settings.TRUSTEDHTML_IMAGE_SITES
            elif type == self.OBJECT:
                allow_sites = settings.TRUSTEDHTML_OBJECT_SITES
            else:
                allow_sites = False
        if allow_schemes is None:
            allow_schemes = settings.TRUSTEDHTML_ALLOW_SCHEMES
        if cut_sites is None:
            cut_sites = settings.TRUSTEDHTML_CUT_SITES
        if cut_schemes is None:
            cut_schemes = settings.TRUSTEDHTML_CUT_SCHEMES
        if verify_sites is None:
            verify_sites = settings.TRUSTEDHTML_VERIFY_SITES
        if verify_schemes is None:
            verify_schemes = settings.TRUSTEDHTML_VERIFY_SCHEMES
        if verify_local is None:
            verify_local = settings.TRUSTEDHTML_VERIFY_LOCAL
        if local_sites is None:
            local_sites = settings.TRUSTEDHTML_LOCAL_SITES
        if local_schemes is None:
            local_schemes = settings.TRUSTEDHTML_LOCAL_SCHEMES
        self.type = type
        self.allow_sites = self.lower_list(allow_sites)
        self.allow_schemes = self.lower_list(allow_schemes)
        self.cut_sites = self.lower_list(cut_sites)
        self.cut_schemes = self.lower_list(cut_schemes)
        self.verify_sites = self.lower_list(verify_sites)
        self.verify_schemes = self.lower_list(verify_schemes)
        self.local_sites = self.lower_list(local_sites)
        self.local_schemes = self.lower_list(local_schemes)
        self.verify_local = verify_local

    @staticmethod
    def inlist(value, lst):
        """
        Check whether ``value`` is in ``lst``.
        ``lst`` can be:
            a list of strings with allowed values;
            True to allow all values;
            False or None to disable all values.
        A ``value`` of None (absent component) is always accepted.
        """
        if value is None:
            return True
        if lst is True:
            return True
        if lst is None or lst is False:
            return False
        return value in lst

    def preprocess(self, value, path):
        """
        Correct escaped chars (%hh).
        We don`t replace escaped chars, just replace "%Z" with "%25Z".
        Escaped chars are not allowed in the scheme and authority parts,
        so the prepared values can be trusted.
        """
        value = super(Uri, self).preprocess(value, path)
        value = urlfix(value)
        return value

    def core(self, value, path):
        """Split the URI, apply cut/allow/verify policies, and rebuild it."""
        value = String.core(self, value, path)
        # NOTE: ``path`` is rebound below to the URL path component; the
        # rules path is not needed past this point (no sub-rules run).
        scheme_source, authority_source, path, query, fragment = urlsplit(value)
        scheme = self.lower_string(scheme_source)
        authority = self.lower_string(authority_source)
        # Strip scheme and host from urls pointing at "cut" (own) sites.
        if self.inlist(scheme, self.cut_schemes) and self.inlist(authority, self.cut_sites) and (
            scheme is not None or authority is not None):
            scheme = None
            authority = None
            if not path and not query and not fragment:
                # A bare "http://site" collapses to the site root.
                path = '/'
        if not self.inlist(scheme, self.allow_schemes) or not self.inlist(authority, self.allow_sites):
            raise IncorrectException(self, value)
        # Scheme used when fetching the url for verification.
        if scheme is None:
            check_scheme = 'http'
        else:
            check_scheme = scheme
        if self.inlist(scheme, self.local_schemes) and self.inlist(authority, self.local_sites):
            if self.verify_local is True:
                # Verify through the in-process test client.
                if not path and query is not None:
                    raise IncorrectException(self, value)
                if path and not local_check(path, query):
                    raise IncorrectException(self, value)
            elif self.verify_local is not False:
                # ``verify_local`` is a host name: fetch the url remotely.
                check = urljoin(check_scheme, self.verify_local, path, query, fragment)
                if not remote_check(check):
                    raise IncorrectException(self, value)
        elif self.inlist(scheme, self.verify_schemes) and self.inlist(authority, self.verify_sites):
            if authority is not None:
                check = urljoin(check_scheme, authority, path, query, fragment)
                if not remote_check(check):
                    raise IncorrectException(self, value)
        value = urljoin(scheme, authority, path, query, fragment)
        return value
class No(Rule):
    """
    Rule that rejects every value.

    Validation always raises IncorrectException.
    """

    def core(self, value, path):
        """Reject unconditionally."""
        raise IncorrectException(self, value)
class And(Rule):
    """
    Rule that requires the value to satisfy all ``rules``.

    The first rule validates the specified value, the second rule
    validates the result of the first one, and so on; validation
    returns the value produced by the last rule.
    """

    def __init__(self, rules, **kwargs):
        """``rules`` is the list of rules applied in sequence."""
        super(And, self).__init__(**kwargs)
        self.rules = rules

    def core(self, value, path):
        """Feed ``value`` through every rule, chaining the results."""
        result = super(And, self).core(value, path)
        subpath = path + [self]
        for rule in self.rules:
            result = rule.validate(result, subpath)
        return result
class Or(Rule):
"""
Rule suppose that value is correct if there is correct rule in ``rules`` list.
Validation will return first correct value returned by specified ``rules``.
If validation for all ``rules`` will fail than raise last exception.
If rule raise ElementException it will be immediately raised.
"""
def __init__(self, rules, **kwargs):
"""
``rules`` is list of rules to validate specified ``value``.
"""
super(Or, self).__init__(**kwargs)
self.rules = rules
def core(self, value, path):
"""Do it."""
value = super(Or, self).core(value, path)
path = path[:] + [self]
last = IncorrectException
for rule in self.rules:
try:
return rule.validate(value, path)
except ElementException, exception:
raise exception
except TrustedException, exception:
last = exception
raise last
class Sequence(String):
    """
    Rule that splits the value by ``regexp`` and validates every part
    with ``rule``.

    Validation returns the validated parts joined back together.
    """

    def __init__(
        self, rule, regexp=r'\s+', flags=0, min_split=0, max_split=0,
        join_string=' ', prepend_string='', append_string='', **kwargs):
        """
        ``rule`` rule used to validate each part of the value.
        ``regexp`` regular expression string used to split the value.
        ``flags`` regular expression flags.
        ``min_split`` minimum allowed number of parts
        (IncorrectException is raised when there are fewer).
        ``max_split`` maximum allowed number of parts, 0 for unlimited
        (IncorrectException is raised when there are more).
        ``join_string`` string used to join the validated parts back.
        ``prepend_string`` string inserted before the joined value.
        ``append_string`` string appended after the joined value.
        """
        super(Sequence, self).__init__(**kwargs)
        self.rule = rule
        self.regexp = regexp
        self.flags = flags
        if not self.case_sensitive:
            self.flags |= re.IGNORECASE
        self.compiled = re.compile(unicode(self.regexp), self.flags)
        self.min_split = min_split
        self.max_split = max_split
        self.join_string = join_string
        self.prepend_string = prepend_string
        self.append_string = append_string

    def sequence(self, values, path):
        """
        Validate each item of ``values`` with ``rule``.

        Called from ``core``; subclasses can overwrite this to define
        another validation mechanism.  ``path`` already includes this
        rule, so it can be passed straight to sub-validations.
        Return the list of validated parts, or raise TrustedException
        (or a subclass).
        """
        return [self.rule.validate(item, path) for item in values]

    def core(self, value, path):
        """Split the value, validate each part, then join the results."""
        value = super(Sequence, self).core(value, path)
        parts = self.compiled.split(value)
        count = len(parts)
        if count < self.min_split:
            raise IncorrectException(self, value)
        if self.max_split and count > self.max_split:
            raise IncorrectException(self, value)
        parts = self.sequence(parts, path + [self])
        if not parts:
            return u''
        return self.prepend_string + self.join_string.join(parts) + self.append_string
class Complex(Sequence):
    """
    Rule suppose that value is correct if each part of value,
    divided by ``regexp``, matches one of the specified
    ``rules`` in the corresponding order.
    Validation will return the joined parts of value.
    """
    def __init__(self, rules, **kwargs):
        """
        ``rules`` is the ordered list of rules for validation.
        """
        super(Complex, self).__init__(rule=None, **kwargs)
        self.rules = rules

    def sequence(self, values, path):
        """Match all parts, starting from the first rule."""
        return self.complex(values, path, 0, 0)

    def complex(self, values, path, value_index, rule_index):
        """
        Recursively match ``values[value_index:]`` against
        ``self.rules[rule_index:]``: rules may be skipped, but their
        relative order is kept (greedy matching with backtracking).

        ``values`` is the list of parts of the specified value.
        ``path`` is the list of rules that called this validation + self.
        ``value_index`` is the index in ``values`` to be processed.
        ``rule_index`` is the index in ``rules`` to be processed.
        Return the correct list of parts of value or raise
        IncorrectException or ElementException.
        """
        if value_index >= len(values):
            # All parts matched successfully.
            return values
        if rule_index >= len(self.rules):
            # Parts remain but no rules are left to try.
            raise IncorrectException(self, values)
        try:
            value = self.rules[rule_index].validate(values[value_index], path)
            result = self.complex(values, path, value_index + 1, rule_index + 1)
            result[value_index] = value
            return result
        except ElementException, exception:
            raise exception
        except TrustedException:
            # Current rule failed: try the next rule on the same part.
            return self.complex(values, path, value_index, rule_index + 1)
class Validator(object):
    """
    Provides a mechanism to validate a list of values (tag attributes or
    style properties) against corresponding rules.
    """
    def __init__(self, rules):
        """
        ``rules`` is a dictionary in which each key is the name of a
        property (or tag attribute) and the value is the corresponding rule.
        """
        self.rules = rules

    def check(self, values, path):
        """
        Check the list of ``values`` (tag attributes or style properties)
        against the configured rules.

        ``values`` is a list of (property, value) pairs, as 2-tuples.
        ``path`` is the list of rules that called this validation.
        The first element of this list is the first rule.
        Return the list of correct (name, value) pairs, names lower-cased;
        pairs that fail validation are silently dropped.
        Or raise ElementException when a rule demands tag removal.
        """
        if not self.rules:
            return []
        correct = {}
        source = {}
        # Index the supplied values by lower-cased name (later duplicates win).
        for name, value in values:
            name = name.lower()
            source[name] = value
        # Every configured rule runs, even for absent names (value None),
        # so rules may supply default values.
        for name, rule in self.rules.iteritems():
            try:
                name = name.lower()
                value = source.get(name, None)
                correct[name] = rule.validate(value, path)
            except ElementException, exception:
                raise exception
            except TrustedException:
                pass
        # Order values in source ordering. New values will be appended.
        order = [name.lower() for name, value in values]
        append = [name for name, value in correct.iteritems() if name not in order]
        order.extend(append)
        values = [(order.index(name), name, value) for name, value in correct.iteritems()]
        values.sort()
        return [(name, value) for index, name, value in values]
class Style(Sequence, Validator):
    """
    Rule suppose that value is correct if each part of ``value``
    is a pair (property_name, property_value) and each property_name
    has a valid property_value according to the ``rules`` dictionary.
    Validation will return only the valid pairs, joined back together.
    """
    def __init__(self, rules, **kwargs):
        """
        ``rules`` is a dictionary in which each key is the name of a CSS
        property and the value is the corresponding rule.
        """
        # Split on ';' boundaries; rejoin as "name: value; name: value;".
        Sequence.__init__(
            self, rule=None, regexp=r'\s*;\s*',
            join_string='; ', append_string=';', **kwargs)
        Validator.__init__(self, rules=rules)

    def preprocess(self, value, path):
        """Extract the style declaration text before splitting."""
        value = super(Style, self).preprocess(value, path)
        value = get_style(value)
        return value

    def sequence(self, values, path):
        """Parse "name: value" declarations and validate them as a set."""
        properties = []
        for value in values:
            # Declarations without ':' are malformed; drop them.
            if ':' not in value:
                continue
            property_name = value[:value.find(':')].strip()
            property_value = value[value.find(':') + 1:].strip()
            properties.append((property_name, property_value))
        properties = self.check(properties, path)
        return [
            '%s: %s' % (property_name, property_value)
            for property_name, property_value in properties]
class Element(Rule, Validator):
    """
    Rule suppose that value is correct if ``value`` is a LIST OF PAIRS
    (attribute_name, attribute_value) and each attribute_name
    has a valid attribute_value according to the ``rules`` dictionary.
    Validation will return the list of valid pairs.
    """
    def __init__(
        self, rules=None, contents=None, empty_element=False,
        remove_element=False, optional_start=False, optional_end=False,
        save_content=True, **kwargs):
        """
        ``rules`` is a dictionary in which each key is the name of a tag
        attribute and the value is the corresponding rule.
        ``contents`` list of elements allowed inside this one.
        The list contains strings with names, or True to allow text content.
        ``empty_element`` whether the element can contain no content.
        ``remove_element`` whether the element must be removed in any case.
        ``optional_start`` the start of this element is optional.
        ``optional_end`` the end of this element is optional.
        ``save_content`` whether the content of an incorrect tag must be
        kept in the parent tag.
        """
        if rules is None:
            rules = {}
        if contents is None:
            contents = []
        Rule.__init__(self, **kwargs)
        Validator.__init__(self, rules=rules)
        self.contents = contents
        self.empty_element = empty_element
        self.remove_element = remove_element
        self.optional_start = optional_start
        self.optional_end = optional_end
        self.save_content = save_content
        # NOTE(review): dead branch — ``contents`` was already defaulted
        # to [] above, so it can never be None here; confirm intent.
        if self.contents is None:
            self.empty_element = True

    def preprocess(self, value, path):
        """Resolve character entities in every attribute value."""
        # Don`t call super to avoid raising EmptyException.
        value = [
            (attribute_name, get_cdata(attribute_value))
            for attribute_name, attribute_value in value]
        return value

    def postprocess(self, value, path):
        """No-op: attribute lists may legitimately be empty."""
        # Don`t call super to avoid raising EmptyException.
        return value

    def core(self, value, path):
        """Validate the attribute list; demote tag-removal requests."""
        try:
            return self.check(value, path)
        except ElementException, exception:
            # An ElementException raised by an attribute rule would
            # otherwise propagate past this element; convert it so that
            # only this tag is removed.
            raise IncorrectException(*exception.args)
class Html(String):
    """
    Rule suppose that value is correct if it can be fixed
    over ``fix_number`` iterations, with characters prepared for fixing
    over ``prepare_number`` iterations per fix.
    Validation returns the fixed, valid html as a unicode string.
    """
    # All constants must be lowered.
    # Characters that must be escaped in text nodes (and unescaped again
    # by ``get_plain_text``).
    # NOTE(review): the entity strings were corrupted in the source by
    # entity decoding; restored to the standard HTML references.
    SPECIAL_CHARS = [
        ('&', '&amp;'), # Must be first element in list
        ('"', '&quot;'),
        ("'", '&#39;'),
        ('<', '&lt;'),
        ('>', '&gt;'),
        #(NBSP_CHAR, NBSP_TEXT),
    ]
    # Same pairs in reverse order, used to unescape entities back to chars.
    PLAIN_CHARS = [SPECIAL_CHARS[index] for index in range(len(SPECIAL_CHARS) - 1, -1, -1)]
    # Numeric character references: &#123; or &#x7b; (semicolon optional).
    CODE_RE = re.compile('&#(([0-9]+);?|x([0-9A-Fa-f]+);?)')
    # Codes that must not be emitted literally: NUL is dropped, and the
    # special characters stay escaped.
    CODE_RE_SPECIAL = dict(
        [(0, '')] + [(ord(char), string) for char, string in SPECIAL_CHARS])
    # Control characters and whitespace runs collapse to a single space.
    SYSTEM_RE = re.compile('[\x01-\x1F\s]+')
    NBSP_CHAR = u'\xa0'
    # NOTE(review): restored as the '&nbsp;' entity (pairs with NBSP_CHAR
    # in the commented SPECIAL_CHARS entry); the constant is otherwise unused.
    NBSP_TEXT = '&nbsp;'
    # Runs of two or more spaces / non-breaking spaces.
    NBSP_RE = re.compile('[' + NBSP_CHAR + ' ]{2,}')
    DEFAULT_ROOT_TAG = 'p'
    # Extra massage: repair broken comment openers like "<!-x".
    MARKUP_MASSAGE = BeautifulSoup.MARKUP_MASSAGE + [
        (re.compile('<!-([^-])'), lambda match: '<!--' + match.group(1))
    ]
    # Shared instance, used only for ``isSelfClosingTag`` queries.
    BEAUTIFUL_SOUP = BeautifulSoup()

    def __init__(
        self, rules, fix_number=2, prepare_number=2, root_tags=None,
        allow_empty=True, **kwargs):
        """
        ``rules`` is a dictionary mapping tag names to Element rules.
        ``fix_number`` maximum number of attempts to fix the value.
        ``prepare_number`` maximum number of attempts to prepare the value.
        ``root_tags`` list of tags allowed at the root of the document;
        DEFAULT_ROOT_TAG is always included.
        """
        if root_tags is None:
            root_tags = []
        super(Html, self).__init__(allow_empty=allow_empty, **kwargs)
        self.rules = rules
        self.fix_number = fix_number
        self.prepare_number = prepare_number
        self.root_tags = root_tags
        if self.DEFAULT_ROOT_TAG not in self.root_tags:
            self.root_tags.append(self.DEFAULT_ROOT_TAG)

    def remove_spaces(self, value):
        """Collapse runs of (non-breaking) spaces in ``value`` to one NBSP."""
        return self.NBSP_RE.sub(self.NBSP_CHAR, value)

    def correct(self, value):
        """
        Prepare the characters in ``value``: resolve numeric references,
        drop NUL and control characters, collapse whitespace runs.
        """
        def code_re_sub(match):
            # Decode one numeric character reference; special characters
            # stay escaped and invalid codes vanish.
            try:
                if match.group(2):
                    code = int(match.group(2))
                elif match.group(3):
                    code = int(match.group(3), 16)
                else:
                    code = 0
                if code in self.CODE_RE_SPECIAL:
                    return self.CODE_RE_SPECIAL[code]
                return unichr(code)
            except (ValueError, OverflowError):
                return ''
        value = value.replace('\0', '')
        value = self.CODE_RE.sub(code_re_sub, value)
        value = self.SYSTEM_RE.sub(' ', value)
        value = self.remove_spaces(value)
        return value

    def clear(self, soup, path):
        """
        Walk ``soup`` recursively: validate every tag against ``rules``
        (removing disallowed tags, optionally keeping their content) and
        escape special characters in text nodes.  Anything that is
        neither a Tag nor a NavigableString (comments, declarations) is
        dropped.
        """
        index = 0
        while index < len(soup.contents):
            if isinstance(soup.contents[index], Tag):
                rule = self.rules.get(soup.contents[index].name, None)
                try:
                    tag = soup.contents[index]
                    if rule is None:
                        raise IncorrectException(self, tag.attrs)
                    if rule.remove_element:
                        raise IncorrectException(self, tag.attrs)
                    tag.attrs = rule.validate(tag.attrs, path)
                except TrustedException:
                    # Tag rejected: remove it, moving its children up into
                    # the parent when the rule allows saving content, then
                    # re-examine the same index.
                    element = soup.contents[index].extract()
                    if rule is None or getattr(rule, 'save_content', True):
                        insert = index
                        while len(element.contents):
                            soup.insert(insert, element.contents[0])
                            insert += 1
                    continue
                self.clear(soup.contents[index], path)
            elif soup.contents[index].__class__ is NavigableString:
                value = soup.contents[index].string
                value = self.remove_spaces(value)
                for char, string in self.SPECIAL_CHARS:
                    value = value.replace(char, string)
                if value != soup.contents[index].string:
                    soup.contents[index].replaceWith(value)
            else:
                # Comments, declarations, processing instructions: drop.
                soup.contents[index].extract()
                continue
            index += 1
        return soup

    def join(self, soup):
        """
        Merge adjacent text nodes into one.  Returns True when anything
        changed.
        """
        changed = False
        index = 0
        while index < len(soup.contents) - 1:
            if (
                not isinstance(soup.contents[index + 1], Tag)
                and not isinstance(soup.contents[index], Tag)):
                text = soup.contents[index].string + soup.contents[index + 1].string
                text = self.correct(text)
                if text != soup.contents[index].string:
                    soup.contents[index].replaceWith(text)
                # Always remove the merged neighbour so the pair becomes one node.
                soup.contents[index + 1].extract()
                changed = True
                continue
            index += 1
        return changed

    def collapse(self, soup):
        """
        Repeatedly drop or normalize tags that collapsed to nothing
        (empty, non-self-closing tags), filling the rule's default
        content where one is defined, until the tree is stable.
        """
        changed = True
        while changed:
            changed = False
            index = 0
            while index < len(soup.contents):
                if isinstance(soup.contents[index], Tag):
                    self.collapse(soup.contents[index])
                    if not self.BEAUTIFUL_SOUP.isSelfClosingTag(soup.contents[index].name):
                        text = soup.contents[index].renderContents(encoding=None)
                        # encoding=None: fix bug in BeautifulSoup (doesn`t work with unicode)
                        text = self.correct(text)
                        if not text or text == ' ' or text == self.NBSP_CHAR:
                            rule = self.rules[soup.contents[index].name]
                            if rule.default and text != rule.default:
                                # Replace the empty content with the default.
                                changed = True
                                text = rule.default
                                while soup.contents[index].contents:
                                    soup.contents[index].contents[0].extract()
                                soup.contents[index].append(text)
                            else:
                                if not rule.empty_element:
                                    # Empty tag is not allowed: unwrap or drop it.
                                    changed = True
                                    if text:
                                        soup.contents[index].replaceWith(text)
                                    else:
                                        soup.contents[index].extract()
                                    continue
                index += 1
            if not changed:
                if self.join(soup):
                    changed = True
        return soup

    def collapse_root(self, soup):
        """Drop root-level text nodes that contain only whitespace."""
        index = 0
        while index < len(soup.contents):
            if not isinstance(soup.contents[index], Tag):
                text = soup.contents[index].string
                text = self.correct(text)
                if not text or (text == ' ') or (text == self.NBSP_CHAR):
                    soup.contents[index].extract()
                    continue
            index += 1
        return soup

    def need_wrap(self, content, for_next):
        """
        Whether ``content`` must be wrapped into the default root tag.
        Tags listed in ``root_tags`` are never wrapped; whitespace-only
        text is skipped when starting a wrap (``for_next`` False) but is
        swallowed into an already-open wrap (``for_next`` True).
        """
        if isinstance(content, Tag):
            if content.name in self.root_tags:
                return False
        else:
            if not for_next and (
                content.string == ''
                or content.string == ' ' or content.string == self.NBSP_CHAR):
                return False
        return True

    def wrap(self, soup):
        """Wrap stray root-level content into DEFAULT_ROOT_TAG elements."""
        index = 0
        while index < len(soup.contents):
            while index < len(soup.contents) and not self.need_wrap(soup.contents[index], False):
                index += 1
            if index >= len(soup.contents):
                break
            start = Tag(soup, self.DEFAULT_ROOT_TAG)
            while index < len(soup.contents) and self.need_wrap(soup.contents[index], True):
                content = soup.contents[index].extract()
                start.append(content)
            soup.insert(index, start)
        return soup

    def get_plain_text(self, soup):
        """Return the text content of ``soup`` with entities unescaped."""
        result = u''
        for content in soup:
            if isinstance(content, Tag):
                result += self.get_plain_text(content)
            else:
                value = content.string
                for char, string in self.PLAIN_CHARS:
                    value = value.replace(string, char)
                result += value
        return result

    def fix(self, value, path):
        """
        One clean-up pass: parse, validate tags, collapse empty tags,
        strip stray root whitespace and wrap remaining root content.
        """
        soup = BeautifulSoup(
            value, markupMassage=self.MARKUP_MASSAGE,
            convertEntities=BeautifulSoup.ALL_ENTITIES)
        soup = self.clear(soup, path)
        soup = self.collapse(soup)
        soup = self.collapse_root(soup)
        soup = self.wrap(soup)
        return unicode(soup)

    def core(self, value, path):
        """Prepare and fix ``value`` until it is stable, or give up."""
        path = path[:] + [self]
        value = String.core(self, value, path)
        for iteration in xrange(self.fix_number + 1):
            # Normalize characters until stable.
            for preparing in xrange(self.prepare_number + 1):
                source = value
                value = self.correct(value)
                if source == value:
                    break
            else:
                raise IncorrectException(self, 'Too much attempts to prepare value')
            source = value
            value = self.fix(value, path)
            if source == value:
                break
        else:
            raise IncorrectException(self, 'Too much attempts to fix value')
        return value
from django.forms import Textarea
from django.forms import TextInput
from django.contrib.admin.widgets import AdminTextareaWidget
from trustedhtml.rules import pretty
class TrustedTextarea(Textarea):
    """
    Textarea widget with built-in trustedhtml validation.
    """
    def __init__(self, validator=pretty, *args, **kwargs):
        # ``validator`` is a trustedhtml rule; submitted values are run
        # through it before the form field sees them.
        super(TrustedTextarea, self).__init__(*args, **kwargs)
        self.validator = validator

    def value_from_datadict(self, data, files, name):
        # Validate (and clean) the raw submitted value on extraction.
        value = super(TrustedTextarea, self).value_from_datadict(data, files, name)
        return self.validator.validate(value)
class AdminTrustedTextarea(AdminTextareaWidget, TrustedTextarea):
    # Admin variant: AdminTextareaWidget supplies admin styling, while
    # TrustedTextarea (later in the MRO) supplies validation.
    def __init__(self, validator=pretty, *args, **kwargs):
        super(AdminTrustedTextarea, self).__init__(*args, **kwargs)
        self.validator = validator
class TrustedTextInput(TextInput):
    """
    TextInput widget with built-in trustedhtml validation.
    """
    def __init__(self, validator=pretty, *args, **kwargs):
        # ``validator`` is a trustedhtml rule applied to submitted values.
        super(TrustedTextInput, self).__init__(*args, **kwargs)
        self.validator = validator

    def value_from_datadict(self, data, files, name):
        # Validate (and clean) the raw submitted value on extraction.
        value = super(TrustedTextInput, self).value_from_datadict(data, files, name)
        return self.validator.validate(value)
class AdminTrustedTextInput(AdminTextareaWidget, TrustedTextInput):
    # NOTE(review): this mixes AdminTextareaWidget with a TextInput —
    # possibly AdminTextInputWidget was intended; confirm before changing.
    def __init__(self, validator=pretty, *args, **kwargs):
        super(AdminTrustedTextInput, self).__init__(*args, **kwargs)
        self.validator = validator
# TinyMCE support is optional: fall back to a plain validated textarea
# when the django-tinymce package is not installed.
try:
    from tinymce.widgets import TinyMCE
except ImportError:
    class TrustedTinyMCE(TrustedTextarea):
        pass

    class AdminTrustedTinyMCE(AdminTextareaWidget, TrustedTinyMCE):
        def __init__(self, validator=pretty, *args, **kwargs):
            super(AdminTrustedTinyMCE, self).__init__(*args, **kwargs)
            self.validator = validator
else:
    class TrustedTinyMCE(TinyMCE):
        """
        TinyMCE widget with built-in validation.
        """
        def __init__(self, validator=pretty, *args, **kwargs):
            # ``validator`` is a trustedhtml rule applied to submitted values.
            super(TrustedTinyMCE, self).__init__(*args, **kwargs)
            self.validator = validator

        def value_from_datadict(self, data, files, name):
            # Validate (and clean) the raw submitted value on extraction.
            value = super(TrustedTinyMCE, self).value_from_datadict(data, files, name)
            return self.validator.validate(value)

    class AdminTrustedTinyMCE(AdminTextareaWidget, TrustedTinyMCE):
        def __init__(self, validator=pretty, *args, **kwargs):
            super(AdminTrustedTinyMCE, self).__init__(*args, **kwargs)
            self.validator = validator
# Register the widgets with django-page-cms when it is available.
try:
    from pages.widgets_registry import register_widget
except ImportError:
    pass
else:
    register_widget(TrustedTextarea)
    register_widget(TrustedTextInput)
    register_widget(TrustedTinyMCE)
from trustedhtml.utils import get_dict
from trustedhtml.classes import List, Or, Sequence
from trustedhtml.rules.css.consts import none, inherit
from trustedhtml.rules.css.values import values
# CSS properties that are always stripped from user-supplied HTML,
# grouped by the reason they are rejected.
disabled = [
    # It will break block formatting:
    'margin-top', 'margin-right', 'margin-bottom', 'margin-left',
    'margin',
    'padding-top', 'padding-right', 'padding-bottom', 'padding-left',
    'padding',
    # It will break view:
    'border-top-width', 'border-right-width', 'border-bottom-width', 'border-left-width',
    'border-width',
    'border-top-color', 'border-right-color', 'border-bottom-color', 'border-left-color',
    'border-color',
    'border-top-style', 'border-right-style', 'border-bottom-style', 'border-left-style',
    'border-style',
    'border-top', 'border-right', 'border-bottom', 'border-left', 'border',
    # It will break block formatting:
    'display',  # FIXME: we should add special processing for "display: none;"
    'position',
    'top', 'right', 'bottom', 'left',
    'z-index',
    'direction',
    'unicode-bidi',
    # It will break text formatting:
    'line-height',
    # It isn't useful
    # (IE doesn't support it, and we don't want to get different appearance):
    'min-width', 'min-height',
    'max-width', 'max-height',
    # It will break text formatting:
    'overflow',
    'clip',
    'visibility',
    # It will break list formatting:
    'content',
    'quotes',
    'counter-reset', 'counter-increment',
    'marker-offset',
    'list-style-type',
    'list-style-image',
    'list-style-position',
    'list-style',
    # It will break printing:
    'size',
    'marks',
    'page-break-before', 'page-break-after',
    'page-break-inside',
    'page',
    'orphans', 'widows',
    # It will break the color scheme:
    'color',
    'background-color',
    'background-image',
    'background-repeat',
    'background-attachment',
    'background-position',
    'background',
    # It will break fonts:
    'font-family',
    'font-style',
    'font-variant',
    'font-weight',
    'font-stretch',
    'font-size',
    'font-size-adjust',
    'font',
    # It will break font settings:
    'text-indent',
    # It isn't usable, so disable it:
    'text-shadow',
    # It will break font settings:
    'letter-spacing', 'word-spacing',
    'text-transform',
    'white-space',
    # It is not useful:
    'speak-header',
    # It will break the table's view:
    'caption-side',
    # It will break the table-building mechanism:
    'table-layout',
    # It will break table formatting:
    # http://www.w3.org/TR/1998/REC-CSS2-19980512/tables.html#borders
    'border-collapse',
    'border-spacing',
    'empty-cells',
    # It will break default cursors:
    'cursor',
    # It will break block formatting:
    'outline-width',
    'outline-style',
    'outline-color',
    'outline',
]
# Properties additionally allowed on table elements only.
for_table = [
    # We need vertical align:
    # (for other elements it would break text formatting)
    'vertical-align',
    # We need horizontal align:
    # (for other elements it would break text formatting)
    'text-align',
]
# Properties additionally allowed on images only.
for_image = [
    # We need floating for images:
    # (for other elements it would break block formatting)
    'float',
    # We need the size of images:
    # (for other elements it would break block formatting)
    'width', 'height',
]
# Properties allowed on every element.
allowed = [
    # If we allow float for images, we need clear too:
    'clear',
    # We need it, but we must disable the 'blink' value:
    'text-decoration',
]
# 'text-decoration' keywords that may be combined; 'blink' is rejected.
text_decoration_base = List(values=[
    'underline', 'overline', 'line-through',
    # Disabled on purpose: 'blink',
])
# Full 'text-decoration' grammar: one or more keywords, or none/inherit.
text_decoration = Or(rules=[
    Sequence(rule=text_decoration_base, min_split=1), none, inherit,
])
# Property rules overridden relative to the stock CSS value grammar.
replace = {
    'text-decoration': text_decoration,
}
# Final property -> rule maps for regular elements, tables and images.
common = get_dict(source=values, leave=allowed, append=replace)
tables = get_dict(source=values, leave=allowed + for_table, append=replace)
images = get_dict(source=values, leave=allowed + for_image, append=replace)
#style_div = Style(rules={
# 'display': List(values=[
# 'none'], invalid=True, element_exception=True),
#}) | /redsolutioncms.django-trusted-html-0.2.0.zip/redsolutioncms.django-trusted-html-0.2.0/trustedhtml/rules/css/custom.py | 0.623721 | 0.155046 | custom.py | pypi |
from trustedhtml.classes import List, RegExp, Or, Sequence, Complex
from trustedhtml.rules.css.consts import inherit, none
from trustedhtml.rules.css.grammar import grammar
from trustedhtml.rules.css.syndata import positive_number, positive_length, positive_percentage
# CSS2 font property value grammars.
# Reference: http://www.w3.org/TR/1998/REC-CSS2-19980512/fonts.html

# A single font family name: identifier-ish characters or a quoted string.
family_name = RegExp(regexp=
    r'(([!#$%%&(-~]|\\%(nl)s|%(nonascii)s|%(escape)s)*|%(string1)s|%(string2)s)$' % grammar,
)
# The five CSS generic font families.
generic_family = List(values=[
    'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace'
])
# 'font-family': comma-separated mix of family names and generic families.
font_family = Or(rules=[
    Sequence(rule=Or(rules=[
        family_name,
        generic_family
    ]), regexp='\s*,\s*', join_string=',', min_split=1),
    inherit,
])
font_style = List(values=[
    'normal', 'italic', 'oblique',
    'inherit',
])
font_variant = List(values=[
    'normal', 'small-caps',
    'inherit',
])
font_weight = List(values=[
    'normal', 'bold', 'bolder', 'lighter',
    '100', '200', '300', '400', '500', '600', '700', '800', '900',
    'inherit',
])
font_stretch = List(values=[
    'normal', 'wider', 'narrower', 'ultra-condensed', 'extra-condensed',
    'condensed', 'semi-condensed', 'semi-expanded', 'expanded',
    'extra-expanded', 'ultra-expanded',
    'inherit',
])
# Absolute-size keywords plus the two relative-size keywords.
absolute_and_relative_size = List(values=[
    'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large',
] + [
    'larger', 'smaller',
])
font_size = Or(rules=[
    absolute_and_relative_size, positive_length, positive_percentage, inherit,
])
font_size_adjust = Or(rules=[
    positive_number, none, inherit,
])
# The "/line-height" part of the 'font' shorthand, normalised to "/<value>".
slash_line_height = RegExp(
    regexp=r'/%(w)s(?P<s>normal|%(positive-num)s|%(positive-length)s|%(positive-percentage)s|inherit)' % grammar,
    expand=r'/\g<s>',
)
# FIXME: the correct grammar would reuse visudet.line_height here.
font = Or(rules=[
    List(values=[
        'caption', 'icon', 'menu', 'message-box', 'small-caption', 'status-bar',
        'inherit',
    ]), Complex(rules=[
        font_style,
        font_variant,
        font_weight,
        font_size,
        slash_line_height,
        font_family,
    ])
])
# BUG: font-size and font-family are required.
# Correct sequence is: [ <'font-style'> || <'font-variant'> || <'font-weight'> ]? <'font-size'> [ / <'line-height'> ]? <'font-family'> | /redsolutioncms.django-trusted-html-0.2.0.zip/redsolutioncms.django-trusted-html-0.2.0/trustedhtml/rules/css/fonts.py | 0.54698 | 0.341116 | fonts.py | pypi |
from trustedhtml.classes import String, RegExp, Uri, No, Sequence, Style
from trustedhtml.rules.html.grammar import grammar
from trustedhtml.rules import css
# Attribute value rules for the HTML 4.01 data types.
# Reference: http://www.w3.org/TR/REC-html40/types.html

# Single NAME token; an IDREF is syntactically the same thing.
idref = name = RegExp(regexp=r'(%(name)s)$' % grammar)
# *_required rules invalidate the whole element when the value is rejected.
name_required = RegExp(regexp=r'(%(name)s)$' % grammar, element_exception=True)
# Whitespace- and comma-separated NAME lists.
idrefs = RegExp(regexp=r'(%(name)s(%(w)s%(name)s)*)$' % grammar)
idrefs_comma = RegExp(regexp=r'(%(name)s(%(w)s,%(w)s%(name)s)*)$' % grammar)
number = RegExp(regexp=r'(%(number)s)$' % grammar)
number_required = RegExp(regexp=r'(%(number)s)$' % grammar, element_exception=True)
positive_number = RegExp(regexp=r'(%(positive-number)s)$' % grammar)
# Free-form text attributes; text_default substitutes an empty string.
text = String()
text_required = String(element_exception=True)
text_default = String(default='')
# URI attributes, optionally restricted to image/object resources.
uri = Uri()
uri_required = Uri(element_exception=True)
uri_image = Uri(type=Uri.IMAGE)
uri_image_required = Uri(type=Uri.IMAGE, element_exception=True)
uri_object = Uri(type=Uri.OBJECT)
uris = Sequence(rule=Uri())
color = RegExp(regexp=r'(%(color)s)$' % grammar)
# Browsers support more than 16 colors, so you can use color_list from css
# IE supports the rgb(r,g,b) format, so you can use color_list from css
#color = Or(rules=[
#    css.syndata.color_list,
#    css.syndata.color_spec,
#    RegExp(regexp=r'(#%(h)s{6})$' % grammar)
#])
# Length-like attribute values (pixels, lengths, multi-lengths, coords).
pixels = RegExp(regexp=r'(%(number)s)$' % grammar)
length = RegExp(regexp=r'(%(length)s)$' % grammar)
multi_length = RegExp(regexp=r'(%(multi-length)s)$' % grammar)
multi_lengths = RegExp(regexp=r'(%(multi-length)s(%(w)s,%(w)s%(multi-length)s)*)$' % grammar)
length_required = RegExp(regexp=r'(%(length)s)$' % grammar, element_exception=True)
coords = RegExp(regexp=r'(%(length)s(%(w)s,%(w)s%(length)s)(%(w)s,%(w)s%(length)s)+)$' % grammar)
# MIME content types, language codes and character-set names.
content_type = RegExp(regexp=r'(%(content-type)s)$' % grammar)
content_types = RegExp(regexp=r'(%(content-type)s(%(w)s,%(w)s%(content-type)s)*)$' % grammar)
content_type_required = RegExp(regexp=r'(%(content-type)s)$' % grammar, element_exception=True)
language_code = RegExp(regexp=r'(%(language-code)s)$' % grammar)
charset = RegExp(regexp=r'(%(charset)s)$' % grammar)
charsets = RegExp(regexp=r'(%(charset)s(%(w)s,?%(w)s%(charset)s)*)$' % grammar)
character = RegExp(regexp=r'(.)$')
datetime = RegExp(regexp=r'(%(datetime)s)$' % grammar)
link_types = RegExp(regexp=r'(%(link-types)s)$' % grammar)
# Full list (but "Authors may wish to define additional link types"):
# http://www.w3.org/TR/REC-html40/types.html#h-6.12
media_descs = Sequence(regexp=r'\s*,\s*', join_string=',', rule=
    RegExp(regexp=r'(%(media-desc)s)' % grammar),  # no trailing $ on purpose
)
# Inline style attributes are validated by the full CSS rule set.
style_sheet = css.full
frame_target = RegExp(regexp=r'(%(frame-target)s)$' % grammar)
script = No() | /redsolutioncms.django-trusted-html-0.2.0.zip/redsolutioncms.django-trusted-html-0.2.0/trustedhtml/rules/html/types.py | 0.682045 | 0.200695 | types.py | pypi |
"""Training Library containing training routines."""
from __future__ import absolute_import
import warnings
import numpy as np
from .core import Booster, STRING_TYPES, XGBoostError, CallbackEnv, EarlyStopException
from .compat import (SKLEARN_INSTALLED, XGBStratifiedKFold)
from . import rabit
from . import callback
def _train_internal(params, dtrain,
                    num_boost_round=10, evals=(),
                    obj=None, feval=None,
                    xgb_model=None, callbacks=None):
    """internal training function"""
    callbacks = [] if callbacks is None else callbacks
    evals = list(evals)
    # A list-valued 'eval_metric' cannot survive in a dict (duplicate key),
    # so expand it into repeated ('eval_metric', m) pairs in a param list.
    if isinstance(params, dict) \
        and 'eval_metric' in params \
        and isinstance(params['eval_metric'], list):
        params = dict((k, v) for k, v in params.items())
        eval_metrics = params['eval_metric']
        params.pop("eval_metric", None)
        params = list(params.items())
        for eval_metric in eval_metrics:
            params += [('eval_metric', eval_metric)]
    bst = Booster(params, [dtrain] + [d[0] for d in evals])
    nboost = 0
    num_parallel_tree = 1
    if xgb_model is not None:
        # Continue training from an existing model (file path or Booster).
        if not isinstance(xgb_model, STRING_TYPES):
            xgb_model = xgb_model.save_raw()
        bst = Booster(params, [dtrain] + [d[0] for d in evals], model_file=xgb_model)
        nboost = len(bst.get_dump())
    _params = dict(params) if isinstance(params, list) else params
    # get_dump() returns one entry per tree; convert the tree count into a
    # boosting-round count.
    if 'num_parallel_tree' in _params:
        num_parallel_tree = _params['num_parallel_tree']
        nboost //= num_parallel_tree
    if 'num_class' in _params:
        nboost //= _params['num_class']
    # Distributed code: Load the checkpoint from rabit.
    version = bst.load_rabit_checkpoint()
    assert(rabit.get_world_size() != 1 or version == 0)
    rank = rabit.get_rank()
    # Two checkpoint versions are saved per round (after update, after eval).
    start_iteration = int(version / 2)
    nboost += start_iteration
    callbacks_before_iter = [
        cb for cb in callbacks if cb.__dict__.get('before_iteration', False)]
    callbacks_after_iter = [
        cb for cb in callbacks if not cb.__dict__.get('before_iteration', False)]
    for i in range(start_iteration, num_boost_round):
        for cb in callbacks_before_iter:
            cb(CallbackEnv(model=bst,
                           cvfolds=None,
                           iteration=i,
                           begin_iteration=start_iteration,
                           end_iteration=num_boost_round,
                           rank=rank,
                           evaluation_result_list=None))
        # Distributed code: need to resume to this point.
        # Skip the first update if it is a recovery step.
        if version % 2 == 0:
            bst.update(dtrain, i, obj)
            bst.save_rabit_checkpoint()
            version += 1
        assert(rabit.get_world_size() == 1 or version == rabit.version_number())
        nboost += 1
        evaluation_result_list = []
        # check evaluation result.
        if len(evals) != 0:
            bst_eval_set = bst.eval_set(evals, i, feval)
            if isinstance(bst_eval_set, STRING_TYPES):
                msg = bst_eval_set
            else:
                msg = bst_eval_set.decode()
            # res[0] is the iteration tag; the rest are metric:value pairs.
            res = [x.split(':') for x in msg.split()]
            evaluation_result_list = [(k, float(v)) for k, v in res[1:]]
        try:
            for cb in callbacks_after_iter:
                cb(CallbackEnv(model=bst,
                               cvfolds=None,
                               iteration=i,
                               begin_iteration=start_iteration,
                               end_iteration=num_boost_round,
                               rank=rank,
                               evaluation_result_list=evaluation_result_list))
        except EarlyStopException:
            # Raised by the early-stop callback; training ends here.
            break
        # do checkpoint after evaluation, in case evaluation also updates booster.
        bst.save_rabit_checkpoint()
        version += 1
    if bst.attr('best_score') is not None:
        # These attributes are set by the early-stopping callback.
        bst.best_score = float(bst.attr('best_score'))
        bst.best_iteration = int(bst.attr('best_iteration'))
    else:
        bst.best_iteration = nboost - 1
    bst.best_ntree_limit = (bst.best_iteration + 1) * num_parallel_tree
    return bst
def train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None,
          maximize=False, early_stopping_rounds=None, evals_result=None,
          verbose_eval=True, xgb_model=None, callbacks=None, learning_rates=None):
    # pylint: disable=too-many-statements,too-many-branches, attribute-defined-outside-init
    """Train a booster with given parameters.

    Parameters
    ----------
    params : dict
        Booster params.
    dtrain : DMatrix
        Data to be trained.
    num_boost_round: int
        Number of boosting iterations.
    evals: list of pairs (DMatrix, string)
        List of items to be evaluated during training, this allows user to watch
        performance on the validation set.
    obj : function
        Customized objective function.
    feval : function
        Customized evaluation function.
    maximize : bool
        Whether to maximize feval.
    early_stopping_rounds: int
        Activates early stopping. Validation error needs to decrease at least
        every <early_stopping_rounds> round(s) to continue training.
        Requires at least one item in evals.
        If there's more than one, will use the last.
        Returns the model from the last iteration (not the best one).
        If early stopping occurs, the model will have three additional fields:
        bst.best_score, bst.best_iteration and bst.best_ntree_limit.
        (Use bst.best_ntree_limit to get the correct value if num_parallel_tree
        and/or num_class appears in the parameters)
    evals_result: dict
        This dictionary stores the evaluation results of all the items in watchlist.
        Example: with a watchlist containing [(dtest,'eval'), (dtrain,'train')] and
        a parameter containing ('eval_metric': 'logloss')
        Returns: {'train': {'logloss': ['0.48253', '0.35953']},
                  'eval': {'logloss': ['0.480385', '0.357756']}}
    verbose_eval : bool or int
        Requires at least one item in evals.
        If `verbose_eval` is True then the evaluation metric on the validation set is
        printed at each boosting stage.
        If `verbose_eval` is an integer then the evaluation metric on the validation set
        is printed at every given `verbose_eval` boosting stage. The last boosting stage
        / the boosting stage found by using `early_stopping_rounds` is also printed.
        Example: with verbose_eval=4 and at least one item in evals, an evaluation metric
        is printed every 4 boosting stages, instead of every boosting stage.
        If False, evaluation printing is disabled entirely.
    learning_rates: list or function (deprecated - use callback API instead)
        List of learning rate for each boosting round
        or a customized function that calculates eta in terms of
        current number of round and the total number of boosting round (e.g. yields
        learning rate decay)
    xgb_model : file name of stored xgb model or 'Booster' instance
        Xgb model to be loaded before training (allows training continuation).
    callbacks : list of callback functions
        List of callback functions that are applied at end of each iteration.
        It is possible to use predefined callbacks by using xgb.callback module.
        Example: [xgb.callback.reset_learning_rate(custom_rates)]

    Returns
    -------
    booster : a trained booster model
    """
    callbacks = [] if callbacks is None else callbacks
    # Most of the legacy advanced options are implemented as callbacks.
    # NOTE: bool is a subclass of int, so verbose_eval=False must be excluded
    # explicitly -- otherwise print_evaluation(False) would be registered.
    if verbose_eval is True:
        callbacks.append(callback.print_evaluation())
    elif isinstance(verbose_eval, int) and not isinstance(verbose_eval, bool):
        callbacks.append(callback.print_evaluation(verbose_eval))
    if early_stopping_rounds is not None:
        callbacks.append(callback.early_stop(early_stopping_rounds,
                                             maximize=maximize,
                                             verbose=bool(verbose_eval)))
    if evals_result is not None:
        callbacks.append(callback.record_evaluation(evals_result))
    if learning_rates is not None:
        warnings.warn("learning_rates parameter is deprecated - use callback API instead",
                      DeprecationWarning)
        callbacks.append(callback.reset_learning_rate(learning_rates))
    return _train_internal(params, dtrain,
                           num_boost_round=num_boost_round,
                           evals=evals,
                           obj=obj, feval=feval,
                           xgb_model=xgb_model, callbacks=callbacks)
class CVPack(object):
    """Auxiliary datastruct holding one cross-validation fold.

    Bundles the fold's train/test DMatrix slices, the watchlist built from
    them and the Booster trained on that fold.  (Docstrings previously
    started with a stray fourth quote, e.g. ``\"\"\"\"Auxiliary...``.)
    """
    def __init__(self, dtrain, dtest, param):
        """Initialize the CVPack.

        Parameters
        ----------
        dtrain : DMatrix
            Training slice of the data.
        dtest : DMatrix
            Held-out slice of the data.
        param : dict or list of (name, value) pairs
            Booster parameters for this fold.
        """
        self.dtrain = dtrain
        self.dtest = dtest
        self.watchlist = [(dtrain, 'train'), (dtest, 'test')]
        self.bst = Booster(param, [dtrain, dtest])

    def update(self, iteration, fobj):
        """Update the fold's booster for one iteration."""
        self.bst.update(self.dtrain, iteration, fobj)

    def eval(self, iteration, feval):
        """Evaluate the fold's watchlist for one iteration."""
        return self.bst.eval_set(self.watchlist, iteration, feval)
def mknfold(dall, nfold, param, seed, evals=(), fpreproc=None, stratified=False,
            folds=None, shuffle=True):
    """
    Make an n-fold list of CVPack from random indices.

    Folds come either from a random (optionally shuffled) split, from a
    user-supplied list of (train_idx, test_idx) pairs, or from sklearn's
    stratified k-fold when ``stratified`` is set.
    """
    evals = list(evals)
    np.random.seed(seed)
    if stratified is False and folds is None:
        # Plain random split of all row indices into nfold chunks.
        if shuffle is True:
            idx = np.random.permutation(dall.num_row())
        else:
            idx = np.arange(dall.num_row())
        idset = np.array_split(idx, nfold)
    elif folds is not None and isinstance(folds, list):
        # Caller supplied explicit folds; only the test indices are kept,
        # each fold trains on the union of the other folds' test indices.
        idset = [x[1] for x in folds]
        nfold = len(idset)
    else:
        # Stratified split on the label (classification use case).
        sfk = XGBStratifiedKFold(n_splits=nfold, shuffle=True, random_state=seed)
        idset = [x[1] for x in sfk.split(X=dall.get_label(), y=dall.get_label())]
    ret = []
    for k in range(nfold):
        # Train on every fold except k; test on fold k.
        dtrain = dall.slice(np.concatenate([idset[i] for i in range(nfold) if k != i]))
        dtest = dall.slice(idset[k])
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())
        else:
            tparam = param
        # Append eval metrics as repeated ('eval_metric', m) pairs.
        plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]
        ret.append(CVPack(dtrain, dtest, plst))
    return ret
def aggcv(rlist):
    # pylint: disable=invalid-name
    """
    Aggregate cross-validation results.

    Parameters
    ----------
    rlist : list of str
        One evaluation string per fold, as produced by ``CVPack.eval``,
        e.g. ``'[3]  train-rmse:0.1  test-rmse:0.2'`` (tab separated).
        All entries must refer to the same iteration.

    Returns
    -------
    results : list of (metric_name, mean, std) tuples
        One entry per metric; 'train-*' metrics are ordered before
        'test-*' ones, each group sorted alphabetically.
    """
    cvmap = {}
    idx = rlist[0].split()[0]
    for line in rlist:
        arr = line.split()
        # All folds must report the same iteration tag.
        assert idx == arr[0]
        for it in arr[1:]:
            if isinstance(it, bytes):
                # eval strings may come back as raw bytes from the C API
                it = it.decode()
            k, v = it.split(':')
            cvmap.setdefault(k, []).append(float(v))
    results = []
    # Sorting on (startswith('test'), name) puts train metrics first and
    # fixes the column order of the cv() output.
    for k, v in sorted(cvmap.items(), key=lambda x: (x[0].startswith('test'), x[0])):
        v = np.array(v)
        results.append((k, np.mean(v), np.std(v)))
    return results
def cv(params, dtrain, num_boost_round=10, nfold=3, stratified=False, folds=None,
       metrics=(), obj=None, feval=None, maximize=False, early_stopping_rounds=None,
       fpreproc=None, as_pandas=True, verbose_eval=None, show_stdv=True,
       seed=0, callbacks=None, shuffle=True):
    # pylint: disable = invalid-name
    """Cross-validation with given parameters.

    Parameters
    ----------
    params : dict
        Booster params.
    dtrain : DMatrix
        Data to be trained.
    num_boost_round : int
        Number of boosting iterations.
    nfold : int
        Number of folds in CV.
    stratified : bool
        Perform stratified sampling.
    folds : a KFold or StratifiedKFold instance
        Sklearn KFolds or StratifiedKFolds.
    metrics : string or list of strings
        Evaluation metrics to be watched in CV.
    obj : function
        Custom objective function.
    feval : function
        Custom evaluation function.
    maximize : bool
        Whether to maximize feval.
    early_stopping_rounds: int
        Activates early stopping. CV error needs to decrease at least
        every <early_stopping_rounds> round(s) to continue.
        Last entry in evaluation history is the one from best iteration.
    fpreproc : function
        Preprocessing function that takes (dtrain, dtest, param) and returns
        transformed versions of those.
    as_pandas : bool, default True
        Return pd.DataFrame when pandas is installed.
        If False or pandas is not installed, return np.ndarray
    verbose_eval : bool, int, or None, default None
        Whether to display the progress. If None, progress will be displayed
        when np.ndarray is returned. If True, progress will be displayed at
        boosting stage. If an integer is given, progress will be displayed
        at every given `verbose_eval` boosting stage.
    show_stdv : bool, default True
        Whether to display the standard deviation in progress.
        Results are not affected, and always contains std.
    seed : int
        Seed used to generate the folds (passed to numpy.random.seed).
    callbacks : list of callback functions
        List of callback functions that are applied at end of each iteration.
        It is possible to use predefined callbacks by using xgb.callback module.
        Example: [xgb.callback.reset_learning_rate(custom_rates)]
    shuffle : bool
        Shuffle data before creating folds.

    Returns
    -------
    evaluation history : list(string)
    """
    if stratified is True and not SKLEARN_INSTALLED:
        raise XGBoostError('sklearn needs to be installed in order to use stratified cv')
    if isinstance(metrics, str):
        metrics = [metrics]
    # Normalise params to a dict and merge its 'eval_metric' with `metrics`.
    if isinstance(params, list):
        _metrics = [x[1] for x in params if x[0] == 'eval_metric']
        params = dict(params)
        if 'eval_metric' in params:
            params['eval_metric'] = _metrics
    else:
        params = dict((k, v) for k, v in params.items())
    if len(metrics) == 0 and 'eval_metric' in params:
        if isinstance(params['eval_metric'], list):
            metrics = params['eval_metric']
        else:
            metrics = [params['eval_metric']]
    params.pop("eval_metric", None)
    results = {}
    cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc,
                      stratified, folds, shuffle)
    # setup callbacks
    callbacks = [] if callbacks is None else callbacks
    if early_stopping_rounds is not None:
        callbacks.append(callback.early_stop(early_stopping_rounds,
                                             maximize=maximize,
                                             verbose=False))
    # NOTE: bool is a subclass of int, so verbose_eval=False must be excluded
    # explicitly -- otherwise print_evaluation(False) would be registered.
    if verbose_eval is True:
        callbacks.append(callback.print_evaluation(show_stdv=show_stdv))
    elif isinstance(verbose_eval, int) and not isinstance(verbose_eval, bool):
        callbacks.append(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))
    callbacks_before_iter = [
        cb for cb in callbacks if cb.__dict__.get('before_iteration', False)]
    callbacks_after_iter = [
        cb for cb in callbacks if not cb.__dict__.get('before_iteration', False)]
    for i in range(num_boost_round):
        for cb in callbacks_before_iter:
            cb(CallbackEnv(model=None,
                           cvfolds=cvfolds,
                           iteration=i,
                           begin_iteration=0,
                           end_iteration=num_boost_round,
                           rank=0,
                           evaluation_result_list=None))
        # Boost every fold by one round, then aggregate their eval results.
        for fold in cvfolds:
            fold.update(i, obj)
        res = aggcv([f.eval(i, feval) for f in cvfolds])
        for key, mean, std in res:
            if key + '-mean' not in results:
                results[key + '-mean'] = []
            if key + '-std' not in results:
                results[key + '-std'] = []
            results[key + '-mean'].append(mean)
            results[key + '-std'].append(std)
        try:
            for cb in callbacks_after_iter:
                cb(CallbackEnv(model=None,
                               cvfolds=cvfolds,
                               iteration=i,
                               begin_iteration=0,
                               end_iteration=num_boost_round,
                               rank=0,
                               evaluation_result_list=res))
        except EarlyStopException as e:
            # Truncate the history to the best iteration and stop.
            for k in results.keys():
                results[k] = results[k][:(e.best_iteration + 1)]
            break
    if as_pandas:
        try:
            import pandas as pd
            results = pd.DataFrame.from_dict(results)
        except ImportError:
            pass
    return results
"""Distributed XGBoost Rabit related API."""
from __future__ import absolute_import
import sys
import ctypes
import numpy as np
from .core import _LIB, c_str, STRING_TYPES
from .compat import pickle
def _init_rabit():
    """Internal helper: declare C return types for the rabit entry points."""
    if _LIB is None:
        # Library failed to load; nothing to configure.
        return
    for fname in ('RabitGetRank', 'RabitGetWorldSize',
                  'RabitIsDistributed', 'RabitVersionNumber'):
        getattr(_LIB, fname).restype = ctypes.c_int
def init(args=None):
    """Initialize the rabit library with the given arguments.

    Parameters
    ----------
    args : list of str or bytes, optional
        Arguments forwarded to RabitInit.  Strings are encoded to bytes
        because ``ctypes.c_char_p`` only accepts bytes on Python 3.
    """
    if args is None:
        args = []
    arr = (ctypes.c_char_p * len(args))()
    # c_char_p rejects str on Python 3; encode lazily so bytes pass through.
    arr[:] = [a.encode() if isinstance(a, str) else a for a in args]
    _LIB.RabitInit(len(arr), arr)
def finalize():
    """Shut down rabit and notify the tracker that this worker is done."""
    _LIB.RabitFinalize()
def get_rank():
    """Return the rank of the current process.

    Returns
    -------
    rank : int
        Rank of current process.
    """
    return _LIB.RabitGetRank()
def get_world_size():
    """Return the total number of worker processes.

    Returns
    -------
    n : int
        Total number of processes.
    """
    return _LIB.RabitGetWorldSize()
def tracker_print(msg):
    """Print a message via the tracker.

    Useful for communicating progress information to the tracker in a
    distributed run; falls back to plain stdout otherwise.

    Parameters
    ----------
    msg : str
        The message to be printed.
    """
    if not isinstance(msg, STRING_TYPES):
        msg = str(msg)
    if _LIB.RabitIsDistributed() != 0:
        # Route through the tracker so output from all workers is collected.
        _LIB.RabitTrackerPrint(c_str(msg))
    else:
        # Single-process run: write straight to stdout, flushed immediately.
        sys.stdout.write(msg)
        sys.stdout.flush()
def get_processor_name():
    """Return the name of the processor (host) running this process.

    Returns
    -------
    name : bytes
        The host name as reported by rabit.
    """
    max_len = 256
    out_len = ctypes.c_ulong()
    buf = ctypes.create_string_buffer(max_len)
    _LIB.RabitGetProcessorName(buf, ctypes.byref(out_len), max_len)
    return buf.value
def broadcast(data, root):
    """Broadcast object from one node to all other nodes.

    Parameters
    ----------
    data : any type that can be pickled
        Input data, if current rank does not equal root, this can be None
    root : int
        Rank of the node to broadcast data from.

    Returns
    -------
    object
        The broadcast result: on non-root ranks, the unpickled payload
        received from ``root``; on the root rank, ``data`` unchanged.
    """
    rank = get_rank()
    length = ctypes.c_ulong()
    if root == rank:
        assert data is not None, 'need to pass in data when broadcasting'
        s = pickle.dumps(data, protocol=pickle.HIGHEST_PROTOCOL)
        length.value = len(s)
    # First broadcast announces the payload size so receivers can allocate.
    _LIB.RabitBroadcast(ctypes.byref(length),
                        ctypes.sizeof(ctypes.c_ulong), root)
    if root != rank:
        # Receiver side: allocate a buffer and receive the pickled payload.
        dptr = (ctypes.c_char * length.value)()
        _LIB.RabitBroadcast(ctypes.cast(dptr, ctypes.c_void_p),
                            length.value, root)
        data = pickle.loads(dptr.raw)
        del dptr
    else:
        # Sender side: ship the pickled bytes.
        _LIB.RabitBroadcast(ctypes.cast(ctypes.c_char_p(s), ctypes.c_void_p),
                            length.value, root)
        del s
    return data
# enumeration of dtypes
# Maps numpy dtypes to the integer codes expected by RabitAllreduce; the
# codes mirror the engine-side enumeration and must stay in this order.
DTYPE_ENUM__ = {
    np.dtype('int8'): 0,
    np.dtype('uint8'): 1,
    np.dtype('int32'): 2,
    np.dtype('uint32'): 3,
    np.dtype('int64'): 4,
    np.dtype('uint64'): 5,
    np.dtype('float32'): 6,
    np.dtype('float64'): 7
}
def allreduce(data, op, prepare_fun=None):
    """Perform allreduce, return the result.

    Parameters
    ----------
    data: numpy array
        Input data.
    op: int
        Reduction operators, can be MIN, MAX, SUM, BITOR
    prepare_fun: function
        Lazy preprocessing function, if it is not None, prepare_fun(data)
        will be called by the function before performing allreduce, to initialize the data
        If the result of Allreduce can be recovered directly,
        then prepare_fun will NOT be called

    Returns
    -------
    result : array_like
        The result of allreduce, have same shape as data

    Notes
    -----
    This function is not thread-safe.
    """
    if not isinstance(data, np.ndarray):
        raise Exception('allreduce only takes in numpy.ndarray')
    buf = data.ravel()
    # NOTE(review): this guard only copies when ravel() returned a view whose
    # base is the same object as data.base; for a contiguous array with no
    # base, buf stays a view of `data` and the reduction result is written
    # back into `data` in place -- confirm that in-place mutation is intended.
    if buf.base is data.base:
        buf = buf.copy()
    if buf.dtype not in DTYPE_ENUM__:
        raise Exception('data type %s not supported' % str(buf.dtype))
    if prepare_fun is None:
        _LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
                            buf.size, DTYPE_ENUM__[buf.dtype],
                            op, None, None)
    else:
        # Wrap the lazy initializer as a C callback; rabit invokes it only
        # when the allreduce result cannot be recovered from a checkpoint.
        func_ptr = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
        def pfunc(_):
            """prepare function."""
            prepare_fun(data)
        _LIB.RabitAllreduce(buf.ctypes.data_as(ctypes.c_void_p),
                            buf.size, DTYPE_ENUM__[buf.dtype],
                            op, func_ptr(pfunc), None)
    return buf
def version_number():
    """Return the version number of the currently stored model, i.e. how
    many checkpoint calls have been made so far.

    Returns
    -------
    version : int
        Version number of the currently stored model.
    """
    return _LIB.RabitVersionNumber()
# intialization script
_init_rabit() | /redspark-xgboost-0.72.3.tar.gz/redspark-xgboost-0.72.3/xgboost/rabit.py | 0.710929 | 0.191971 | rabit.py | pypi |
"""Plotting Library."""
from __future__ import absolute_import
import re
from io import BytesIO
import numpy as np
from .core import Booster
from .sklearn import XGBModel
def plot_importance(booster, ax=None, height=0.2,
                    xlim=None, ylim=None, title='Feature importance',
                    xlabel='F score', ylabel='Features',
                    importance_type='weight', max_num_features=None,
                    grid=True, show_values=True, **kwargs):
    """Plot importance based on fitted trees.

    Parameters
    ----------
    booster : Booster, XGBModel or dict
        Booster or XGBModel instance, or dict taken by Booster.get_fscore()
    ax : matplotlib Axes, default None
        Target axes instance. If None, new figure and axes will be created.
    grid : bool, Turn the axes grids on or off.  Default is True (On).
    importance_type : str, default "weight"
        How the importance is calculated: either "weight", "gain", or "cover"
        "weight" is the number of times a feature appears in a tree
        "gain" is the average gain of splits which use the feature
        "cover" is the average coverage of splits which use the feature
        where coverage is defined as the number of samples affected by the split
    max_num_features : int, default None
        Maximum number of top features displayed on plot. If None, all features will be displayed.
    height : float, default 0.2
        Bar height, passed to ax.barh()
    xlim : tuple, default None
        Tuple passed to axes.xlim()
    ylim : tuple, default None
        Tuple passed to axes.ylim()
    title : str, default "Feature importance"
        Axes title. To disable, pass None.
    xlabel : str, default "F score"
        X axis title label. To disable, pass None.
    ylabel : str, default "Features"
        Y axis title label. To disable, pass None.
    show_values : bool, default True
        Show values on plot. To disable, pass False.
    kwargs :
        Other keywords passed to ax.barh()

    Returns
    -------
    ax : matplotlib Axes
    """
    # TODO: move this to compat.py
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError('You must install matplotlib to plot importance')
    # Accept a fitted model, a raw Booster, or a ready-made score dict.
    if isinstance(booster, XGBModel):
        importance = booster.get_booster().get_score(importance_type=importance_type)
    elif isinstance(booster, Booster):
        importance = booster.get_score(importance_type=importance_type)
    elif isinstance(booster, dict):
        importance = booster
    else:
        raise ValueError('tree must be Booster, XGBModel or dict instance')
    if len(importance) == 0:
        raise ValueError('Booster.get_score() results in empty')
    # Sort ascending so the most important feature ends up as the top bar.
    tuples = [(k, importance[k]) for k in importance]
    if max_num_features is not None:
        tuples = sorted(tuples, key=lambda x: x[1])[-max_num_features:]
    else:
        tuples = sorted(tuples, key=lambda x: x[1])
    labels, values = zip(*tuples)
    if ax is None:
        _, ax = plt.subplots(1, 1)
    ylocs = np.arange(len(values))
    ax.barh(ylocs, values, align='center', height=height, **kwargs)
    if show_values is True:
        # Annotate each bar with its score, slightly to the right of the bar.
        for x, y in zip(values, ylocs):
            ax.text(x + 1, y, x, va='center')
    ax.set_yticks(ylocs)
    ax.set_yticklabels(labels)
    if xlim is not None:
        if not isinstance(xlim, tuple) or len(xlim) != 2:
            raise ValueError('xlim must be a tuple of 2 elements')
    else:
        # Leave 10% headroom after the longest bar.
        xlim = (0, max(values) * 1.1)
    ax.set_xlim(xlim)
    if ylim is not None:
        if not isinstance(ylim, tuple) or len(ylim) != 2:
            raise ValueError('ylim must be a tuple of 2 elements')
    else:
        ylim = (-1, len(values))
    ax.set_ylim(ylim)
    if title is not None:
        ax.set_title(title)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    ax.grid(grid)
    return ax
# Patterns for parsing Booster.get_dump() text tokens:
# decision node, e.g. "0:[f0<1.5]"
_NODEPAT = re.compile(r'(\d+):\[(.+)\]')
# leaf node, e.g. "3:leaf=0.42"
_LEAFPAT = re.compile(r'(\d+):(leaf=.+)')
# edge spec with an explicit default branch, e.g. "yes=1,no=2,missing=1"
_EDGEPAT = re.compile(r'yes=(\d+),no=(\d+),missing=(\d+)')
# edge spec without the missing branch (older dump format)
_EDGEPAT2 = re.compile(r'yes=(\d+),no=(\d+)')
def _parse_node(graph, text):
    """Add one dumped tree node to *graph* and return its node id.

    Decision nodes render as circles, leaves as boxes.
    """
    for pattern, shape in ((_NODEPAT, 'circle'), (_LEAFPAT, 'box')):
        match = pattern.match(text)
        if match is not None:
            node = match.group(1)
            graph.node(node, label=match.group(2), shape=shape)
            return node
    raise ValueError('Unable to parse node: {0}'.format(text))
def _parse_edge(graph, node, text, yes_color='#0000FF', no_color='#FF0000'):
    """Add the outgoing yes/no edges of a dumped node to *graph*.

    The 'missing' branch (default direction for missing values) is merged
    into the label of whichever edge it follows.  Raises ValueError if the
    token matches neither edge format.
    """
    # The previous try/except ValueError around this branch was dead code:
    # nothing in it raises ValueError, so the handler never fired.
    match = _EDGEPAT.match(text)
    if match is not None:
        yes, no, missing = match.groups()
        if yes == missing:
            graph.edge(node, yes, label='yes, missing', color=yes_color)
            graph.edge(node, no, label='no', color=no_color)
        else:
            graph.edge(node, yes, label='yes', color=yes_color)
            graph.edge(node, no, label='no, missing', color=no_color)
        return
    # Older dumps omit the 'missing' branch entirely.
    match = _EDGEPAT2.match(text)
    if match is not None:
        yes, no = match.groups()
        graph.edge(node, yes, label='yes', color=yes_color)
        graph.edge(node, no, label='no', color=no_color)
        return
    raise ValueError('Unable to parse edge: {0}'.format(text))
def to_graphviz(booster, fmap='', num_trees=0, rankdir='UT',
yes_color='#0000FF', no_color='#FF0000', **kwargs):
"""Convert specified tree to graphviz instance. IPython can automatically plot the
returned graphiz instance. Otherwise, you should call .render() method
of the returned graphiz instance.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
fmap: str (optional)
The name of feature map file
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "UT"
Passed to graphiz via graph_attr
yes_color : str, default '#0000FF'
Edge color when meets the node condition.
no_color : str, default '#FF0000'
Edge color when doesn't meet the node condition.
kwargs :
Other keywords passed to graphviz graph_attr
Returns
-------
ax : matplotlib Axes
"""
try:
from graphviz import Digraph
except ImportError:
raise ImportError('You must install graphviz to plot tree')
if not isinstance(booster, (Booster, XGBModel)):
raise ValueError('booster must be Booster or XGBModel instance')
if isinstance(booster, XGBModel):
booster = booster.get_booster()
tree = booster.get_dump(fmap=fmap)[num_trees]
tree = tree.split()
kwargs = kwargs.copy()
kwargs.update({'rankdir': rankdir})
graph = Digraph(graph_attr=kwargs)
for i, text in enumerate(tree):
if text[0].isdigit():
node = _parse_node(graph, text)
else:
if i == 0:
# 1st string must be node
raise ValueError('Unable to parse given string as tree')
_parse_edge(graph, node, text, yes_color=yes_color,
no_color=no_color)
return graph
def plot_tree(booster, fmap='', num_trees=0, rankdir='UT', ax=None, **kwargs):
"""Plot specified tree.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
fmap: str (optional)
The name of feature map file
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "UT"
Passed to graphiz via graph_attr
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
kwargs :
Other keywords passed to to_graphviz
Returns
-------
ax : matplotlib Axes
"""
try:
import matplotlib.pyplot as plt
import matplotlib.image as image
except ImportError:
raise ImportError('You must install matplotlib to plot tree')
if ax is None:
_, ax = plt.subplots(1, 1)
g = to_graphviz(booster, fmap=fmap, num_trees=num_trees, rankdir=rankdir, **kwargs)
s = BytesIO()
s.write(g.pipe(format='png'))
s.seek(0)
img = image.imread(s)
ax.imshow(img)
ax.axis('off')
return ax | /redspark-xgboost-0.72.3.tar.gz/redspark-xgboost-0.72.3/xgboost/plotting.py | 0.83602 | 0.51251 | plotting.py | pypi |
"""Training Library containing training routines."""
from __future__ import absolute_import
from . import rabit
from .core import EarlyStopException
def _get_callback_context(env):
"""return whether the current callback context is cv or train"""
if env.model is not None and env.cvfolds is None:
context = 'train'
elif env.model is None and env.cvfolds is not None:
context = 'cv'
return context
def _fmt_metric(value, show_stdv=True):
"""format metric string"""
if len(value) == 2:
return '%s:%g' % (value[0], value[1])
elif len(value) == 3:
if show_stdv:
return '%s:%g+%g' % (value[0], value[1], value[2])
else:
return '%s:%g' % (value[0], value[1])
else:
raise ValueError("wrong metric value")
def print_evaluation(period=1, show_stdv=True):
"""Create a callback that print evaluation result.
We print the evaluation results every ``period`` iterations
and on the first and the last iterations.
Parameters
----------
period : int
The period to log the evaluation results
show_stdv : bool, optional
Whether show stdv if provided
Returns
-------
callback : function
A callback that print evaluation every period iterations.
"""
def callback(env):
"""internal function"""
if env.rank != 0 or len(env.evaluation_result_list) == 0 or period is False or period == 0:
return
i = env.iteration
if (i % period == 0 or i + 1 == env.begin_iteration or i + 1 == env.end_iteration):
msg = '\t'.join([_fmt_metric(x, show_stdv) for x in env.evaluation_result_list])
rabit.tracker_print('[%d]\t%s\n' % (i, msg))
return callback
def record_evaluation(eval_result):
"""Create a call back that records the evaluation history into eval_result.
Parameters
----------
eval_result : dict
A dictionary to store the evaluation results.
Returns
-------
callback : function
The requested callback function.
"""
if not isinstance(eval_result, dict):
raise TypeError('eval_result has to be a dictionary')
eval_result.clear()
def init(env):
"""internal function"""
for k, _ in env.evaluation_result_list:
pos = k.index('-')
key = k[:pos]
metric = k[pos + 1:]
if key not in eval_result:
eval_result[key] = {}
if metric not in eval_result[key]:
eval_result[key][metric] = []
def callback(env):
"""internal function"""
if len(eval_result) == 0:
init(env)
for k, v in env.evaluation_result_list:
pos = k.index('-')
key = k[:pos]
metric = k[pos + 1:]
eval_result[key][metric].append(v)
return callback
def reset_learning_rate(learning_rates):
"""Reset learning rate after iteration 1
NOTE: the initial learning rate will still take in-effect on first iteration.
Parameters
----------
learning_rates: list or function
List of learning rate for each boosting round
or a customized function that calculates eta in terms of
current number of round and the total number of boosting round (e.g. yields
learning rate decay)
- list l: eta = l[boosting_round]
- function f: eta = f(boosting_round, num_boost_round)
Returns
-------
callback : function
The requested callback function.
"""
def get_learning_rate(i, n, learning_rates):
"""helper providing the learning rate"""
if isinstance(learning_rates, list):
if len(learning_rates) != n:
raise ValueError("Length of list 'learning_rates' has to equal 'num_boost_round'.")
new_learning_rate = learning_rates[i]
else:
new_learning_rate = learning_rates(i, n)
return new_learning_rate
def callback(env):
"""internal function"""
context = _get_callback_context(env)
if context == 'train':
bst, i, n = env.model, env.iteration, env.end_iteration
bst.set_param('learning_rate', get_learning_rate(i, n, learning_rates))
elif context == 'cv':
i, n = env.iteration, env.end_iteration
for cvpack in env.cvfolds:
bst = cvpack.bst
bst.set_param('learning_rate', get_learning_rate(i, n, learning_rates))
callback.before_iteration = True
return callback
def early_stop(stopping_rounds, maximize=False, verbose=True):
"""Create a callback that activates early stoppping.
Validation error needs to decrease at least
every <stopping_rounds> round(s) to continue training.
Requires at least one item in evals.
If there's more than one, will use the last.
Returns the model from the last iteration (not the best one).
If early stopping occurs, the model will have three additional fields:
bst.best_score, bst.best_iteration and bst.best_ntree_limit.
(Use bst.best_ntree_limit to get the correct value if num_parallel_tree
and/or num_class appears in the parameters)
Parameters
----------
stopp_rounds : int
The stopping rounds before the trend occur.
maximize : bool
Whether to maximize evaluation metric.
verbose : optional, bool
Whether to print message about early stopping information.
Returns
-------
callback : function
The requested callback function.
"""
state = {}
def init(env):
"""internal function"""
bst = env.model
if len(env.evaluation_result_list) == 0:
raise ValueError('For early stopping you need at least one set in evals.')
if len(env.evaluation_result_list) > 1 and verbose:
msg = ("Multiple eval metrics have been passed: "
"'{0}' will be used for early stopping.\n\n")
rabit.tracker_print(msg.format(env.evaluation_result_list[-1][0]))
maximize_metrics = ('auc', 'map', 'ndcg')
maximize_at_n_metrics = ('auc@', 'map@', 'ndcg@')
maximize_score = maximize
metric = env.evaluation_result_list[-1][0]
if any(env.evaluation_result_list[-1][0].split('-')[-1].startswith(x)
for x in maximize_at_n_metrics):
maximize_score = True
if any(env.evaluation_result_list[-1][0].split('-')[-1].split(":")[0] == x
for x in maximize_metrics):
maximize_score = True
if verbose and env.rank == 0:
msg = "Will train until {} hasn't improved in {} rounds.\n"
rabit.tracker_print(msg.format(metric, stopping_rounds))
state['maximize_score'] = maximize_score
state['best_iteration'] = 0
if maximize_score:
state['best_score'] = float('-inf')
else:
state['best_score'] = float('inf')
if bst is not None:
if bst.attr('best_score') is not None:
state['best_score'] = float(bst.attr('best_score'))
state['best_iteration'] = int(bst.attr('best_iteration'))
state['best_msg'] = bst.attr('best_msg')
else:
bst.set_attr(best_iteration=str(state['best_iteration']))
bst.set_attr(best_score=str(state['best_score']))
else:
assert env.cvfolds is not None
def callback(env):
"""internal function"""
score = env.evaluation_result_list[-1][1]
if len(state) == 0:
init(env)
best_score = state['best_score']
best_iteration = state['best_iteration']
maximize_score = state['maximize_score']
if (maximize_score and score > best_score) or \
(not maximize_score and score < best_score):
msg = '[%d]\t%s' % (
env.iteration,
'\t'.join([_fmt_metric(x) for x in env.evaluation_result_list]))
state['best_msg'] = msg
state['best_score'] = score
state['best_iteration'] = env.iteration
# save the property to attributes, so they will occur in checkpoint.
if env.model is not None:
env.model.set_attr(best_score=str(state['best_score']),
best_iteration=str(state['best_iteration']),
best_msg=state['best_msg'])
elif env.iteration - best_iteration >= stopping_rounds:
best_msg = state['best_msg']
if verbose and env.rank == 0:
msg = "Stopping. Best iteration:\n{}\n\n"
rabit.tracker_print(msg.format(best_msg))
raise EarlyStopException(best_iteration)
return callback | /redspark-xgboost-0.72.3.tar.gz/redspark-xgboost-0.72.3/xgboost/callback.py | 0.945939 | 0.438184 | callback.py | pypi |
Parameter Structure for Machine Learning
========================================
One of the most important ingredients of machine learning projects are the parameters.
Parameters act as a way of communication between users and the library. In this article, we will introduce the parameter module of DMLC, a lightweight C++ module that is designed to support
general machine learning libraries. It comes with the following nice properties:
- Easy declaration of typed fields, default values and constraints.
- Auto checking of constraints and throw exceptions when constraint is not met.
- Auto generation of human readable docstrings on parameters.
- Serialization and de-serialization into JSON and ```std::map<std::string, std::string>```.
Use Parameter Module
--------------------
### Declare the Parameter
In the dmlc parameter module, every parameter can be declared as a structure.
This means you can easily access these fields as they normally are efficiently.
For example, it is very common to write
```c++
weight -= param.learning_rate * gradient;
```
The only difference between a normal structure is that we will need to declare
all the fields, as well as their default value and constraints.
The following code gives an example of declaring parameter structure ```MyParam```.
```c++
#include <dmlc/parameter.h>
// declare the parameter, normally put it in header file.
struct MyParam : public dmlc::Parameter<MyParam> {
float learning_rate;
int num_hidden;
int activation;
std::string name;
// declare parameters
DMLC_DECLARE_PARAMETER(MyParam) {
DMLC_DECLARE_FIELD(num_hidden).set_range(0, 1000)
.describe("Number of hidden unit in the fully connected layer.");
DMLC_DECLARE_FIELD(learning_rate).set_default(0.01f)
.describe("Learning rate of SGD optimization.");
DMLC_DECLARE_FIELD(activation).add_enum("relu", 1).add_enum("sigmoid", 2)
.describe("Activation function type.");
DMLC_DECLARE_FIELD(name).set_default("layer")
.describe("Name of the net.");
}
};
// register the parameter, this is normally in a cc file.
DMLC_REGISTER_PARAMETER(MyParam);
```
We can find that the only difference is the lines after ```DMLC_DECLARE_PARAMETER(MyParam)```,
where all the fields are declared. In this example, we have declared parameters of ```float,int,string``` types.
Here are some highlights in this example:
- For the numeric parameters, it is possible to set a range constraints via ```.set_range(begin, end)```.
- It is possible to define enumeration types, in this case activation.
User is only allowed to set ```sigmoid``` or ```relu``` into the activation field, and they will be mapped into 1 and 2 separately.
- The ```describe``` function adds a description on the field, which is used to generate human readable docstring.
### Set the Parameters
After we declared the parameters, we can declare this structure as normal structure.
Except that the ```MyParam``` structure now comes with a few member functions
to make parameter manipulation easy.
To set the parameters from external data source, we can use the ```Init``` function.
```c++
int main() {
MyParam param;
std::vector<std::pair<std::string, std::string> > param_data = {
{"num_hidden", "100"},
{"activation", "relu"},
{"name", "myname"}
};
// set the parameters
param.Init(param_data);
return 0;
}
```
After the ```Init``` function is called, the ```param``` will be filled with the specified key values in ```param_data```.
More importantly, the ```Init``` function will do automatic checking of parameter range and throw an ```dmlc::ParamError```
with detailed error message if things went wrong.
### Generate Human Readable Docstrings
Another useful feature of the parameter module is to get an human readable docstring of the parameter.
This is helpful when we are creating language binding such as python and R, and we can use it to generate docstring of
foreign language interface.
The following code obtains the dostring of ```MyParam```.
```c++
std::string docstring = MyParam::__DOC__();
```
We also provide a more structured way to access the detail of the fields(name, default value, detailed description) via
```c++
std::vector<dmlc::ParamFieldInfo> fields = MyParam::__FIELDS__();
```
### Serialization of Parameters
One of the most common way to serialize the parameter is to convert it back to representation of ```std::map<string, string>```
by using the following code.
```c++
std::map<string, string> dict = param.__DICT__();
```
The ```std::map<string, string>``` can further be serialized easily. This way of serialization is more device and platform(32/64 bit) agnostic.
However, this is not very compact, and recommended only used to serialize the general parameters set by the user.
Direct serialization and loading of JSON format is also support.
### Play with an Example
We provide an example program [parameter.cc](https://github.com/dmlc/dmlc-core/blob/master/example/parameter.cc), to
demonstrate the usage mentioned above, and allow you to play with it and get sense of what is going on.
How does it work
----------------
Hope you like the parameter module so far. In this section, we will explain how does it work. Making such parameter module
in ```C++``` is not easy. Because this basically means some way of reflection -- getting the information of fields in a
structure out, which is not supported by ```C++```.
Consider the following program, how do the Init function know the location of ```num_hidden```, and set it correctly
in ```Init``` function?
```c++
#include <vector>
#include <string>
#include <dmlc/parameter.h>
// declare the parameter, normally put it in header file.
struct MyParam : public dmlc::Parameter<MyParam> {
float learning_rate;
int num_hidden;
// declare parameters
DMLC_DECLARE_PARAMETER(MyParam) {
DMLC_DECLARE_FIELD(num_hidden);
DMLC_DECLARE_FIELD(learning_rate).set_default(0.01f);
}
};
// register the parameter, this is normally in a cc file.
DMLC_REGISTER_PARAMETER(MyParam);
int main(int argc, char *argv[]) {
MyParam param;
std::vector<std::pair<std::string, std::string> > param_data = {
{"num_hidden", "100"},
};
param.Init(param_data);
return 0;
}
```
The secret lies in the function ```DMLC_DECLARE_PARAMETER(MyParam)```, this is a macro defined in the parameter module.
If we expand the macro, the code roughly becomes the following code.
```c++
struct Parameter<MyParam> {
template<typename ValueType>
inline FieldEntry<ValueType>&
DECLARE(ParamManagerSingleton<MyParam> *manager,
const std::string& key,
ValueType& ref){
// offset gives a generic way to access the address of the field
// from beginning of the structure.
size_t offset = ((char*)&ref - (char*)this);
parameter::FieldEntry<ValueType> *e =
new parameter::FieldEntry<ValueType>(key, offset);
manager->AddEntry(key, e);
return *e;
}
};
struct MyParam : public dmlc::Parameter<MyParam> {
float learning_rate;
int num_hidden;
// declare parameters
inline void __DECLARE__(ParamManagerSingleton<MyParam> *manager) {
this->DECLARE(manager, "num_hidden", num_hidden);
this->DECLARE(manager, "learning_rate", learning_rate).set_default(0.01f);
}
};
// This code is only used to show the general idea.
// This code will only run once, the real code is done via singleton declaration pattern.
{
static ParamManagerSingleton<MyParam> manager;
MyParam tmp;
tmp->__DECLARE__(&manager);
}
```
This is not the actual code that runs, but generally shows the idea on how it works.
The key is that the structure layout is fixed for all the instances of objects.
To figure out how to access each of the field, we can
- Create an instance of MyParam, call the ```__DECLARE__``` function.
- The relative position of the field against the head of the structure is recorded into a global singleton.
- When we call ```Init```, we can get the ```offset``` from the singleton, and access the address of the field via ```(ValueType*)((char*)this + offset)```.
You are welcomed to check out the real details in [dmlc/parameter.h](https://github.com/dmlc/dmlc-core/blob/master/include/dmlc/parameter.h).
By using the generic template programming in C++, we have created a simple and useful parameter module for machine learning libraries.
This module is used extensively by DMLC projects. Hope you will find it useful as well :).
| /redspark-xgboost-0.72.3.tar.gz/redspark-xgboost-0.72.3/xgboost/dmlc-core/doc/parameter.md | 0.884533 | 0.976468 | parameter.md | pypi |
Tutorial
========
This is rabit's tutorial, a ***Reliable Allreduce and Broadcast Interface***.
All the example codes are in the [guide](https://github.com/dmlc/rabit/blob/master/guide/) folder of the project.
To run the examples locally, you will need to build them with ```make```.
**List of Topics**
* [What is Allreduce](#what-is-allreduce)
* [Common Use Case](#common-use-case)
* [Use Rabit API](#use-rabit-api)
- [Structure of a Rabit Program](#structure-of-a-rabit-program)
- [Allreduce and Lazy Preparation](#allreduce-and-lazy-preparation)
- [Checkpoint and LazyCheckpoint](#checkpoint-and-lazycheckpoint)
* [Compile Programs with Rabit](#compile-programs-with-rabit)
* [Running Rabit Jobs](#running-rabit-jobs)
* [Fault Tolerance](#fault-tolerance)
What is Allreduce
-----------------
The main methods provided by rabit are Allreduce and Broadcast. Allreduce performs reduction across different computation nodes,
and returns the result to every node. To understand the behavior of the function, consider the following example in [basic.cc](../guide/basic.cc) (there is a python example right after this if you are more familiar with python).
```c++
#include <rabit.h>
using namespace rabit;
const int N = 3;
int main(int argc, char *argv[]) {
int a[N];
rabit::Init(argc, argv);
for (int i = 0; i < N; ++i) {
a[i] = rabit::GetRank() + i;
}
printf("@node[%d] before-allreduce: a={%d, %d, %d}\n",
rabit::GetRank(), a[0], a[1], a[2]);
// allreduce take max of each elements in all processes
Allreduce<op::Max>(&a[0], N);
printf("@node[%d] after-allreduce-max: a={%d, %d, %d}\n",
rabit::GetRank(), a[0], a[1], a[2]);
// second allreduce that sums everything up
Allreduce<op::Sum>(&a[0], N);
printf("@node[%d] after-allreduce-sum: a={%d, %d, %d}\n",
rabit::GetRank(), a[0], a[1], a[2]);
rabit::Finalize();
return 0;
}
```
You can run the example using the rabit_demo.py script. The following command
starts the rabit program with two worker processes.
```bash
../tracker/rabit_demo.py -n 2 basic.rabit
```
This will start two processes, one process with rank 0 and the other with rank 1, both processes run the same code.
The ```rabit::GetRank()``` function returns the rank of current process.
Before the call to Allreduce, process 0 contains the array ```a = {0, 1, 2}```, while process 1 has the array
```a = {1, 2, 3}```. After the call to Allreduce, the array contents in all processes are replaced by the
reduction result (in this case, the maximum value in each position across all the processes). So, after the
Allreduce call, the result will become ```a = {1, 2, 3}```.
Rabit provides different reduction operators, for example, if you change ```op::Max``` to ```op::Sum```,
the reduction operation will be a summation, and the result will become ```a = {1, 3, 5}```.
You can also run the example with different processes by setting -n to different values.
If you are more familiar with python, you can also use rabit in python. The same example as before can be found in [basic.py](../guide/basic.py):
```python
import numpy as np
import rabit
rabit.init()
n = 3
rank = rabit.get_rank()
a = np.zeros(n)
for i in xrange(n):
a[i] = rank + i
print '@node[%d] before-allreduce: a=%s' % (rank, str(a))
a = rabit.allreduce(a, rabit.MAX)
print '@node[%d] after-allreduce-max: a=%s' % (rank, str(a))
a = rabit.allreduce(a, rabit.SUM)
print '@node[%d] after-allreduce-sum: a=%s' % (rank, str(a))
rabit.finalize()
```
You can run the program using the following command
```bash
../tracker/rabit_demo.py -n 2 basic.py
```
Broadcast is another method provided by rabit besides Allreduce. This function allows one node to broadcast its
local data to all other nodes. The following code in [broadcast.cc](../guide/broadcast.cc) broadcasts a string from
node 0 to all other nodes.
```c++
#include <rabit.h>
using namespace rabit;
const int N = 3;
int main(int argc, char *argv[]) {
rabit::Init(argc, argv);
std::string s;
if (rabit::GetRank() == 0) s = "hello world";
printf("@node[%d] before-broadcast: s=\"%s\"\n",
rabit::GetRank(), s.c_str());
// broadcast s from node 0 to all other nodes
rabit::Broadcast(&s, 0);
printf("@node[%d] after-broadcast: s=\"%s\"\n",
rabit::GetRank(), s.c_str());
rabit::Finalize();
return 0;
}
```
The following command starts the program with three worker processes.
```bash
../tracker/rabit_demo.py -n 3 broadcast.rabit
```
Besides strings, rabit also supports broadcasting constant-size arrays and vectors.
The counterpart in python can be found in [broadcast.py](../guide/broadcast.py). Here is a snippet so that you can get a better sense of how simple it is to use the python library:
```python
import rabit
rabit.init()
n = 3
rank = rabit.get_rank()
s = None
if rank == 0:
s = {'hello world':100, 2:3}
print '@node[%d] before-broadcast: s=\"%s\"' % (rank, str(s))
s = rabit.broadcast(s, 0)
print '@node[%d] after-broadcast: s=\"%s\"' % (rank, str(s))
rabit.finalize()
```
Common Use Case
---------------
Many distributed machine learning algorithms involve splitting the data into different nodes,
computing statistics locally, and finally aggregating them. Such workflow is usually done repetitively through many iterations before the algorithm converges. Allreduce naturally meets the structure of such programs,
common use cases include:
* Aggregation of gradient values, which can be used in optimization methods such as L-BFGS.
* Aggregation of other statistics, which can be used in KMeans and Gaussian Mixture Models.
* Find the best split candidate and aggregation of split statistics, used for tree based models.
Rabit is a reliable and portable library for distributed machine learning programs that allows them to run reliably on different platforms.
Use Rabit API
-------------
This section introduces topics about how to use the rabit API.
You can always refer to the [API Documentation](http://homes.cs.washington.edu/~tqchen/rabit/doc) for the definition of each function.
This section tries to give examples of different aspects of the rabit API.
#### Structure of a Rabit Program
The following code illustrates the common structure of a rabit program. This is an abstract example,
you can also refer to [wormhole](https://github.com/dmlc/wormhole/blob/master/learn/kmeans/kmeans.cc) for an example implementation of kmeans algorithm.
```c++
#include <rabit.h>
int main(int argc, char *argv[]) {
...
rabit::Init(argc, argv);
// load the latest checked model
int version = rabit::LoadCheckPoint(&model);
// initialize the model if it is the first version
if (version == 0) model.InitModel();
// the version number marks the iteration to resume
for (int iter = version; iter < max_iter; ++iter) {
// at this point, the model object should allow us to recover the program state
...
// each iteration can contain multiple calls of allreduce/broadcast
rabit::Allreduce<rabit::op::Max>(&data[0], n);
...
// checkpoint model after one iteration finishes
rabit::CheckPoint(&model);
}
rabit::Finalize();
return 0;
}
```
Besides the common Allreduce and Broadcast functions, there are two additional functions: ```LoadCheckPoint```
and ```CheckPoint```. These two functions are used for fault-tolerance purposes.
As mentioned before, traditional machine learning programs involve several iterations. In each iteration, we start with a model, make some calls
to Allreduce or Broadcast and update the model. The calling sequence in each iteration does not need to be the same.
* When the nodes start from the beginning (i.e. iteration 0), ```LoadCheckPoint``` returns 0, so we can initialize the model.
* ```CheckPoint``` saves the model after each iteration.
- Efficiency Note: the model is only kept in local memory and no save to disk is performed when calling Checkpoint
* When a node goes down and restarts, ```LoadCheckPoint``` will recover the latest saved model, and
* When a node goes down, the rest of the nodes will block in the call of Allreduce/Broadcast and wait for
the recovery of the failed node until it catches up.
Please see the [Fault Tolerance](#fault-tolerance) section to understand the recovery procedure executed by rabit.
#### Allreduce and Lazy Preparation
Allreduce is one of the most important functions provided by rabit. You can call allreduce by specifying the
reduction operator, pointer to the data and size of the buffer, as follows
```c++
Allreduce<operator>(pointer_of_data, size_of_data);
```
This is the basic use case of the Allreduce function. It is common for the user to write code that prepares the data
in the data buffer, passes the data to the Allreduce function, and gets the reduced result. However, when a node restarts
from failure, we can directly recover the result from other nodes (see also [Fault Tolerance](#fault-tolerance)) and
the data preparation procedure is no longer necessary. Rabit Allreduce adds an optional preparation-function parameter
to support such scenarios. The user can pass a function that performs the data preparation procedure to Allreduce
calls, and the data preparation function will only be called when necessary. We use [lazy_allreduce.cc](../guide/lazy_allreduce.cc)
as an example to demonstrate this feature. It is modified from [basic.cc](../guide/basic.cc), and you can compare the two codes.
```c++
#include <rabit.h>
using namespace rabit;
const int N = 3;
int main(int argc, char *argv[]) {
int a[N] = {0};
rabit::Init(argc, argv);
// lazy preparation function
auto prepare = [&]() {
printf("@node[%d] run prepare function\n", rabit::GetRank());
for (int i = 0; i < N; ++i) {
a[i] = rabit::GetRank() + i;
}
};
printf("@node[%d] before-allreduce: a={%d, %d, %d}\n",
rabit::GetRank(), a[0], a[1], a[2]);
// allreduce take max of each elements in all processes
  Allreduce<op::Max>(&a[0], N, prepare);
  printf("@node[%d] after-allreduce-max: a={%d, %d, %d}\n",
         rabit::GetRank(), a[0], a[1], a[2]);
  // run second allreduce
  Allreduce<op::Sum>(&a[0], N);
  printf("@node[%d] after-allreduce-sum: a={%d, %d, %d}\n",
         rabit::GetRank(), a[0], a[1], a[2]);
rabit::Finalize();
return 0;
}
```
Here we use features of C++11 because the lambda function makes things much shorter.
There is also C++ compatible callback interface provided in the [API](http://homes.cs.washington.edu/~tqchen/rabit/doc).
You can compile the program by typing ```make lazy_allreduce.mock```. We link against the mock library so that we can see
the effect when a process goes down. You can run the program using the following command
```bash
../tracker/rabit_demo.py -n 2 lazy_allreduce.mock mock=0,0,1,0
```
The additional arguments ```mock=0,0,1,0``` will cause node 0 to kill itself before second call of Allreduce (see also [mock test](#link-against-mock-test-rabit-library)).
You will find that the prepare function's print is only executed once and node 0 will no longer execute the preparation function when it restarts from failure.
You can also find the python version of the example in [lazy_allreduce.py](../guide/lazy_allreduce.py), and run it using the following command
```bash
../tracker/rabit_demo.py -n 2 lazy_allreduce.py mock=0,0,1,0
```
Since the lazy preparation function may not be called during execution, users should be careful when using this feature. For example, a possible mistake
could be putting some memory allocation code in the lazy preparation function, so that the memory is never allocated when the lazy preparation function is not called.
The example in [lazy_allreduce.cc](../guide/lazy_allreduce.cc) provides a simple way to migrate normal preparation code([basic.cc](../guide/basic.cc)) to the lazy version: wrap the preparation
code with a lambda function, and pass it to allreduce.
#### Checkpoint and LazyCheckpoint
Common machine learning algorithms usually involves iterative computation. As mentioned in the section ([Structure of a Rabit Program](#structure-of-a-rabit-program)),
user can and should use Checkpoint to ```save``` the progress so far, so that when a node fails, the latest checkpointed model can be loaded.
There are two model arguments you can pass to Checkpoint and LoadCheckpoint: ```global_model``` and ```local_model```:
* ```global_model``` refers to the model that is commonly shared across all the nodes
- For example, the centriods of clusters in kmeans is shared across all nodes
* ```local_model``` refers to the model that is specifically tied to the current node
- For example, in topic modeling, the topic assignments of subset of documents in current node is local model
Because the different nature of the two types of models, different strategy will be used for them.
```global_model``` is simply saved in local memory of each node, while ```local_model``` will replicated to some other
nodes (selected using a ring replication strategy). The checkpoint is only saved in the memory without touching the disk which makes rabit programs more efficient.
Users are encouraged to use ```global_model``` only when it is sufficient, for better efficiency.
To enable a model class to be checkpointed, users can implement a [serialization interface](../include/rabit_serialization.h). The serialization interface already
provide serialization functions of STL vector and string. For python API, user can checkpoint any python object that can be pickled.
There is a special Checkpoint function called [LazyCheckpoint](http://homes.cs.washington.edu/~tqchen/rabit/doc/namespacerabit.html#a99f74c357afa5fba2c80cc0363e4e459),
which can be used for ```global_model``` only cases under certain condition.
When LazyCheckpoint is called, no action is taken and the rabit engine only remembers the pointer to the model.
The serialization will only happen when another node fails and the recovery starts. So user basically pays no extra cost calling LazyCheckpoint.
To use this function, the user need to ensure the model remain unchanged until the last call of Allreduce/Broadcast in the current version finishes.
So that when recovery procedure happens in these function calls, the serialized model will be the same.
For example, consider the following calling sequence
```
LazyCheckPoint, code1, Allreduce, code2, Broadcast, code3, LazyCheckPoint
```
The user must only change the model in code3. Such a condition can usually be satisfied in many scenarios, and users can use LazyCheckpoint to further
improve the efficiency of the program.
Compile Programs with Rabit
---------------------------
Rabit is a portable library, to use it, you only need to include the rabit header file.
* You will need to add the path to [../include](../include) to the header search path of the compiler
- Solution 1: add ```-I/path/to/rabit/include``` to the compiler flag in gcc or clang
- Solution 2: add the path to the environment variable CPLUS_INCLUDE_PATH
* You will need to add the path to [../lib](../lib) to the library search path of the compiler
- Solution 1: add ```-L/path/to/rabit/lib``` to the linker flag
- Solution 2: add the path to environment variable LIBRARY_PATH AND LD_LIBRARY_PATH
* Link against lib/rabit.a
- Add ```-lrabit``` to the linker flag
The procedure above allows you to compile a program with rabit. The following two sections contain additional
options you can use to link against different backends other than the normal one.
#### Link against MPI Allreduce
You can link against ```rabit_mpi.a``` instead of using MPI Allreduce, however, the resulting program is backed by MPI and
is not fault tolerant anymore.
* Simply change the linker flag from ```-lrabit``` to ```-lrabit_mpi```
* The final linking needs to be done by mpi wrapper compiler ```mpicxx```
#### Link against Mock Test Rabit Library
If you want to use a mock to test the program in order to see the behavior of the code when some nodes go down, you can link against ```rabit_mock.a``` .
* Simply change the linker flag from ```-lrabit``` to ```-lrabit_mock```
The resulting rabit mock program can take in additional arguments in the following format
```
mock=rank,version,seq,ndeath
```
The four integers specify an event that will cause the program to ```commit suicide```(exit with -2)
* rank specifies the rank of the node to kill
* version specifies the version (iteration) of the model where you want the process to die
* seq specifies the sequence number of the Allreduce/Broadcast call since last checkpoint, where the process will be killed
* ndeath specifies how many times this node died already
For example, consider the following script in the test case
```bash
../tracker/rabit_demo.py -n 10 test_model_recover 10000\
mock=0,0,1,0 mock=1,1,1,0 mock=1,1,1,1
```
* The first mock will cause node 0 to exit when calling the second Allreduce/Broadcast (seq = 1) in iteration 0
* The second mock will cause node 1 to exit when calling the second Allreduce/Broadcast (seq = 1) in iteration 1
* The third mock will cause node 1 to exit again when calling second Allreduce/Broadcast (seq = 1) in iteration 1
- Note that ndeath = 1 means this will happen only if node 1 died once, which is our case
Running Rabit Jobs
------------------
Rabit is a portable library that can run on multiple platforms.
All the rabit jobs can be submitted using [dmlc-tracker](https://github.com/dmlc/dmlc-core/tree/master/tracker)
Fault Tolerance
---------------
This section introduces how fault tolerance works in rabit.
The following figure shows how rabit deals with failures.

The scenario is as follows:
* Node 1 fails between the first and second call of Allreduce after the second checkpoint
* The other nodes wait in the call of the second Allreduce in order to help node 1 to recover.
* When node 1 restarts, it will call ```LoadCheckPoint```, and get the latest checkpoint from one of the existing nodes.
* Then node 1 can start from the latest checkpoint and continue running.
* When node 1 calls the first Allreduce again, as the other nodes already know the result, node 1 can get it from one of them.
* When node 1 reaches the second Allreduce, the other nodes find out that node 1 has caught up and they can continue the program normally.
This fault tolerance model is based on a key property of Allreduce and
Broadcast: All the nodes get the same result after calling Allreduce/Broadcast.
Because of this property, any node can record the results of history
Allreduce/Broadcast calls. When a node is recovered, it can fetch the lost
results from some alive nodes and rebuild its model.
The checkpoint is introduced so that we can discard the history results of
Allreduce/Broadcast calls before the latest checkpoint. This saves memory
consumption used for backup. The checkpoint of each node is a model defined by
users and can be split into 2 parts: a global model and a local model. The
global model is shared by all nodes and can be backed up by any nodes. The
local model of a node is replicated to some other nodes (selected using a ring
replication strategy). The checkpoint is only saved in the memory without
touching the disk which makes rabit programs more efficient. The strategy of
rabit is different from the fail-restart strategy where all the nodes restart
from the same checkpoint when any of them fail. In rabit, all the alive nodes
will block in the Allreduce call and help the recovery. To catch up, the
recovered node fetches its latest checkpoint and the results of
Allreduce/Broadcast calls after the checkpoint from some alive nodes.
This is just a conceptual introduction to rabit's fault tolerance model. The actual implementation is more sophisticated,
and can deal with more complicated cases such as multiple nodes failure and node failure during recovery phase.
| /redspark-xgboost-0.72.3.tar.gz/redspark-xgboost-0.72.3/xgboost/rabit/doc/guide.md | 0.933997 | 0.944995 | guide.md | pypi |
from enum import Enum
from fractions import Fraction
from numbers import Rational, Real
from typing import Union
class Pos:
    """A block position together with the name of the world it lies in."""

    def __init__(self, x: int, y: int, z: int, world: str) -> None:
        # Coordinates are plain ints; the world is identified by name.
        self.x, self.y, self.z = x, y, z
        self.world = world
class Vec3i:
    """A plain integer 3-vector (no world attached, unlike ``Pos``)."""

    def __init__(self, x: int, y: int, z: int) -> None:
        self.x, self.y, self.z = x, y, z
class Interface:
    """Interface definition.

    Holds a name, the position of the least-significant bit, the
    per-bit increment vector, and a list of option strings.
    """

    def __init__(self, name: str, lsb: Pos, increment: Vec3i, option: list[str]) -> None:
        self.name = name
        self.lsb = lsb
        self.increment = increment
        self.option = option
class BlockUpdateType(Enum):
    """Kinds of block update events; values are the string identifiers
    used by the API (presumably a wire-protocol name -- verify against
    the consuming side)."""
    NEIGHBOR_UPDATE = 'neighborUpdate'
    POST_PLACEMENT = 'postPlacement'
    ANY = 'any'
class AlarmAt(Enum):
    """Edge at which an alarm fires; values are the string identifiers
    used by the API (exact semantics defined by the consumer -- confirm)."""
    START = 'start'
    STOP = 'stop'
class ApiException(Exception):
    """Error reported by the API, identified by a numeric code.

    Instances compare equal (and hash equal) when their codes match,
    so they can be compared against the predefined constants in
    ``ApiExceptions`` and used as set/dict members.
    """

    def __init__(self, code: int) -> None:
        super().__init__()
        self._code: int = code

    @property
    def code(self) -> int:
        """The numeric error code."""
        return self._code

    def __eq__(self, other: object) -> bool:
        # Return NotImplemented (rather than False) for foreign types so
        # Python can fall back to the reflected comparison.
        if not isinstance(other, ApiException):
            return NotImplemented
        return self._code == other._code

    def __hash__(self) -> int:
        return hash((self._code,))
class ApiExceptions:
    """Predefined ``ApiException`` instances for the known API error codes."""
    GENERAL_ERROR = ApiException(-1)
    ARGUMENT_INVALID = ApiException(-2)
    NAME_ILLEGAL = ApiException(-3)
    NAME_EXISTS = ApiException(-4)
    NAME_NOT_FOUND = ApiException(-5)
    INTERNAL_ERROR = ApiException(-6)
    CHUNK_UNLOADED = ApiException(-7)
class Interval:
    """Non-ambiguous interval representation.

    The interval is stored in gameticks (the smallest time unit)
    together with the ticks-per-second rate used for the second
    conversion, so redstone-tick and second views are exact fractions.
    """

    def __init__(self, interval_gametick: int, tps: Union[int, Rational] = 20):
        self._gametick = interval_gametick
        self._tps = tps

    def __eq__(self, other: object) -> bool:
        # Equality (and hashing) deliberately consider only the gametick
        # count, not tps. NotImplemented lets Python try the reflected
        # comparison for foreign types.
        if not isinstance(other, Interval):
            return NotImplemented
        return self._gametick == other._gametick

    def __hash__(self) -> int:
        return hash((self._gametick,))

    @staticmethod
    def _simplify(value: Fraction) -> Union[int, Rational]:
        """Collapse an integral Fraction to a plain int."""
        # int is not rational to mypy :(
        return value.numerator if value.denominator == 1 else value

    @property
    def gametick(self) -> int:
        """Interval length in gameticks."""
        return self._gametick

    @property
    def redstonetick(self) -> Union[int, Rational]:
        """Interval length in redstone ticks (1 redstone tick = 2 gameticks)."""
        return self._simplify(Fraction(self._gametick, 2))

    @property
    def second(self) -> Union[int, Rational]:
        """Interval length in seconds at the configured tps."""
        return self._simplify(Fraction(self._gametick, self._tps))

    @property
    def tps(self) -> Union[int, Rational]:
        """Ticks per second used for the second conversion."""
        return self._tps
def gametick(interval: int, tps: Union[int, Rational] = 20) -> Interval:
    """Build a non-ambiguous :class:`Interval` from a gametick count."""
    return Interval(interval, tps)
def redstonetick(interval: Union[int, Real],
                 tps: Union[int, Rational] = 20) -> Interval:
    """Build a non-ambiguous :class:`Interval` from redstone ticks.

    A non-integral value is truncated toward zero (via ``int``) after
    being converted to gameticks.
    """
    in_gameticks = interval * 2
    return Interval(int(in_gameticks), tps)
def second(interval: Union[int, Real],
           tps: Union[int, Rational] = 20) -> Interval:
    """Build a non-ambiguous :class:`Interval` from seconds at ``tps``.

    A non-integral result is truncated toward zero (via ``int``) after
    being converted to gameticks.
    """
    in_gameticks = interval * tps
    return Interval(int(in_gameticks), tps)
from __future__ import annotations

from abc import abstractmethod
from enum import Enum, auto
from functools import update_wrapper
from typing import Any, Callable, Iterable, Iterator, Optional, Union

import psycopg2

from redtape.connectors import RedshiftConnector
from redtape.specification import (
    DatabaseObject,
    DatabaseObjectType,
    Group,
    Operation,
    Privilege,
    Specification,
    User,
)
class OperationDispatch:
    """Descriptor/decorator that dispatches on an ``operation`` attribute.

    Operations should be registered like:

    >>> od = OperationDispatch()
    >>> class Manager:
    ...     @od.register(Operation.CREATE)
    ...     def handle_create(self):
    ...         pass

    Attributes:
        registry (dict): Map of registered operations to handler methods.
    """

    def __init__(self):
        self.registry = {}

    def __get__(self, instance, owner) -> Callable:
        """Return the handler bound to ``instance`` for its operation.

        Raises:
            ValueError: If no handler has been registered for that operation.
        """
        if instance is None:
            return self
        operation = instance.operation
        handler = self.registry.get(operation)
        if handler is None:
            raise ValueError(
                f"{instance.subject.__class__.__name__} does not support "
                f"{operation} operations."
            )
        # Bind the plain function as a method of instance before returning.
        return handler.__get__(instance, owner)

    def register(self, operation: Operation) -> Callable:
        """Decorator registering a handler for ``operation``."""

        def decorator(method):
            self.registry[operation] = method
            return method

        return decorator
class ManagementOperationError(Exception):
    """Raised when executing a ManagementOperation fails.

    The failing operation is kept on the ``action`` attribute.
    """

    def __init__(self, action: ManagementOperation):
        self.action = action
        super().__init__(f"Failed to execute: {action}")
class ManagementOperation:
    """An operation to manage subjects and their privileges in a database.

    ManagementOperations are executed by the DatabaseAdministrator;
    subclasses define the build_query method to create the query
    representation of the operation.

    Attributes:
        operation (Operation): An operation like granting privileges,
            or adding a user to a group.
        subject (User, Group): The user or group targeted by the
            operation: the one who will be granted permissions, created,
            or added to a group.
        privilege (Privilege, optional): For GRANT or REVOKE operations,
            the privilege to be granted to or revoked from the subject.
    """

    def __init__(
        self,
        operation: Operation,
        subject: Union[User, Group],
        privilege: Optional[Privilege] = None,
    ):
        self.operation = operation
        self.subject = subject
        self.privilege = privilege
        # Lazily-built cache for the query representation.
        self._query: Optional[str] = None

    def __str__(self):
        if self.privilege is None:
            return f"<{self.operation}: {self.subject}>"
        if self.operation is Operation.GRANT:
            prep = "to"
        elif self.operation is Operation.REVOKE:
            prep = "from"
        else:
            # Defensive default: previously any other operation with a
            # privilege set raised UnboundLocalError here.
            prep = "for"
        return f"<{self.operation}: {self.privilege} {prep} {self.subject}>"

    def __repr__(self):
        return (
            f"ManagementOperation(operation={self.operation}, "
            f"subject={self.subject}, privilege={self.privilege})"
        )

    def build_query(self) -> str:
        """Return the SQL for this operation; overridden by subclasses."""
        raise NotImplementedError

    @property
    def query(self) -> str:
        """The query representation of this operation (built once, cached)."""
        if self._query is None:
            self._query = self.build_query()
        return self._query
class UserManagementOperation(ManagementOperation):
    """ManagementOperations that affect Users.

    Attributes:
        group (Group, optional): For ADD_TO_GROUP or DROP_FROM_GROUP
            operations, the group to be added to or dropped from.
        database_object (DatabaseObject, optional): For ALTER_OWNER
            operations, the object whose ownership is transferred.
    """

    build_query = OperationDispatch()

    def __init__(
        self,
        *args,
        group: Optional[Group] = None,
        database_object: Optional[DatabaseObject] = None,
        **kwargs,
    ):
        self.group = group
        self.database_object = database_object
        super().__init__(*args, **kwargs)

    def __str__(self):
        if self.group is None:
            return super().__str__()
        if self.operation is Operation.ADD_TO_GROUP:
            prep = "to"
        elif self.operation is Operation.DROP_FROM_GROUP:
            prep = "from"
        else:
            # Defensive default: previously any other operation with a
            # group set raised UnboundLocalError here.
            prep = "for"
        # Fixed: show the subject (the user) rather than self.privilege,
        # which is always None for group-membership operations.
        return f"<{self.operation}: {self.subject} {prep} {self.group}>"

    def __repr__(self):
        return (
            f"UserManagementOperation(operation={self.operation}, "
            f"subject={self.subject}, privilege={self.privilege}, "
            f"group={self.group})"
        )

    @build_query.register(Operation.CREATE)
    def build_create_query(self) -> str:
        """Build a CREATE USER statement; Redshift requires a password."""
        if self.subject.password is None:
            raise TypeError(
                f"Creating a user in Redshift requires a password not {type(self.subject.password)}."
            )
        return "CREATE USER {name}{password}{is_superuser};".format(
            name=self.subject.name,
            is_superuser=" CREATEUSER" if self.subject.is_superuser is True else "",
            password=f" PASSWORD '{self.subject.password}'",
        )

    @build_query.register(Operation.DROP)
    def build_drop_query(self) -> str:
        """Build a DROP USER statement."""
        return f"DROP USER {self.subject.name};"

    @build_query.register(Operation.GRANT)
    def build_grant_query(self) -> str:
        """Build a GRANT statement for this user.

        A wildcard database object expands to an ``ON ALL ... IN SCHEMA``
        grant for the object types that support it.
        """
        if self.privilege is None:
            # Fixed: this previously referenced the undefined names
            # ``operation`` and ``privilege`` and raised NameError.
            raise TypeError(
                f"{self.operation} requires a Privilege but "
                f"{type(self.privilege)} was provided."
            )
        db_obj = self.privilege.database_object
        support_on_all_query = (
            DatabaseObjectType.TABLE,
            DatabaseObjectType.VIEW,
            DatabaseObjectType.FUNCTION,
            DatabaseObjectType.PROCEDURE,
        )
        # Redshift GRANT has no VIEW keyword: views are granted as tables.
        _type = (
            db_obj._type
            if db_obj._type is not DatabaseObjectType.VIEW
            else DatabaseObjectType.TABLE
        )
        if any(db_obj.has_wildcard_part(t) for t in support_on_all_query):
            db, schema, _ = db_obj.parts
            return (
                f"GRANT {self.privilege.action.name} ON ALL "
                f"{_type.name + 'S'} IN SCHEMA "
                f"{db.name}.{schema.name} TO {self.subject.name};"
            )
        return (
            f"GRANT {self.privilege.action.name} ON "
            f"{_type.name} {self.privilege.database_object.name} TO {self.subject.name};"
        )

    @build_query.register(Operation.DROP_FROM_GROUP)
    @build_query.register(Operation.ADD_TO_GROUP)
    def build_group_queries(self) -> str:
        """Build an ALTER GROUP ... ADD/DROP USER statement."""
        if self.group is None:
            raise TypeError(
                f"{self.operation} requires a Group but "
                f"{type(self.group)} was provided."
            )
        op = self.operation.canonical
        return f"ALTER GROUP {self.group.name} {op} USER {self.subject.name};"

    @build_query.register(Operation.ALTER_OWNER)
    def build_ownership_query(self) -> str:
        """Build an ALTER ... OWNER TO statement."""
        if self.database_object is None:
            raise TypeError(
                f"{self.operation} requires a DatabaseObject but "
                f"{type(self.database_object)} was provided."
            )
        return f"ALTER {self.database_object._type} {self.database_object.name} OWNER TO {self.subject.name};"
class GroupManagementOperation(ManagementOperation):
    """ManagementOperations that affect Groups."""

    build_query = OperationDispatch()

    def __repr__(self):
        # Fixed: the previous implementation formatted ``self.group``,
        # an attribute GroupManagementOperation never defines, so any
        # repr() call raised AttributeError.
        return (
            f"GroupManagementOperation(operation={self.operation}, "
            f"subject={self.subject}, privilege={self.privilege})"
        )

    @build_query.register(Operation.CREATE)
    def build_create_query(self) -> str:
        """Build a CREATE GROUP statement."""
        return f"CREATE GROUP {self.subject.name};"

    @build_query.register(Operation.DROP)
    def build_drop_query(self) -> str:
        """Build a DROP GROUP statement."""
        return f"DROP GROUP {self.subject.name};"

    @build_query.register(Operation.GRANT)
    def build_grant_query(self) -> str:
        """Build a GRANT statement for this group.

        NOTE(review): duplicated from
        UserManagementOperation.build_grant_query; consider extracting a
        shared module-level helper.
        """
        if self.privilege is None:
            # Fixed: this previously referenced the undefined names
            # ``operation`` and ``privilege`` and raised NameError.
            raise TypeError(
                f"{self.operation} requires a Privilege but "
                f"{type(self.privilege)} was provided."
            )
        db_obj = self.privilege.database_object
        support_on_all_query = (
            DatabaseObjectType.TABLE,
            DatabaseObjectType.VIEW,
            DatabaseObjectType.FUNCTION,
            DatabaseObjectType.PROCEDURE,
        )
        # Redshift GRANT has no VIEW keyword: views are granted as tables.
        _type = (
            db_obj._type
            if db_obj._type is not DatabaseObjectType.VIEW
            else DatabaseObjectType.TABLE
        )
        if any(db_obj.has_wildcard_part(t) for t in support_on_all_query):
            db, schema, _ = db_obj.parts
            return (
                f"GRANT {self.privilege.action.name} ON ALL "
                f"{_type.name + 'S'} IN SCHEMA "
                f"{db.name}.{schema.name} TO {self.subject.name};"
            )
        return (
            f"GRANT {self.privilege.action.name} ON "
            f"{_type.name} {self.privilege.database_object.name} TO {self.subject.name};"
        )
def no_filter(_: Any) -> bool:
    """Accept everything: the default filter keeps every item."""
    return True
class DatabaseAdministratorTrainer:
    """A trainer for DatabaseAdministrators.

    Defines the operations that a DatabaseAdministrator will run,
    as well as the order in which they will run. The Trainer supports
    many filters to allow the client to control the training process.

    Attributes:
        desired (Specification): The desired specification of users,
            groups, and their privileges. The DatabaseAdministrator will
            be trained to make the current spec match the desired spec.
        current (Specification): The current specification of users,
            groups, and their privileges.
        filter_users (Callable): Returns True if a given User should be
            managed.
        filter_groups (Callable): Returns True if a given Group should
            be managed.
        filter_operations (Callable): Returns True if a given Operation
            should be executed.
        filter_database_objects (Callable): Returns True if a given
            DatabaseObject should be managed.
        filter_privileges (Callable): Returns True if a given Privilege
            should be granted or revoked.
    """

    def __init__(
        self,
        desired_spec: Specification,
        current_spec: Specification,
        filter_users: Callable[[User], bool] = no_filter,
        filter_groups: Callable[[Group], bool] = no_filter,
        filter_operations: Callable[[Operation], bool] = no_filter,
        filter_database_objects: Callable[[DatabaseObject], bool] = no_filter,
        filter_privileges: Callable[[Privilege], bool] = no_filter,
    ):
        self.desired = desired_spec
        self.current = current_spec
        self.filter_users = filter_users
        self.filter_groups = filter_groups
        self.filter_operations = filter_operations
        self.filter_database_objects = filter_database_objects
        self.filter_privileges = filter_privileges
        self._management_ops: list[ManagementOperation] = []
        # Lazily-computed caches for the filtered subject lists.
        self._desired_groups = None
        self._desired_users = None
        self._current_groups = None
        self._current_users = None

    @property
    def desired_groups(self):
        """Filtered groups from the desired specification (cached)."""
        if self._desired_groups is None:
            self._desired_groups = [
                g for g in filter(self.filter_groups, self.desired.groups)
            ]
        return self._desired_groups

    @property
    def current_groups(self):
        """Filtered groups from the current specification (cached)."""
        if self._current_groups is None:
            self._current_groups = [
                g for g in filter(self.filter_groups, self.current.groups)
            ]
        return self._current_groups

    @property
    def desired_users(self):
        """Filtered users from the desired specification (cached)."""
        if self._desired_users is None:
            self._desired_users = [
                u for u in filter(self.filter_users, self.desired.users)
            ]
        return self._desired_users

    @property
    def current_users(self):
        """Filtered users from the current specification (cached)."""
        if self._current_users is None:
            self._current_users = [
                u for u in filter(self.filter_users, self.current.users)
            ]
        return self._current_users

    def train(self) -> DatabaseAdministrator:
        """Build a DatabaseAdministrator holding the prepared operations.

        Operations are prepared in dependency order: subjects are created
        and granted before privileges are revoked and subjects dropped.
        """
        if self.filter_operations(Operation.CREATE) is True:
            self.prepare_create_subjects()
        if self.filter_operations(Operation.ADD_TO_GROUP) is True:
            self.prepare_add_to_group()
        if self.filter_operations(Operation.ALTER_OWNER) is True:
            self.prepare_alter_ownership()
        if self.filter_operations(Operation.GRANT) is True:
            self.prepare_grant_group_privileges()
            self.prepare_grant_user_privileges()
        if self.filter_operations(Operation.REVOKE) is True:
            self.prepare_revoke_group_privileges()
            self.prepare_revoke_user_privileges()
        if self.filter_operations(Operation.DROP_FROM_GROUP) is True:
            self.prepare_drop_from_group()
        if self.filter_operations(Operation.DROP) is True:
            self.prepare_drop_subjects()
        return DatabaseAdministrator(self._management_ops)

    def prepare_alter_ownership(self):
        """Prepare ALTER_OWNER operations for every object a user owns."""
        for user in self.desired_users:
            if user.owns is None or len(user.owns) == 0:
                continue
            for db_obj in user.owns:
                self._management_ops.append(
                    UserManagementOperation(
                        subject=user,
                        operation=Operation.ALTER_OWNER,
                        database_object=db_obj,
                    )
                )

    def prepare_create_subjects(self):
        """Prepare CREATE operations for subjects missing from current."""
        self.prepare_subjects(
            self.desired_groups, self.current.groups, Operation.CREATE
        )
        self.prepare_subjects(self.desired_users, self.current.users, Operation.CREATE)

    def prepare_drop_subjects(self):
        """Prepare DROP operations for subjects missing from desired."""
        self.prepare_subjects(self.desired.groups, self.current_groups, Operation.DROP)
        self.prepare_subjects(self.desired.users, self.current_users, Operation.DROP)

    def prepare_subjects(
        self,
        desired_subjects: Union[list[User], list[Group]],
        current_subjects: Union[list[User], list[Group]],
        operation: Operation,
    ):
        """Prepare CREATE or DROP operations from a set difference by name."""
        desired_names = set(d.name for d in desired_subjects)
        current_names = set(d.name for d in current_subjects)
        if operation is Operation.CREATE:
            # Subjects desired but not yet present must be created.
            subjects = [
                subject
                for subject in desired_subjects
                if subject.name in desired_names - current_names
            ]
        elif operation is Operation.DROP:
            # Subjects present but no longer desired must be dropped.
            subjects = [
                subject
                for subject in current_subjects
                if subject.name in current_names - desired_names
            ]
        else:
            raise TypeError(f"Operation can only be CREATE or DROP not {operation}")
        for subject in subjects:
            if isinstance(subject, User):
                self._management_ops.append(
                    UserManagementOperation(subject=subject, operation=operation)
                )
            else:
                self._management_ops.append(
                    GroupManagementOperation(subject=subject, operation=operation)
                )

    def prepare_add_to_group(self):
        """Prepare ADD_TO_GROUP operations for all users.

        If a user exists in the current specification, then we should add it
        to the desired groups it is not yet part of. If it doesn't exist,
        then we need to add it to all of its desired groups.
        """
        self.prepare_group_membership(
            self.desired_users,
            self.current.users,
            self.current.groups + self.desired.groups,
            Operation.ADD_TO_GROUP,
        )

    def prepare_drop_from_group(self):
        """Prepare DROP_FROM_GROUP operations for all users.

        If a user exists in the desired specification, then we should remove
        it from the groups that are not in the desired spec. If it doesn't
        exist, then we need to drop the user from all of its groups.
        """
        self.prepare_group_membership(
            self.current_users,
            self.desired.users,
            self.current.groups + self.desired.groups,
            Operation.DROP_FROM_GROUP,
        )

    def prepare_group_membership(
        self,
        users: Iterable[User],
        users_to_compare: Iterable[User],
        groups: Iterable[Group],
        operation: Operation,
    ):
        """Prepare group membership operations for all given users.

        For each user, ``operation`` is emitted for every group in its
        member_of set that the matching user in ``users_to_compare`` is not
        already a member of.
        """
        users_map = {user.name: user for user in users_to_compare}
        group_map = {group.name: group for group in groups}
        for user in users:
            if user.member_of is None or len(user.member_of) == 0:
                continue
            to_operate = user.member_of
            try:
                to_operate = to_operate - users_map[user.name].member_of
            except (TypeError, KeyError):
                # No information about current group membership exists.
                # KeyError raised if the user doesn't currently exist.
                # TypeError raised if the user is not a member of any groups.
                pass
            for group_name in to_operate:
                group = group_map[group_name]
                self._management_ops.append(
                    UserManagementOperation(
                        subject=user, operation=operation, group=group
                    )
                )

    def prepare_grant_group_privileges(self):
        """Prepare GRANT operations for desired group privileges not yet held."""
        for group in self.desired_groups:
            if group.privileges is None:
                continue
            privileges = filter(self.filter_privileges, group.privileges)
            try:
                idx = self.current.groups.index(group)
            except ValueError:
                # Group not found, but it will be created before privileges
                # are granted, so everything must be granted.
                current_privileges = []
            else:
                # ``or []`` guards against an existing group whose recorded
                # privileges are None (previously a TypeError on ``in``).
                current_privileges = self.current.groups[idx].privileges or []
            self.prepare_subject_privileges(
                group, privileges, current_privileges, Operation.GRANT
            )

    def prepare_grant_user_privileges(self):
        """Prepare GRANT operations for desired user privileges not yet held."""
        for user in self.desired_users:
            if user.privileges is None:
                continue
            privileges = filter(self.filter_privileges, user.privileges)
            try:
                idx = self.current.users.index(user)
            except ValueError:
                # User not found, but it will be created before privileges
                # are granted, so everything must be granted.
                current_privileges = []
            else:
                current_privileges = self.current.users[idx].privileges or []
            self.prepare_subject_privileges(
                user, privileges, current_privileges, Operation.GRANT
            )

    def prepare_revoke_group_privileges(self):
        """Prepare REVOKE operations for held group privileges no longer desired."""
        for group in self.current_groups:
            if group.privileges is None:
                continue
            privileges = filter(self.filter_privileges, group.privileges)
            try:
                idx = self.desired.groups.index(group)
            except ValueError:
                # Group not in the desired spec, which means it will be
                # deleted (absent filters). To ensure deletion can happen
                # we need to revoke all privileges.
                desired_privileges = []
            else:
                desired_privileges = self.desired.groups[idx].privileges or []
            # Fixed: this previously emitted GRANT operations with the
            # argument order reversed, so nothing was ever revoked. The
            # held privileges are iterated; desired ones are skipped.
            self.prepare_subject_privileges(
                group, privileges, desired_privileges, Operation.REVOKE
            )

    def prepare_revoke_user_privileges(self):
        """Prepare REVOKE operations for held user privileges no longer desired."""
        for user in self.current_users:
            if user.privileges is None:
                continue
            privileges = filter(self.filter_privileges, user.privileges)
            try:
                idx = self.desired.users.index(user)
            except ValueError:
                # User not in the desired spec, which means it will be
                # deleted (absent filters). To ensure deletion can happen
                # we need to revoke all privileges.
                desired_privileges = []
            else:
                desired_privileges = self.desired.users[idx].privileges or []
            # Fixed: held privileges are iterated and desired ones skipped
            # (the previous argument order revoked the wrong set).
            self.prepare_subject_privileges(
                user, privileges, desired_privileges, Operation.REVOKE
            )

    def prepare_subject_privileges(
        self,
        subject: Union[User, Group],
        desired_privileges: list[Privilege],
        current_privileges: list[Privilege],
        operation: Operation,
    ):
        """Emit ``operation`` for privileges in ``desired_privileges`` that
        are missing from ``current_privileges``.

        GRANT callers pass the desired privileges first; REVOKE callers
        pass the currently-held privileges first and the desired ones
        second, so held-but-no-longer-desired privileges are revoked.
        """
        if operation not in (Operation.GRANT, Operation.REVOKE):
            raise TypeError(
                f"Privileges can only be granted or revoked not {operation}"
            )
        if isinstance(subject, User):
            action_cls = UserManagementOperation
        else:
            action_cls = GroupManagementOperation
        for privilege in desired_privileges:
            if privilege in current_privileges:
                continue
            self._management_ops.append(
                action_cls(subject=subject, operation=operation, privilege=privilege)
            )
class OnError(Enum):
    """Behavior when encountering an error while running Admin actions."""
    # Record the failure, continue with the remaining actions, and return
    # the collected errors at the end.
    CONTINUE = "CONTINUE"
    # Raise ManagementOperationError immediately on the first failure.
    ABORT = "ABORT"
def _do_nothing(*args, **kwargs):
"""Does nothing."""
return
class DatabaseAdministrator:
    """An administrator in charge of managing users and groups.

    Attributes:
        ops (list[ManagementOperation]): The operations this DBA will
            execute when calling the manage method.
    """

    def __init__(self, ops: list[ManagementOperation]):
        self.ops = ops

    def queries(self) -> Iterator[tuple[str, ManagementOperation]]:
        """A generator over queries and the operations that built them."""
        for op in self.ops:
            yield op.query, op

    def manage(
        self,
        connector: RedshiftConnector,
        before_callback: Callable[[str, ManagementOperation], Any] = _do_nothing,
        progress_callback: Callable[[str, ManagementOperation, Any], Any] = _do_nothing,
        success_callback: Callable[[str, ManagementOperation, Any], Any] = _do_nothing,
        on_error_callback: Callable[
            [str, ManagementOperation, psycopg2.Error, Any], Any
        ] = _do_nothing,
        on_error: OnError = OnError.CONTINUE,
    ) -> tuple[bool, list[ManagementOperationError]]:
        """Run the given actions and execute callbacks.

        Args:
            connector (RedshiftConnector): A database connector. Currently
                only RedshiftConnector is supported.
            before_callback (Callable): Called before executing an action
                with two positional arguments: the query and the action
                that originated it.
            progress_callback (Callable): Called after an action has run,
                regardless of success, with the query, the action, and the
                action's index.
            success_callback (Callable): Called after an action succeeds
                with the query, the action, and before_callback's return.
            on_error_callback (Callable): Called after an action fails
                with the query, the action, the exception raised, and
                before_callback's return.
            on_error (OnError): Control behavior if an action fails:
                OnError.ABORT to finish immediately, OnError.CONTINUE to
                continue with remaining actions.

        Returns:
            A (success, errors) tuple: success is False if any action
            failed; errors collects one ManagementOperationError per
            failure (empty when everything succeeded).
        """
        errors = []
        success = True
        for idx, (query, action) in enumerate(self.queries()):
            before_result = before_callback(query, action)
            try:
                with connector.connect() as conn:
                    _ = conn.run_query(query)
            except psycopg2.Error as e:
                success = False
                on_error_callback(query, action, e, before_result)
                # Note: a leftover debug ``print`` of the abort condition
                # was removed here.
                if on_error is OnError.ABORT:
                    raise ManagementOperationError(action) from e
                exc = ManagementOperationError(action)
                exc.__cause__ = e
                errors.append(exc)
            else:
                success_callback(query, action, before_result)
            finally:
                progress_callback(query, action, idx)
        return success, errors
from __future__ import annotations
import json
from enum import Enum
from functools import singledispatch, wraps
from itertools import groupby
from typing import Any, Callable, TypeVar
import cattrs
import yaml
from attrs import asdict
from .models import (
Action,
DatabaseObject,
DatabaseObjectType,
Group,
Operation,
Ownerships,
Password,
PasswordType,
Privilege,
Privileges,
Specification,
User,
ValidationFailure,
)
def value_serializer(inst, attr, value):
    """Adapter between ``attrs.asdict`` and the ``serializer`` dispatcher.

    ``attrs.asdict`` calls its value_serializer callback with the
    instance first, but ``serializer`` is a singledispatch function
    keyed on the *value* type, which therefore has to come first.
    Swap the argument order and delegate.

    Arguments:
        inst: the attrs instance being serialized.
        attr: the attrs attribute being serialized.
        value: the current value of that attribute.
    """
    reordered_args = (value, attr, inst)
    return serializer(*reordered_args)
@singledispatch
def serializer(value, attr, inst):
    """Fallback serializer: return ``value`` unchanged.

    Type-specific serializers are attached via ``serializer.register``;
    any value without a registered handler passes through untouched.
    The argument order (value first) is what singledispatch keys on.

    Arguments:
        value: the value to serialize.
        attr: the attrs attribute it came from.
        inst: the attrs instance it came from.
    """
    return value
@serializer.register
def _(value: Privileges, attr, inst) -> "dict[str, Any]":
    """Serialize privileges by nesting them.

    Flat ``Privilege`` entries are grouped first by database-object type
    and then by action, yielding e.g.
    ``{"table": {"select": ["name1", "name2"]}}``.

    NOTE(review): itertools.groupby only merges *consecutive* items, so
    this assumes ``value`` iterates privileges already ordered by object
    type and action -- TODO confirm Privileges guarantees that ordering.
    """
    nested_privileges: dict[str, Any] = {}

    # Key functions for the two grouping levels (both lowercased).
    def action_groupby_key(p: Privilege):
        return p.action.name.lower()

    def object_type_groupby_key(p: Privilege):
        return p.database_object._type.name.lower()

    for object_key, object_group in groupby(value, object_type_groupby_key):
        # setdefault merges a repeated (non-consecutive) object-type group
        # into the already-built mapping instead of overwriting it.
        privileges = nested_privileges.setdefault(object_key, {})
        for action_key, action_group in groupby(object_group, action_groupby_key):
            privileges[action_key] = [p.database_object.name for p in action_group]
    return nested_privileges
@serializer.register
def _(value: Ownerships, attr, inst) -> "dict[str, Any]":
    """Serialize Ownerships by nesting them.

    Groups owned database objects by their type, producing e.g.
    ``{"table": ["name1", "name2"]}`` with names lowercased.

    NOTE(review): groupby only merges consecutive items and, unlike the
    Privileges serializer above, a repeated non-consecutive type group
    would overwrite the earlier list -- assumes ``value`` is ordered by
    object type; TODO confirm.
    """
    nested_ownerships: dict[str, Any] = {}

    def object_type_groupby_key(db_obj: DatabaseObject):
        return db_obj._type.name.lower()

    for object_key, object_group in groupby(value, object_type_groupby_key):
        nested_ownerships[object_key] = [obj.name.lower() for obj in object_group]
    return nested_ownerships
@serializer.register
def _(value: Password, attr, inst) -> "dict[str, str]":
    """Serialize a Password, exposing the private ``_type`` as ``type``.

    The optional ``value`` and ``salt`` attributes are included only
    when set, so the output never carries explicit nulls.
    """
    result: "dict[str, str]" = {"type": value._type.value}
    for optional_key in ("value", "salt"):
        optional_value = getattr(value, optional_key)
        if optional_value is not None:
            result[optional_key] = optional_value
    return result
@serializer.register
def _(value: Enum, attr, inst) -> str:
    """Serialize Enums by returning their names in lowercase.

    Catch-all for any Enum member without a more specific registered
    serializer; the member *name* (not its value) is what the dumped
    representation carries.
    """
    return value.name.lower()
T = TypeVar("T")


def add_method(cls_list: list[T], mod=None):
    """Decorator factory: attach the decorated function to many classes.

    Each class in ``cls_list`` gets an attribute named after the function,
    wrapping it (via functools.wraps). When ``mod`` is given (for example
    ``classmethod``), it is applied to the wrapper before attachment.
    The original, unmodified function is returned so the module-level
    name stays a plain function.

    Arguments:
        cls_list: classes to attach the function to.
        mod: optional modifier applied to the wrapper (e.g. classmethod).
    """
    def decorator(func: Callable):
        @wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        for target_cls in cls_list:
            # Apply the modifier per class, mirroring one fresh descriptor
            # object per target.
            attribute = wrapper if mod is None else mod(wrapper)
            setattr(target_cls, func.__name__, attribute)
        return func

    return decorator
def filter_none(_, value) -> bool:
    """attrs.asdict filter: keep only attributes whose value is not None.

    The first argument (the attrs Attribute object) is ignored.
    """
    return value is not None
@add_method([Specification, User, Group])
def to_dict(self: T) -> dict[str, Any]:
    """Serialize self to dictionary using attrs.asdict.

    None-valued attributes are dropped (``filter_none``) and values are
    converted through the singledispatch ``serializer`` via
    ``value_serializer``.
    """
    return asdict(self, filter=filter_none, value_serializer=value_serializer)
@add_method([Specification, User, Group])
def to_yaml(self: T) -> str:
    """Dump to YAML string after serializing to dictionary via to_dict."""
    return yaml.safe_dump(self.to_dict())
@add_method([Specification, User, Group])
def to_json(self: T) -> str:
    """Dump to JSON string after serializing to dictionary via to_dict."""
    return json.dumps(self.to_dict())
# Shared cattrs converter for (de)serializing the specification models.
_converter = cattrs.GenConverter(prefer_attrib_converters=True)

# Structure DatabaseObject dicts, accepting the public "type" key for the
# private ``_type`` attribute. Bug fixed: the original registered a
# make_dict_unstructure_fn here, but register_structure_hook requires a
# *structure* function (called as ``fn(data, type)``); use
# make_dict_structure_fn, mirroring the Password hook below.
_converter.register_structure_hook(
    DatabaseObject,
    cattrs.gen.make_dict_structure_fn(
        DatabaseObject,
        _converter,
        _type=cattrs.gen.override(rename="type"),
    ),
)
def _deserialize_privileges(d: dict[str, Any], *args, **kwargs) -> Privileges:
    """cattrs structure hook: rebuild flat Privileges from the nested form.

    Inverse of the Privileges serializer: ``d`` maps object-type name ->
    action name -> list of object names.
    """
    flat_privileges = Privileges()
    for db_obj_type, v in d.items():
        for action, db_objs in v.items():
            # Actions are looked up by enum *name* (uppercased).
            action = Action[action.upper()]
            for db_obj in db_objs:
                # NOTE(review): passes ``type=`` although the attribute is
                # ``_type`` -- assumes the attrs field declares that alias;
                # TODO confirm against models.DatabaseObject.
                database_object = DatabaseObject(
                    type=DatabaseObjectType(db_obj_type.upper()),
                    name=db_obj,
                )
                flat_privileges.add(
                    Privilege(database_object=database_object, action=action)
                )
    return flat_privileges


_converter.register_structure_hook(Privileges, _deserialize_privileges)
def _deserialize_ownerships(d: dict[str, Any], *args, **kwargs) -> Ownerships:
    """cattrs structure hook: rebuild flat Ownerships from the nested form.

    Inverse of the Ownerships serializer: ``d`` maps object-type name ->
    list of object names.
    """
    flat_ownerships = Ownerships()
    for db_obj_type, db_objs in d.items():
        for db_obj in db_objs:
            database_object = DatabaseObject(
                type=DatabaseObjectType(db_obj_type.upper()),
                name=db_obj,
            )
            flat_ownerships.add(database_object)
    return flat_ownerships


_converter.register_structure_hook(Ownerships, _deserialize_ownerships)
# Accept the public "type" key for Password's private ``_type`` attribute
# when structuring.
_converter.register_structure_hook(
    Password,
    cattrs.gen.make_dict_structure_fn(
        Password,
        _converter,
        _type=cattrs.gen.override(rename="type"),
    ),
)
# Omit default-valued attributes when unstructuring Group and User so the
# dumped representation stays minimal.
_converter.register_unstructure_hook(
    Group,
    cattrs.gen.make_dict_unstructure_fn(
        Group,
        _converter,
        _cattrs_omit_if_default=True,
    ),
)
_converter.register_unstructure_hook(
    User,
    cattrs.gen.make_dict_unstructure_fn(
        User,
        _converter,
        _cattrs_omit_if_default=True,
    ),
)
@add_method([Specification, User, Group], mod=classmethod)
def from_dict(cls: T, d: dict[str, Any]) -> T:
    """Initialize an instance from a dictionary via the module converter."""
    return _converter.structure(d, cls)
@add_method([Specification, User, Group], mod=classmethod)
def from_yaml(cls: T, s: str) -> T:
    """Initialize a class by loading a YAML string, then from_dict."""
    return cls.from_dict(yaml.safe_load(s))
@add_method([Specification, User, Group], mod=classmethod)
def from_json(cls: T, s: str) -> T:
    """Initialize a class by loading a JSON string, then from_dict."""
    return cls.from_dict(json.loads(s))
from plone import api
from plone.app.event.base import default_timezone
from plone.app.textfield import RichText
from plone.app.z3cform.widget import AjaxSelectFieldWidget
from plone.app.z3cform.widget import DateFieldWidget
from plone.app.z3cform.widget import DatetimeFieldWidget
from plone.autoform import directives
from plone.autoform import directives as form
from plone.supermodel import model
from redturtle.bandi import bandiMessageFactory as _
from z3c.form.browser.checkbox import CheckBoxFieldWidget
from z3c.form.browser.radio import RadioFieldWidget
from zope import schema
from zope.interface import provider
from zope.schema.interfaces import IContextAwareDefaultFactory
@provider(IContextAwareDefaultFactory)
def getDefaultEnte(context):
    """Default-value factory for the ``ente_bando`` field.

    Reads the configured default authority from the Plone registry and
    normalizes falsy/empty values to None.
    """
    record = api.portal.get_registry_record(
        "redturtle.bandi.interfaces.settings.IBandoSettings.default_ente"
    )
    return record or None
class IBandoSchema(model.Schema):
    """A Dexterity schema for Announcements (bandi).

    Adds announcement-specific metadata (reference text, opening/closing
    dates, deadline, issuing authority, recipients and type), positions
    the fields after the rich-text body, and binds their edit widgets.
    """

    # fields
    riferimenti_bando = RichText(
        title=_("riferimenti_bando_label", default=u"References"),
        description=_("riferimenti_bando_help", default=u""),
        required=False,
    )
    apertura_bando = schema.Datetime(
        title=_("apertura_bando_label", default=u"Opening date"),
        description=_(
            "apertura_bando_help",
            default=u"Date and time of the opening of the announcement. Use "
            u"this field if you want to set a specific opening date. "
            u"If not set, the announcement will be open immediately.",
        ),
        required=False,
    )
    chiusura_procedimento_bando = schema.Date(
        title=_(
            "chiusura_procedimento_bando_label",
            default=u"Closing date procedure",
        ),
        description=_("chiusura_procedimento_bando_help", default=u""),
        required=False,
    )
    scadenza_bando = schema.Datetime(
        title=_("scadenza_bando_label", default=u"Expiration date and time"),
        description=_(
            "scadenza_bando_help",
            default=u"Deadline to participate in the announcement",
        ),
        required=False,
    )
    ente_bando = schema.Tuple(
        title=_(u"ente_label", default=u"Authority"),
        description=_(u"ente_help", default=u"Select some authorities."),
        required=False,
        defaultFactory=getDefaultEnte,
        value_type=schema.TextLine(),
        missing_value=None,
    )
    destinatari = schema.List(
        title=_("destinatari_label", default=u"Recipients"),
        description=_("destinatari_help", default=""),
        required=True,
        value_type=schema.Choice(vocabulary="redturtle.bandi.destinatari.vocabulary"),
    )
    tipologia_bando = schema.Choice(
        title=_("tipologia_bando_label", default=u"Announcement type"),
        description=_("tipologia_bando_help", default=""),
        vocabulary="redturtle.bandi.tipologia.vocabulary",
        required=True,
    )

    # order: move the extra fields right after the rich-text body.
    # NOTE(review): apertura_bando is not repositioned here -- confirm
    # whether it is intentionally left at the schema's default position.
    form.order_after(riferimenti_bando="IRichText.text")
    form.order_after(chiusura_procedimento_bando="IRichText.text")
    form.order_after(scadenza_bando="IRichText.text")
    form.order_after(ente_bando="IRichText.text")
    form.order_after(destinatari="IRichText.text")
    form.order_after(tipologia_bando="IRichText.text")

    # widgets
    directives.widget(
        "ente_bando",
        AjaxSelectFieldWidget,
        vocabulary="redturtle.bandi.enti.vocabulary",
    )
    directives.widget(
        "apertura_bando",
        DatetimeFieldWidget,
        default_timezone=default_timezone,
    )
    directives.widget(
        "chiusura_procedimento_bando",
        DateFieldWidget,
        default_timezone=default_timezone,
    )
    directives.widget(
        "scadenza_bando",
        DatetimeFieldWidget,
        default_timezone=default_timezone,
    )
    directives.widget(destinatari=CheckBoxFieldWidget)
    directives.widget(tipologia_bando=RadioFieldWidget)
from redturtle.chefcookie import _
from redturtle.chefcookie.defaults import GENERAL_LABELS
from redturtle.chefcookie.defaults import HEADER_LABELS
from redturtle.chefcookie.defaults import PROFILING_COOKIES_LABELS
from redturtle.chefcookie.defaults import PROFILING_COOKIES_SPECIFIC_LABELS
from redturtle.chefcookie.defaults import TECHNICAL_COOKIES_LABELS
from redturtle.chefcookie.defaults import TECHNICAL_COOKIES_SPECIFIC_LABELS
from zope import schema
from zope.interface import Invalid
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
# plone.supermodel may be unavailable on very old setups; fall back to a
# plain zope Interface so the schemas below still import.
try:
    from plone.supermodel.model import Schema
except ImportError:
    from zope.interface import Interface

    Schema = Interface

import json
def validate_cfg_json(value):
    """Field constraint: ``value`` must parse as JSON with a mapping root.

    Args:
        value: the raw string entered in the control panel field.

    Returns:
        True when the string is valid JSON and its root is a dict.

    Raises:
        Invalid: with a translatable message when parsing fails or the
            root is not a mapping.
    """
    try:
        jv = json.loads(value)
    except ValueError as e:
        # Bug fixed: Python 3 exceptions have no ``message`` attribute, so
        # the original always fell back to str(e), which already contains
        # the position and therefore reported it twice. JSONDecodeError
        # exposes the short parser message as ``msg`` and the offset as
        # ``pos``.
        message = getattr(e, "msg", str(e))
        raise Invalid(
            _(
                "invalid_json",
                "JSON is not valid, parser complained: ${message}",
                mapping={
                    "message": "{msg} {pos}".format(
                        msg=message, pos=getattr(e, "pos", "")
                    ),
                },
            )
        )
    if not isinstance(jv, dict):
        raise Invalid(_("invalid_cfg_no_dict", "JSON root must be a mapping (dict)"))
    return True
class IRedturtleChefcookieLayer(IDefaultBrowserLayer):
    """Marker interface that defines a browser layer.

    Applied to the request when this add-on is installed, so its views
    and adapters are registered only for sites using the package.
    """
class IChefCookieSettingsConfigs(Schema):
    """Control panel schema: behavioural configuration for chefcookie.

    Feature toggles, the cookie prefix, third-party tracker ids and the
    iframe/link blocking maps. JSON-valued fields are validated by
    ``validate_cfg_json``.
    """

    enable_cc = schema.Bool(
        title=_(
            "chefcookie_enable_label",
            default=u"Enable chefcookie",
        ),
        description=_(
            "chefcookie_only_enable_chefcookie_help",
            default=u"Select to use chefcookie",
        ),
        required=False,
    )
    show_settings_icon = schema.Bool(
        title=_(
            "show_settings_icon_label",
            default=u"Show settings icon",
        ),
        description=_(
            "show_settings_icon_help",
            default=u"If selected, an icon that opens cookie settings will be "
            "displayed on the right side of the page. You should always allow "
            "users to change their settings, so if you disable this option, be "
            "sure to insert a link somewhere in the page (e.g. in the footer). "
            'It should be an "a" tag with data-cc-open-settings attribute.',
        ),
        required=False,
        default=True,
    )
    cookie_name = schema.TextLine(
        title=_("chefcookie_cookie_prefix_label", default=u"Cookie prefix"),
        description=_(
            "chefcookie_cookie_prefix_help",
            default=u"Set the cookie prefix",
        ),
        default=u"cc_",
        required=True,
    )
    registry_endpoint = schema.TextLine(
        title=_("chefcookie_registry_endpoint_label", default=u"Registry endpoint"),
        description=_(
            "chefcookie_registry_endpoint_help",
            default=u"If set and chefcookie will send usage data to endpoint",
        ),
        required=False,
    )
    domain_whitelist = schema.List(
        title=_("chefcookie_domain_whitelist_labels", default=u"Domain whitelist"),
        description=_(
            "chefcookie_domain_whitelist_labels_help",
            default=u"Insert a list of domains for which the banner should be used. Useful when we can visit a site with multiple domains",
        ),
        missing_value=[],
        default=[],
        value_type=schema.TextLine(),
        required=False,
    )
    # Third-party tracker ids: each one is only activated when the user
    # accepts the corresponding cookie category in the banner.
    analytics_id = schema.TextLine(
        title=_("chefcookie_analytics_id_label", default=u"Analytics Id"),
        description=_(
            "chefcookie_analytics_id_help",
            default=u"If set and the user has accepted the Analytics cookies, "
            "this id will be used to track the user. To enable this checkbox in "
            'the banner, you also need to add the "analytics" labels into '
            '"Technical cookies specific labels" field.',
        ),
        required=False,
    )
    matomo_id = schema.TextLine(
        title=_("chefcookie_matomo_id_label", default=u"Matomo Id"),
        description=_(
            "chefcookie_matomo_id_help",
            default=u"If set and the user has accepted the Matomo cookies, this id will be used to track the user.",
        ),
        required=False,
    )
    facebook_id = schema.TextLine(
        title=_("chefcookie_facebook_id_label", default=u"Facebook Id"),
        description=_(
            "chefcookie_facebook_id_help",
            default=u"If set and the user has accepted the Facebook cookies, this id will be used to track the user.",
        ),
        required=False,
    )
    hotjar_id = schema.TextLine(
        title=_("chefcookie_hotjar_id_label", default=u"HotJar Id"),
        description=_(
            "chefcookie_hotjar_id_help",
            default=u"If set and the user has accepted the HotJar cookies, this id will be used to track the user.",
        ),
        required=False,
    )
    linkedin_id = schema.TextLine(
        title=_("chefcookie_linkedin_id_label", default=u"LinkedIn Id"),
        description=_(
            "chefcookie_linkedin_id_help",
            default=u"If set and the user has accepted the LinkedIn cookies, this id will be used to track the user.",
        ),
        required=False,
    )
    only_technical_cookies = schema.Bool(
        title=_(
            "chefcookie_only_technical_cookies_label",
            default=u"Only technical cookies",
        ),
        description=_(
            "chefcookie_only_technical_cookies_help",
            default=u"Select if your website only provide technical cookies.",
        ),
        required=False,
    )
    # JSON mapping language-code -> policy page URL.
    policy_url = schema.SourceText(
        title=_("chefcookie_policy_url_label", default=u"Policy URL"),
        description=_(
            "chefcookie_policy_url_help",
            default="Insert the cookie policy page URL. One per each language in the website. "
            'This can be used in "Banner header" field as "{policy_url}" to '
            "dynamically replace it with the given URL.",
        ),
        default=u"{}",
        constraint=validate_cfg_json,
        required=False,
    )
    iframes_mapping = schema.List(
        title=_("chefcookie_iframes_mapping_labels", default=u"Iframes mapping"),
        description=_(
            "chefcookie_iframes_mapping_labels_help",
            default=u"Insert a list of mappings between a provider and a list of possible domains for their iframes. If the user blocks their cookies, the iframes will be blocked as well.",
        ),
        default=[],
        missing_value=[],
        value_type=schema.TextLine(),
        required=False,
    )
    links_mapping = schema.List(
        title=_("chefcookie_links_mapping_labels", default=u"Links mapping "),
        description=_(
            "chefcookie_links_mapping_labels_help",
            default=u"Insert a list of mappings between a provider and a list of possible links xpath selector for their anchor. If the user blocks their cookies, the provider will be blocked as well.",
        ),
        default=[],
        missing_value=[],
        value_type=schema.TextLine(),
        required=False,
    )
class IChefCookieSettingsLabels(Schema):
    """Control panel schema: label bundles shown in the cookie banner.

    Every field stores a JSON string (validated by ``validate_cfg_json``)
    keyed by language; defaults come from the ``defaults`` module.
    """

    header_labels = schema.SourceText(
        title=_("chefcookie_header_label", default=u"Banner header"),
        description=_(
            "chefcookie_header_help",
            default=u"Insert the text of the banner header. Once per each site "
            "languages. If you want to insert a link to the policy page, use "
            "the placeholder {policy_url}.",
        ),
        default=HEADER_LABELS,
        constraint=validate_cfg_json,
        required=True,
    )
    general_labels = schema.SourceText(
        title=_("chefcookie_general_labels", default=u"General labels"),
        description=_(
            "chefcookie_general_labels_help",
            default=u"",
        ),
        default=GENERAL_LABELS,
        constraint=validate_cfg_json,
        required=True,
    )
    technical_cookies_labels = schema.SourceText(
        title=_(
            "chefcookie_technical_cookies_labels",
            default=u"Technical cookies labels",
        ),
        description=_(
            "chefcookie_technical_cookies_labels_help",
            default=u"",
        ),
        default=TECHNICAL_COOKIES_LABELS,
        constraint=validate_cfg_json,
        required=False,
    )
    technical_cookies_specific_labels = schema.SourceText(
        title=_(
            "chefcookie_technical_cookies_specific_labels",
            default=u"Technical cookies specific labels",
        ),
        description=_(
            "chefcookie_technical_cookies_specific_labels_help",
            default=u"Labels for specific technical cookies.",
        ),
        default=TECHNICAL_COOKIES_SPECIFIC_LABELS,
        constraint=validate_cfg_json,
        required=True,
    )
    profiling_cookies_labels = schema.SourceText(
        title=_(
            "chefcookie_profiling_cookies_labels",
            default=u"Profiling cookies labels",
        ),
        description=_(
            "chefcookie_profiling_cookies_labels_help",
            default=u"",
        ),
        default=PROFILING_COOKIES_LABELS,
        constraint=validate_cfg_json,
        required=True,
    )
    profiling_cookies_specific_labels = schema.SourceText(
        title=_(
            "chefcookie_profiling_cookies_specific_labels",
            default=u"Profiling cookies specific labels",
        ),
        description=_(
            "chefcookie_profiling_cookies_specific_labels_help",
            default=u"Labels for specific providers. If the relative id/flag is set, this will enable the flag in the banner.",
        ),
        default=PROFILING_COOKIES_SPECIFIC_LABELS,
        constraint=validate_cfg_json,
        required=True,
    )
class IChefCookieSettings(IChefCookieSettingsConfigs, IChefCookieSettingsLabels):
    """Combined chefcookie control panel schema (configs + labels)."""
from sqlsoup import SQLSoup
from sqlalchemy import or_
import os
class SQLite3Vocab(object):
    """Vocabulary access to the bundled SQLite database of Italian
    territorial entities (comuni, province, regioni).

    Python 2 code; wraps the sqlite3.db shipped next to this module
    with SQLSoup.
    """

    # Poor-man's singleton guard: flipped to True on the class by the
    # first instantiation; any further instantiation raises.
    singleton = False

    def __init__(self):
        if not self.singleton:
            self.__class__.singleton = True
        else:
            raise SystemError, 'SQLite3Vocab is singleton.'
        path = os.path.dirname( os.path.realpath( __file__ ) )
        # NOTE(review): 'sqlite:////%s' with an absolute ``path`` yields a
        # URL with five slashes; it appears to be tolerated -- confirm
        # against the SQLAlchemy sqlite URL format.
        self.engine = SQLSoup('sqlite:////%s/sqlite3.db' % path)
        # Column handles callers can use to build filter expressions.
        self.comune_id = self.engine.comuni.codice_istat
        self.provincia_id = self.engine.province.sigla
        self.regione_id = self.engine.regioni.codice_istat

    def _filter_ente(self, tabel, sql_filters=None):
        # Apply each optional SQLAlchemy filter expression in turn.
        if sql_filters:
            for sql_filter in sql_filters:
                tabel = tabel.filter(sql_filter)
        return tabel

    def comune(self, name):
        """Return the single comune row with the given name."""
        return self.engine.comuni.filter_by(comune=name).one()

    def provincia(self, name):
        """Return the single provincia row with the given name."""
        return self.engine.province.filter_by(provincia=name).one()

    def regione(self, name):
        """Return the single regione row with the given name."""
        return self.engine.regioni.filter_by(regione=name).one()

    def comuni(self, sql_filters=None):
        """Return all comuni (optionally filtered), sorted by name."""
        query = self._filter_ente(self.engine.comuni, sql_filters)
        return query.order_by(self.engine.comuni.comune).all()

    def province(self, sql_filters=None):
        """Return all province (optionally filtered), sorted by name."""
        query = self._filter_ente(self.engine.province, sql_filters)
        return query.order_by(self.engine.province.provincia).all()

    def regioni(self, sql_filters=None):
        """Return all regioni (optionally filtered), sorted by name."""
        query = self._filter_ente(self.engine.regioni, sql_filters)
        return query.order_by(self.engine.regioni.regione).all()

    def comuniByLetter(self, query):
        """Return comuni whose name starts with ``query``."""
        where = (or_(self.engine.comuni.comune.like(query+'%')),)
        return self.comuni(sql_filters=where)

    def provinceByLetter(self, query):
        """Return province whose name starts with ``query``."""
        where = (or_(self.engine.province.provincia.like(query+'%')),)
        return self.province(sql_filters=where)

    def regioniByLetter(self, query):
        """Return regioni whose name starts with ``query``."""
        where = (or_(self.engine.regioni.regione.like(query+'%')),)
        return self.regioni(sql_filters=where)

    def comuni4provincia(self, id):
        """Return the comuni belonging to the provincia with code ``id``."""
        where = or_(self.engine.comuni.provincia==id)
        return self.engine.comuni.filter(where).order_by(self.engine.comuni.comune).all()

    def province4regione(self, id):
        """Return the province belonging to the regione with code ``id``."""
        where = or_(self.engine.province.regione==id)
        return self.engine.province.filter(where).order_by(self.engine.province.provincia).all()

    # Backward compatibility aliases.
    allComuni = comuni
    allProvince = province
    allRegioni = regioni
def mapDisplayList(vocab, result=None):
    """Map SQLSoup rows to (id, label) pairs.

    Appends to ``result`` when a truthy list is supplied, otherwise a
    fresh list is built. The id/label columns depend on the row's table
    (regioni, province or comuni).
    """
    if not result:
        result = []
    for row_obj in vocab:
        table_name = row_obj._table.name
        if table_name == 'regioni':
            pair = (row_obj.codice_istat, row_obj.regione)
        elif table_name == 'province':
            pair = (row_obj.sigla, row_obj.provincia)
        elif table_name == 'comuni':
            pair = (row_obj.codice_istat, row_obj.comune)
        result.append(pair)
    return result
from App.Common import package_home
from DateTime import DateTime
from plone import api
from plone.app.textfield.value import RichTextValue
from plone.namedfile.file import NamedBlobFile
from plone.namedfile.file import NamedBlobImage
from Products.CMFPlone.interfaces import INonInstallable
from zope.interface import implementer
# Products.ATContentTypes is optional (absent on Plone 5.2+/Python 3);
# remember whether Archetypes content is available.
try:
    from Products.ATContentTypes.interfaces.interfaces import IATContentType

    HAS_ARCHETYPES = True
except ImportError:
    HAS_ARCHETYPES = False
import logging
import os
logger = logging.getLogger(__name__)
# Rich-text fixture used as the body of most sample contents.
SIMPLE_TEXT = """
<p>Nunc <strong>nulla</strong>. Nullam vel sem. Ut tincidunt tincidunt erat.</p>
<p>Praesent turpis. Etiam ut purus mattis mauris sodales aliquam.</p>
<ul>
<li>one</li>
<li>two</li>
<li>three</li>
</ul>
"""

# Body with a resolveuid internal link (the {uid} placeholder is filled by
# set_text) plus an external link.
TEXT_WITH_LINK = """
<p>
This is an
<a class="internal-link" href="resolveuid/{uid}" title="">
internal link
</a>
</p>
<p>
This is
<a class="external-link" href="https://www.plone.org" title="">external</a>
</p>
"""

# Body with empty/whitespace-only tags and an unclosed <i>, to exercise
# markup cleanup in the exporter.
TEXT_WITH_EMPTY_TAGS = """
<p>Foo</p>
<p> </p>
<p><strong><br /></strong></p>
<p><strong>Bar</strong></p>
<p><i> </i></p>
<p><strong> </strong></p>
<p></p>
<p><i><br /></i></p>
<i>
"""
@implementer(INonInstallable)
class HiddenProfiles(object):
    """Hide this package's profiles from install/uninstall listings."""

    def getNonInstallableProfiles(self):
        """Hide uninstall profile from site-creation and quickinstaller."""
        return ["redturtle.exporter.base:default"]
def post_install(context):
    """Create some contents.

    Builds a small fixture site used to exercise the exporter: nested
    folders, documents (one set as a folder default page), news, an
    event, a collection, an image and a file, then publishes a few items
    and creates sample users/groups.
    """
    portal = api.portal.get()
    # first of all create some contents
    folder1 = api.content.create(
        type="Folder", title="Folder foo", container=portal
    )
    folder2 = api.content.create(
        type="Folder", title="Folder bar", container=portal
    )
    folder3 = api.content.create(
        type="Folder", title="Folder baz", container=folder2
    )
    doc = api.content.create(
        type="Document",
        title="First Document",
        description="Pellentesque habitant morbi tristique senectus",
        container=portal,
    )
    doc2 = api.content.create(
        type="Document",
        title="Second Document",
        description="it's inside a folder",
        container=folder1,
    )
    doc3 = api.content.create(
        type="Document",
        title="Third document",
        description="this is the defaulf view of a folder",
        container=folder3,
    )
    # doc3 becomes the default view of folder3.
    folder3.setDefaultPage(doc3.getId())
    doc4 = api.content.create(
        type="Document",
        title="Document with empty tags",
        description="",
        container=portal,
        effectiveDate=DateTime(),
    )
    news = api.content.create(
        type="News Item",
        title="A News",
        description="In hac habitasse platea dictumst",
        container=portal,
    )
    api.content.create(
        type="News Item",
        title="Second News",
        description="it's inside a folder",
        container=folder2,
    )
    api.content.create(type="Event", title="Event foo", container=portal)
    api.content.create(
        type="Collection",
        title="Collection item",
        container=portal,
        query=[
            {
                u"i": u"portal_type",
                u"o": u"plone.app.querystring.operation.selection.is",
                u"v": [u"Document", u"News Item"],
            }
        ],
    )
    image = api.content.create(
        type="Image", title="example image", container=folder3
    )
    file_obj = api.content.create(
        type="File", title="example file", container=folder3
    )
    # Now let's add some text and files
    set_text(item=doc, text=SIMPLE_TEXT)
    set_text(item=doc3, text=SIMPLE_TEXT)
    set_text(item=news, text=SIMPLE_TEXT)
    set_text(item=doc2, text=TEXT_WITH_LINK, ref=doc.UID())
    set_text(item=doc4, text=TEXT_WITH_EMPTY_TAGS)
    set_image(item=image)
    set_file(item=file_obj)
    # and publish some contents
    api.content.transition(obj=folder1, transition="publish")
    api.content.transition(obj=doc, transition="publish")
    api.content.transition(obj=doc4, transition="publish")
    doc.setEffectiveDate(DateTime())
    doc4.setEffectiveDate(DateTime())
    folder1.setEffectiveDate(DateTime())
    # finally create some users and groups
    api.user.create(
        username="john",
        email="jdoe@plone.org",
        properties=dict(
            fullname="John Doe",
            description="foo",
            home_page="http://www.plone.org",
        ),
    )
    api.user.create(username="bob", email="bob@plone.org")
    api.user.grant_roles(username="bob", roles=["Reviewer"])
    api.group.create(groupname="staff")
    group_tool = api.portal.get_tool(name="portal_groups")
    group_tool.editGroup("staff", roles=["Editor", "Reader"])
    api.group.add_user(groupname="Administrators", username="john")
    api.group.add_user(groupname="staff", username="bob")
def set_text(item, text, ref=""):
    """Set the rich-text body of ``item``.

    Args:
        item: an Archetypes or Dexterity content object.
        text: HTML body; may contain a ``{uid}`` placeholder.
        ref: optional UID substituted into the ``{uid}`` placeholder.

    Bug fixed: when Products.ATContentTypes is unavailable
    (HAS_ARCHETYPES False) the original function silently did nothing,
    so Dexterity-only sites never got their text set.
    """
    if ref:
        text = text.format(uid=ref)
    if HAS_ARCHETYPES and IATContentType.providedBy(item):
        # Archetypes content uses the mutator API.
        item.setText(text, mimetype="text/html")
    else:
        # Dexterity content stores a RichTextValue directly.
        item.text = RichTextValue(text, "text/html", "text/html")
def set_image(item):
    """Attach the bundled plone.png to ``item.image`` as a named blob."""
    image_path = os.path.join(
        package_home(globals()), "example_files", "plone.png"
    )
    with open(image_path, "rb") as stream:
        payload = stream.read()
    item.image = NamedBlobImage(data=payload, filename=u"plone.png")
def set_file(item):
    """Attach the bundled example.pdf to ``item.file`` as a named blob.

    Bug fixed: the PDF was wrapped in NamedBlobImage; NamedBlobFile is
    the appropriate value type for a File content's ``file`` field.
    """
    path = os.path.join(
        package_home(globals()), "example_files", "example.pdf"
    )
    with open(path, "rb") as fd:
        file_data = fd.read()
    item.file = NamedBlobFile(data=file_data, filename=u"example.pdf")
from .migration.topics import TopicMigrator
from DateTime import DateTime
from plone import api
from plone.memoize.view import memoize
from Products.CMFCore.interfaces import IFolderish
from Products.Five.browser import BrowserView
from redturtle.exporter.base.browser.wrapper import Wrapper
from redturtle.exporter.base.interfaces import ICustomDataExporter
from zope.component import subscribers
import base64
import json
import logging
import six
logger = logging.getLogger(__name__)
def _clean_dict(dct, error):
new_dict = dct.copy()
message = str(error)
for key, value in dct.items():
if message.startswith(repr(value)):
del new_dict[key]
return key, new_dict
raise ValueError("Could not clean up object")
class GetItem(BrowserView):
    """Base JSON exporter view: serialize the context to a JSON string."""

    def __call__(self):
        data = self.get_data()
        return self.get_json_object(data)

    def get_data(self):
        """Collect the exportable data for the context.

        Starts from the generic Wrapper dict, then lets any registered
        ICustomDataExporter subscribers (ordered by their ``order``
        attribute) merge in extra keys.
        """
        context_dict = Wrapper(self.context)
        # custom exporters
        handlers = [
            x
            for x in subscribers(
                (self.context, self.request), ICustomDataExporter
            )
        ]
        for handler in sorted(handlers, key=lambda h: h.order):
            context_dict.update(handler())
        if context_dict.get("_defaultpage"):
            # Expose the default page under the importer-friendly key too.
            context_dict.update(
                {"default_page": context_dict.get("_defaultpage")}
            )
        return context_dict

    def get_json_object(self, context_dict):
        """Dump ``context_dict`` to JSON, dropping unserializable members.

        On a serialization TypeError, _clean_dict removes the offending
        entry (logging it) and the dump is retried until it succeeds or
        an unknown error aborts with an "ERROR: ..." string.
        """
        passed = False
        while not passed:
            try:
                JSON = json.dumps(context_dict)
                passed = True
            except Exception as error:
                if "serializable" in str(error):
                    key, context_dict = _clean_dict(context_dict, error)
                    logger.error(
                        "Not serializable member {0} of {1} ignored".format(
                            key, repr(self)
                        )
                    )
                    passed = False  # redundant, kept for clarity
                else:
                    return "ERROR: Unknown error serializing object: {0}".format(
                        str(error)
                    )
        self.request.response.setHeader("Content-Type", "application/json")
        return JSON
class GetItemLink(GetItem):
    """GetItem variant for Link content: guarantees a non-empty title."""

    def get_data(self):
        """Return the serialized Link, falling back to the id as title.

        Old (e.g. Plone 3) Link objects may have no title set.
        """
        data = super(GetItemLink, self).get_data()
        data["title"] = data.get("title") or data.get("id")
        return data
class GetItemEvent(GetItem):
    """GetItem variant for Events: normalize date and contact fields."""

    def get_data(self):
        """Return the serialized Event with importer-friendly key names.

        startDate/endDate are converted to ISO-8601 strings under
        start/end; the camelCase contact/url keys are renamed to their
        snake_case equivalents and the originals removed.
        """
        data = super(GetItemEvent, self).get_data()
        data.update(
            {
                "start": DateTime(data.get("startDate"))
                .asdatetime()
                .isoformat(),
                "end": DateTime(data.get("endDate")).asdatetime().isoformat(),
                "contact_name": data.get("contactName"),
                "contact_email": data.get("contactEmail"),
                "contact_phone": data.get("contactPhone"),
                "event_url": data.get("eventUrl"),
            }
        )
        # Drop the legacy camelCase keys now that they are remapped.
        data.pop("startDate", None)
        data.pop("endDate", None)
        data.pop("contactName", None)
        data.pop("contactEmail", None)
        data.pop("contactPhone", None)
        data.pop("eventUrl", None)
        return data
class GetItemDocument(GetItem):
    """GetItem variant for Documents: adds the table-of-contents flag."""

    def get_data(self):
        """Return the serialized Document plus its ``tableContents`` value."""
        data = super(GetItemDocument, self).get_data()
        data["table_of_contents"] = self.context.tableContents
        return data
class GetItemTopic(GetItem):
    """GetItem variant for old-style Topics: export a Collection query."""

    def convert_criterion(self, old_criterion):
        # NOTE(review): unused placeholder; conversion happens through
        # TopicMigrator below -- confirm whether this can be removed.
        pass

    def get_data(self):
        """Return the serialized Topic with its criteria as a query list.

        TopicMigrator converts the Topic criteria; DateTime values inside
        criterion values are turned into ISO-8601 strings (preserving
        tuple-ness), and sort/limit info is copied over. ``item_count``
        defaults to "30" when the Topic has no itemCount.
        """
        data = super(GetItemTopic, self).get_data()
        mt = TopicMigrator()
        criterions_list = mt.__call__(self.context)
        # check format in case of date values
        for crit_dict in criterions_list:
            values = crit_dict.get("v")
            if not values:
                continue
            if isinstance(values, int):
                continue
            if not any(
                [True for x in values if isinstance(x, DateTime)]
            ):  # noqa
                continue
            # NOTE(review): assumes that when any value is a DateTime,
            # *all* values are -- a mixed list would fail below; confirm.
            new_values = []
            for val in values:
                new_values.append(val.asdatetime().isoformat())
            if isinstance(values, tuple):
                new_values = tuple(new_values)
            crit_dict.update({"v": new_values})
        sort_on = mt._collection_sort_on
        sort_reversed = mt._collection_sort_reversed
        data.update({"query": criterions_list})
        data.update({"sort_on": sort_on})
        data.update({"sort_reversed": sort_reversed})
        if not data.get("itemCount"):
            data.update({"item_count": "30"})
        else:
            data.update({"item_count": data.get("itemCount")})
        return data
class GetItemCollection(GetItem):
    """GetItem variant for Collections: JSON-safe query and item_count."""

    def get_data(self):
        """Return the serialized Collection with a text-only query.

        Every criterion key and value is coerced to text (list-like
        values element-wise) so the result survives json.dumps, and the
        stored ``limit`` is renamed to ``item_count``.

        Bug fixed: ``del data["limit"]`` raised KeyError for collections
        without a stored limit; ``pop`` with a default of 30 handles
        both cases.
        """
        data = super(GetItemCollection, self).get_data()
        fixed_query = []
        for criterion in data["query"]:
            fixed = {}
            for key, value in criterion.items():
                if isinstance(value, six.string_types):
                    fixed[six.text_type(key)] = six.text_type(value)
                else:
                    # Non-string values are assumed iterable (lists of
                    # terms); coerce each element to text.
                    fixed[six.text_type(key)] = [
                        six.text_type(item) for item in value
                    ]
            fixed_query.append(fixed)
        data["query"] = fixed_query
        data["item_count"] = data.pop("limit", 30)
        return data
class GetItemFile(GetItem):
    """GetItem variant for File content: guarantees a non-empty title."""

    def get_data(self):
        """Return the serialized File, falling back to the id as title.

        Files from Plone 3 could have no title set.
        """
        data = super(GetItemFile, self).get_data()
        data["title"] = data.get("title") or data.get("id")
        return data
class GetItemImage(GetItem):
    """GetItem variant for Image content: guarantees a non-empty title."""

    def get_data(self):
        """Return the serialized Image, falling back to the id as title.

        Images from Plone 3 could have no title set.
        """
        data = super(GetItemImage, self).get_data()
        data["title"] = data.get("title") or data.get("id")
        return data
class GetCatalogResults(object):
    """Return (as JSON) the paths of catalog results plus the ancestor
    folders needed to reach them, in site-tree order.

    NOTE(review): instantiated as a browser view elsewhere -- assumes
    ``self.request`` is provided by a subclass/ZCML wiring; confirm.
    """

    # NOTE(review): class-level mutable defaults are shared across
    # instances; __call__ rebinds self.items per request, but item_paths
    # is never used as an instance attribute.
    items = []
    item_paths = []

    @property
    @memoize
    def query(self):
        """Decode the base64 catalog query from the request form."""
        query = self.request.form.get("catalog_query", {})
        if query:
            # SECURITY NOTE(review): eval() of request-supplied data; the
            # emptied __builtins__ mitigates but does not eliminate the
            # risk -- consider ast.literal_eval instead.
            query = eval(base64.b64decode(query), {"__builtins__": None}, {})
            query.update({"sort_on": "getObjPositionInParent"})
        return query

    @property
    @memoize
    def brains(self):
        """Run the catalog query (unrestricted on Plone < 5.2)."""
        pc = api.portal.get_tool(name="portal_catalog")
        if api.env.plone_version() < "5.2":
            return pc.unrestrictedSearchResults(**self.query)
        return pc(**self.query)

    @property
    @memoize
    def uids(self):
        # UIDs of all matched brains, for membership tests.
        return [x.UID for x in self.brains]

    @property
    @memoize
    def paths(self):
        # Physical paths of all matched brains.
        return [x.getPath() for x in self.brains]

    def flatten(self, children):
        """ Recursively flatten the tree """
        for obj in children:
            if obj["path"]:
                self.items.append(obj["path"])
            children = obj.get("children", None)
            if children:
                self.flatten(children)

    def pathInList(self, path):
        """True when ``path`` is an ancestor of some catalog result."""
        path_str = "{}/".format(path)
        for item_path in self.paths:
            if path_str in item_path:
                return True
        return False

    def explain_tree(self, root):
        """Build the nested {path, children} tree under ``root``.

        Keeps an object when it matched the query, or when it is a folder
        on the path to a match; recurses into folderish items.
        """
        results = []
        children = root.listFolderContents()
        for obj in children:
            path = (
                obj.absolute_url_path()
                if not getattr(obj, "getObject", None)
                else obj.getPath()
            )  # noqa
            if obj.UID() not in self.uids:
                if not self.pathInList(path):
                    # not a catalog result and not an ancestor folder of
                    # one: prune it
                    continue
            obj_dict = {"path": path, "children": []}
            if IFolderish.providedBy(obj):
                obj_dict["children"] = self.explain_tree(obj)
            results.append(obj_dict)
        return results

    def __call__(self):
        self.items = []
        # NOTE(review): this decoded query is never used below (the
        # memoized ``query`` property drives ``brains``); looks like dead
        # code -- confirm before removing.
        query = self.request.form.get("catalog_query", {})
        if query:
            query = eval(base64.b64decode(query), {"__builtins__": None}, {})
            query.update({"sort_on": "getObjPositionInParent"})
        self.request.response.setHeader("Content-Type", "application/json")
        self.items = []
        root = api.portal.get()
        tree = {"children": []}
        tree["children"].extend(self.explain_tree(root))
        if tree.get("path", None):
            self.items.append(tree["path"])
        self.flatten(tree["children"])
        item_paths = self.items
        return json.dumps(item_paths)
from Acquisition import aq_inner
from plone import api
from plone.restapi.serializer.converters import json_compatible
from redturtle.faq.interfaces import IFaq
from redturtle.faq.interfaces import IFaqFolder
from redturtle.faq.interfaces import IRedturtleFaqLayer
from redturtle.faq.interfaces import ISerializeFaqToJsonSummary
from zope.component import adapter
from zope.component import getMultiAdapter
from zope.interface import implementer
@implementer(ISerializeFaqToJsonSummary)
@adapter(IFaq, IRedturtleFaqLayer)
class FaqSummarySerializer(object):
    """Summary serializer for a single FAQ item.

    Unlike the stock summary serializer this one also exposes the item's
    blocks, so the frontend can render the full FAQ body.
    """

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __call__(self):
        faq = aq_inner(self.context)
        summary = {
            "@id": faq.absolute_url(),
            "id": faq.id,
            "title": faq.title,
            "description": faq.description,
            "@type": faq.portal_type,
            "created": json_compatible(faq.created()),
            "modified": json_compatible(faq.modified()),
            "UID": faq.UID(),
        }
        blocks = getattr(faq, "blocks", {})
        if blocks:
            summary["blocks"] = blocks
            summary["blocks_layout"] = getattr(faq, "blocks_layout", [])
        return summary
@implementer(ISerializeFaqToJsonSummary)
@adapter(IFaqFolder, IRedturtleFaqLayer)
class FaqFolderSummarySerializer(FaqSummarySerializer):
    """
    Summary serializer for a FAQ folder: besides the base metadata it
    also serializes its children (Faq and FaqFolder items).
    """
    def __call__(self):
        faqs = self.get_faqs()
        if not faqs:
            # do not show this folder
            return {}
        result = super(FaqFolderSummarySerializer, self).__call__()
        result["icon"] = getattr(self.context, "icon", "")
        result["items"] = faqs
        return result
    def get_faqs(self):
        """Return serialized children that match the current search."""
        children = self.context.listFolderContents(
            contentFilter={"portal_type": ["Faq", "FaqFolder"]}
        )
        query = self._build_query()
        catalog = api.portal.get_tool(name="portal_catalog")
        brains = catalog(**query)
        faq_uids = [x.UID for x in brains]
        res = []
        if not brains:
            # there are no faqs in this folder or none that match the search
            return res
        for child in children:
            if child.portal_type == "Faq" and child.UID() not in faq_uids:
                # this Faq does not match the search: skip it
                continue
            data = getMultiAdapter((child, self.request), ISerializeFaqToJsonSummary)()
            if data:
                res.append(data)
        return res
    def _build_query(self):
        """Catalog query for Faq items below this folder, optionally
        filtered by the ``SearchableText`` request parameter."""
        path = "/".join(self.context.getPhysicalPath())
        query = {
            "path": path,
            "portal_type": "Faq",
        }
        if "SearchableText" in self.request.form:
            query["SearchableText"] = self.request.form["SearchableText"]
        return query
from zope.interface import Attribute
from zope.interface import Interface
class IMigrationContextSteps(Interface):
    """
    Marker interface for contexts that need custom, site-specific
    migration steps.
    """
class IDeserializer(Interface):
    """Converts a serialized value into a field value."""
    def __call__(value, filestore, item):
        """Convert to a field value"""
class ITransmogrifier(Interface):
    """The transmogrifier transforms objects through a pipeline.

    (Methods below omit ``self`` by zope.interface convention.)
    """
    context = Attribute("The targeted IFolderish context")
    def __call__(self, configuration_id, overrides):
        """Load and execute the named pipeline configuration

        Any dictionaries passed in as extra keywords, are interpreted as
        section configuration overrides. Only string keys and values are
        accepted.
        """
    def __getitem__(section):
        """Retrieve a section from the pipeline configuration"""
    def keys():
        """List all sections in the pipeline configuration"""
    def __iter__():
        """Iterate over all the section names in the pipeline configuration"""
class ISectionBlueprint(Interface):
    """Blueprints create pipe sections."""
    def __call__(transmogrifier, name, options, previous):
        """Create a named pipe section for a transmogrifier.

        Returns an ISection with the given name and options, which will
        use *previous* as its input iterator when iterated over itself.
        """
class ISection(Interface):
    """A section in a transmogrifier pipe."""
    def __iter__():
        """Pipe sections are iterables.

        During iteration they process the previous section to produce
        output for the next pipe section.
        """
class IPortalTypeMapping(Interface):
    """
    Map an input portal_type into a new one
    """
    # Adapters implementing this interface are executed in this order.
    order = Attribute("The order which this adapter is run")
    def __init__(context, request):
        """Adapts context and the request."""
    def __call__(item):
        """Remap the item's type information and return the item."""
class IPostMigrationStep(Interface):
    """
    Do some post-migration steps
    """
    # Adapters implementing this interface are executed in this order.
    order = Attribute("The order which this adapter is run")
    def __init__(context, request):
        """Adapts context and the request."""
    def __call__():
        """Run the post-migration step."""
from redturtle.importer.base.interfaces import IPostMigrationStep
from zope.component import adapter
from zope.interface import implementer
from zope.interface import Interface
from plone import api
from plone.app.uuid.utils import uuidToObject
from plone.dexterity.utils import iterSchemata
from redturtle.importer.base.interfaces import IDeserializer
from zope.component import queryMultiAdapter
from zope.event import notify
from zope.lifecycleevent import ObjectModifiedEvent
from zope.schema import getFieldsInOrder
import logging
import six
logger = logging.getLogger(__name__)
@adapter(Interface, Interface)
@implementer(IPostMigrationStep)
class FixRelations(object):
    # Runs early among post-migration steps.
    order = 1
    def __init__(self, context, request):
        self.context = context
        self.request = request
    def __call__(self, transmogrifier):
        """Re-create relation field values after the content migration.

        The transmogrifier carries a list of ``(path, fieldname, value)``
        tuples for which relations must be rebuilt; *value* is a UID
        string (or a list of UIDs) pointing at the migrated targets.
        """
        logger.info("## Fix Relations ##")
        relations = getattr(transmogrifier, "fixrelations", [])
        for (path, fieldname, value) in relations:
            if not value:
                continue
            obj = api.content.get(path)
            if not obj:
                logger.warning(
                    "[FIX RELATIONS] - Unable to find {path}. No relations fixed.".format( # noqa
                        path=path
                    )
                )
                continue
            logger.info("fix {0} {1} {2}".format(path, fieldname, value))
            for schemata in iterSchemata(obj):
                for name, field in getFieldsInOrder(schemata):
                    if name == fieldname:
                        # NOTE(review): ``value`` is rebound below, so if the
                        # same field name appeared in more than one schema a
                        # second pass would re-convert an already converted
                        # value — presumably never happens; confirm.
                        if isinstance(value, six.string_types):
                            value = uuidToObject(value)
                        else:
                            value = [uuidToObject(uuid) for uuid in value]
                        deserializer = queryMultiAdapter(
                            (field, obj), IDeserializer
                        )
                        value = deserializer(
                            value, [], {}, True, logger=logger
                        )
                        field.set(field.interface(obj), value)
            notify(ObjectModifiedEvent(obj))
from __future__ import print_function
from Acquisition import aq_base
from redturtle.importer.base.interfaces import ISection
from redturtle.importer.base.interfaces import ISectionBlueprint
from redturtle.importer.base.transmogrifier.utils import defaultMatcher
from zope.container.contained import notifyContainerModified
from zope.interface import provider
from zope.interface import implementer
@implementer(ISection)
@provider(ISectionBlueprint)
class OrderSection(object):
    """Transmogrifier section that restores folder ordering.

    Records the position key of every item flowing through the pipe and,
    once the pipe is exhausted, reorders each parent folder accordingly.
    """
    def __init__(self, transmogrifier, name, options, previous):
        self.every = int(options.get("every", 1000))
        self.previous = previous
        self.context = transmogrifier.context
        self.pathkey = defaultMatcher(options, "path-key", name, "path")
        self.poskey = defaultMatcher(options, "pos-key", name, "gopip")
        # Position of items without a position value
        self.default_pos = int(options.get("default-pos", 1000000))
    def __iter__(self):
        # Store positions in a mapping containing an id to position mapping for
        # each parent path {parent_path: {item_id: item_pos}}.
        positions_mapping = {}
        for item in self.previous:
            keys = list(item.keys())
            pathkey = self.pathkey(*keys)[0]
            poskey = self.poskey(*keys)[0]
            if not (pathkey and poskey):
                # No path/position info on the item: pass it through.
                yield item
                continue
            item_id = item[pathkey].split("/")[-1]
            parent_path = "/".join(item[pathkey].split("/")[:-1])
            if parent_path not in positions_mapping:
                positions_mapping[parent_path] = {}
            positions_mapping[parent_path][item_id] = item[poskey]
            yield item
        # Set positions on every parent
        for path, positions in positions_mapping.items():
            # Normalize positions to a dense 0..n-1 range.
            ordered_keys = sorted(
                list(positions.keys()), key=lambda x: positions[x]
            )
            normalized_positions = {}
            for pos, key in enumerate(ordered_keys):
                normalized_positions[key] = pos
            # TODO: After the new redturtle.importer.base release (>1.4), the
            # utils.py provides a traverse method.
            from redturtle.importer.base.transmogrifier.utils import traverse
            parent = traverse(self.context, path)
            # parent = self.context.unrestrictedTraverse(path.lstrip('/'))
            if not parent:
                continue
            parent_base = aq_base(parent)
            if getattr(parent_base, "getOrdering", None):
                ordering = parent.getOrdering()
                # Only DefaultOrdering of p.folder is supported
                if not getattr(ordering, "_order", None) and not getattr(
                    ordering, "_pos", None
                ):
                    continue
                order = ordering._order()
                pos = ordering._pos()
                # Sort ids by the migrated position, falling back to the
                # current position and then to default_pos.
                order.sort(
                    key=lambda x: normalized_positions.get(
                        x, pos.get(x, self.default_pos)
                    )
                )
                for i, id_ in enumerate(order):
                    pos[id_] = i
                notifyContainerModified(parent)
from __future__ import print_function
from plone import api
from redturtle.importer.base.interfaces import ISection
from redturtle.importer.base.interfaces import ISectionBlueprint
from redturtle.importer.base.transmogrifier.utils import Condition
from redturtle.importer.base.transmogrifier.utils import Expression
from redturtle.importer.base import logger
from six.moves import zip
from zope.annotation.interfaces import IAnnotations
from zope.interface import provider
from zope.interface import implementer
import six
VALIDATIONKEY = "redturtle.importer.base.logger"
@implementer(ISection)
@provider(ISectionBlueprint)
class PathManipulator(object):
    """Rewrite each item's ``_path`` according to a template expression.

    Given a path ``first/second/third/fourth`` and the template
    ``string:first_fixed/=//=`` the item path becomes
    ``first_fixed/second/fourth``:

    - ``=`` keeps the corresponding path segment;
    - an empty segment drops the corresponding path segment;
    - any other string replaces the corresponding segment;
    - one ``*`` wildcard matches any run of middle segments, which are
      copied unchanged.
    """
    def __init__(self, transmogrifier, name, options, previous):
        self.template = Expression(
            options["template"], transmogrifier, name, options
        )
        self.condition = Condition(
            options.get("condition", "python:True"),
            transmogrifier,
            name,
            options,
        )
        self.previous = previous
        self.available_operators = ["*", "", "="]
        self.anno = IAnnotations(api.portal.get().REQUEST)
        # Per-request list of item paths, shared with the progress logger.
        self.storage = self.anno.setdefault(VALIDATIONKEY, [])
    def __iter__(self):
        for item in self.previous:
            template = six.text_type(self.template(item))
            result_path = [""]
            if self.condition(item, key=template):
                original_path = item["_path"].split("/")
                # Save the original_path in the item
                item["_original_path"] = "/".join(original_path)
                template = template.split("/")
                if len(original_path) != len(template) and (
                    "*" not in template and "**" not in template
                ):
                    # BUGFIX: the guard used ``or`` (true for nearly every
                    # template) and fell through after the yield, so the
                    # item was emitted a second time with its path clobbered
                    # to "".  Use ``and`` and bail out explicitly.
                    logger.debug(
                        "The template and the length of the path is not the"
                        "same nad there is no wildcards on it"
                    )
                    yield item
                    continue
                # One to one substitution, no wildcards
                if (
                    len(original_path) == len(template)
                    and u"*" not in template
                    and u"**" not in template
                ):
                    actions = list(zip(original_path, template))
                    for p_path, operator in actions:
                        if operator not in self.available_operators:
                            # Substitute one string for the other
                            result_path.append(operator)
                        elif operator == "=":
                            result_path.append(p_path)
                        elif operator == "":
                            pass
                # We only attend to the number of partial paths before and
                # after the wildcard
                if u"*" in template or u"**" in template:
                    # NOTE(review): a template containing only "**" would
                    # raise ValueError here; "**" support looks unfinished.
                    index = template.index(u"*")
                    # Process the head of the path (until wildcard)
                    head = list(zip(original_path, template[:index]))
                    for p_path, operator in head:
                        if operator not in self.available_operators:
                            # Substitute one string for the other
                            result_path.append(operator)
                        elif operator == "=":
                            result_path.append(p_path)
                        elif operator == "":
                            pass
                    # Need to know how many partial paths we have to copy (*)
                    tail_path_length = len(template[index:]) - 1
                    for p_path in original_path[index:-tail_path_length]:
                        result_path.append(p_path)
                    # Process the tail of the path (from wildcard)
                    original_path_reversed = list(original_path)
                    original_path_reversed.reverse()
                    # NOTE(review): ``template[-tail_path_length]`` is a
                    # single segment, so this zips path parts against its
                    # characters; probably meant ``[-tail_path_length:]``.
                    # Left untouched to preserve existing behaviour.
                    tail = list(
                        zip(
                            original_path_reversed, template[-tail_path_length]
                        )
                    )
                    # Complete the tail
                    for p_path, operator in tail:
                        if operator not in self.available_operators:
                            # Substitute one string for the other
                            result_path.append(operator)
                        elif operator == "=":
                            result_path.append(p_path)
                        elif operator == "":
                            pass
                # Update storage item counter path (for logging)
                if item["_path"] in self.storage:
                    self.storage.remove(item["_path"])
                self.storage.append("/".join(result_path))
                # Update item path
                item["_path"] = "/".join(result_path)
                # NOTE(review): items that fail the condition are never
                # yielded (this yield sits inside the ``if``), i.e. the
                # section also filters; confirm this is intended.
                yield item
from Products.ATContentTypes.interface import IATContentType, IATDocument, \
IATEvent, IATNewsItem, IATFile, IATImage, IATLink
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.interfaces import IPloneSiteRoot, IPloneSiteRoot
from Products.Maps.adapters import GeoLocation
from Products.Maps.interfaces import IMarker, IRichMarker
from Products.Maps.interfaces.map import IMapView, IMap
from urllib2 import urlopen
from zope.annotation.interfaces import IAnnotations
from zope.component import adapts, getMultiAdapter
from zope.interface import implements
import urllib
class CoreRichMarker(GeoLocation):
    '''
    First of all we need to wake up the adapter and check the default values
    when location is not set
    >>> from zope.annotation.interfaces import IAnnotations
    >>> from zope.component import getAdapter
    >>> from Products.Maps.interfaces import IRichMarker
    >>> adapter = getAdapter(self.portal.news, IRichMarker)
    >>> adapter
    <redturtle.maps.core.adapter.CoreRichMarker object at ...>
    >>> adapter.gmapurl
    'http://maps.google.com/maps/geo?q=&output=csv&key=None'
    >>> adapter.status
    >>> adapter.accuracy
    >>> adapter.latitude
    >>> adapter.longitude
    >>> adapter.icon
    'Red Marker'
    Then we check what happens when we have something in the location
    >>> self.portal.news.setLocation('via Modena 19, Ferrara')
    >>> adapter = getAdapter(self.portal.news, IRichMarker)
    >>> adapter.gmapurl
    '...?q=via%20Modena%2019%2C%20Ferrara&output=csv&key=None'
    >>> adapter.annotations['maps_location']
    'via Modena 19, Ferrara'
    >>> adapter.annotations['maps_data']
    '200,8,44.8472706,11.5963263'
    >>> adapter.status
    '200'
    >>> adapter.accuracy
    '8'
    >>> adapter.latitude
    '44.8472706'
    >>> adapter.longitude
    '11.5963263'
    >>> annotations = IAnnotations(self.portal.news)
    >>> annotations == adapter.annotations
    True
    '''
    implements(IRichMarker)
    adapts(IATContentType)
    def __init__(self, context):
        self.context = context
        self.annotations = IAnnotations(self.context)
        self.config = getMultiAdapter((self.context,
                                       self.context.REQUEST),
                                       name="maps_configuration")
        #ip ='66.249.93.104'
        self.location = self.context.getLocation()
        # self.location = self.annotations.get('maps_location', '')
        if not self.location:
            self.setDefault()
        else:
            self.updateMapsData()
    def setDefault(self):
        '''
        Sets the default value for some adapter attributes
        '''
        self.lat = None
        self.lon = None
        self.status = None
        self.accuracy = None
    def updateMapsData(self):
        '''
        Get maps data from google and sets the adapter attributes
        '''
        # Geocoding results are cached in the object's annotations and only
        # refreshed when the location text actually changed.
        if self.location != self.annotations.get('maps_location'):
            page = urlopen(self.gmapurl).read()
            status, accuracy, latitude, longitude = page.split(',')
            self.annotations['maps_data'] = page
            self.annotations['maps_location'] = self.location
        status, accuracy, latitude, longitude = self.annotations['maps_data'].split(',')
        self.lat = latitude
        self.lon = longitude
        self.status = status
        self.accuracy = accuracy
    @property
    def gmapurl(self):
        '''Returns the url to call to receive google geodata'''
        # NOTE(review): this is the legacy Google Maps geocoding endpoint;
        # presumably long since retired — confirm before relying on it.
        url = 'http://maps.google.com/maps/geo?q=%s&output=csv&key=%s'
        return url % (urllib.quote(self.location),
                      self.config.googlemaps_key)
    @property
    def latitude(self):
        return self.lat
    @property
    def longitude(self):
        return self.lon
    @property
    def title(self):
        return self.context.title_or_id()
    @property
    def description(self):
        return self.context.Description()
    @property
    def layers(self):
        # Marker layers map onto the content's subject keywords.
        return self.context.Subject()
    @property
    def icon(self):
        # Use the first configured marker icon as the default.
        marker_icons = self.config.marker_icons
        return marker_icons[0]['name']
    @property
    def url(self):
        return self.context.absolute_url()
    @property
    def related_items(self):
        return tuple()
    @property
    def contents(self):
        return tuple()
    @property
    def base(self):
        return self.getCustomContent('base')
    def getCustomContent(self, name):
        """Render the named view on the adapted context and return its HTML."""
        view = getMultiAdapter((self.context, self.context.REQUEST), name=name)
        return view()
    def getMapContent(self, title, name):
        """Return a {'title', 'text'} dict for the named view, or None when
        the view renders nothing."""
        html = self.getCustomContent(name)
        if html:
            return {'title': title, 'text': html}
        else:
            return None
""" the next classes are not actually needed, but in few days we could use them to customize the tabs of that types"""
class EventRichMarker(CoreRichMarker):
    # Placeholder specialization for Events; currently identical to the base.
    adapts(IATEvent)
    @property
    def contents(self):
        return tuple()
class NewsRichMarker(CoreRichMarker):
    # Placeholder specialization for News Items; identical to the base.
    adapts(IATNewsItem)
    @property
    def contents(self):
        return tuple()
class PageRichMarker(CoreRichMarker):
    # Placeholder specialization for Documents; identical to the base.
    adapts(IATDocument)
    @property
    def contents(self):
        return tuple()
class ImageRichMarker(CoreRichMarker):
    # Placeholder specialization for Images; identical to the base.
    adapts(IATImage)
    @property
    def contents(self):
        return tuple()
class FileRichMarker(CoreRichMarker):
    # Placeholder specialization for Files; identical to the base.
    adapts(IATFile)
    @property
    def contents(self):
        return tuple()
class LinkRichMarker(CoreRichMarker):
    # Placeholder specialization for Links; identical to the base.
    adapts(IATLink)
    @property
    def contents(self):
        return tuple()
class MapSearchResults(object):
    """Adapts the Plone site root so catalog search results can be
    rendered as map markers."""
    adapts(IPloneSiteRoot)
    implements(IMap)

    def __init__(self, context):
        self.context = context

    def getMarkers(self):
        """Yield an IMarker for every object matching the current request."""
        for brain in self.context.queryCatalog(REQUEST=self.context.REQUEST):
            yield IMarker(brain.getObject())
import re
from postmonkey import PostMonkey
from zope import schema
from zope.interface import Interface
from zope.interface import invariant
from zope.interface import Invalid
from redturtle.monkey import _
class IRedturtleMonkey(Interface):
    """Marker interface that defines a ZTK browser layer. We can reference
    this in the 'layer' attribute of ZCML <browser:* /> directives to ensure
    the relevant registration only takes effect when this package is
    installed. The browser layer is installed via the browserlayer.xml
    GenericSetup import step.
    """
class IMonkeyLocator(Interface):
    """Interface for the MailChimp locator utility."""
class ICampaign(Interface):
    """Marker interface for the AT (Archetypes) Campaign content type."""
class NotAnEmailAddress(schema.ValidationError):
    # zope.schema shows __doc__ as the user-facing validation message.
    __doc__ = _(u"Invalid email address")
# Loose e-mail pattern: local part, "@", optional dot-separated labels,
# then a 2-4 letter TLD.  BUGFIX: the dot in the domain group was
# unescaped (matched any character) and the match was unanchored at the
# end, so addresses with trailing garbage were accepted.
check_email = re.compile(
    r"[a-zA-Z0-9._%-]+@([a-zA-Z0-9-]+\.)*[a-zA-Z]{2,4}\Z"
).match


def validate_email(value):
    """Schema constraint: return True for a valid-looking address,
    raise NotAnEmailAddress otherwise."""
    if not check_email(value):
        raise NotAnEmailAddress(value)
    return True
class INewsletterSubscribe(Interface):
    """Schema of the newsletter subscription form."""
    email = schema.TextLine(
        title=_(u"Email address"),
        description=_(u"help_email",
                      default=u"Please enter your email address."),
        required=True,
        constraint=validate_email)
    email_type = schema.Choice(
        title=_(u"Mail format"),
        vocabulary="redturtle.monkey.vocabularies.EmailType",
        description=_(u"help_email_type",
                      default=u"Please choose type of newsletter you wish to receive."),
        default="text",
        required=False,
    )
    # The MailChimp list the subscriber is added to; filled by the portlet.
    list_id = schema.TextLine(
        title=_(u"List ID"),
        required=True
    )
class IMonkeySettings(Interface):
    """Global mailchimp settings. This describes records stored in the
    configuration registry and obtainable via plone.registry.
    """
    api_key = schema.TextLine(
        title=_(u"MailChimp API Key"),
        description=_(
            u"help_api_key",
            default=u"Enter in your MailChimp key here (.e.g. " +
            u"'8b785dcabe4b5aa24ef84201ea7dcded-us4'). Log into " +
            u"mailchimp.com, go to account -> extras -> API Keys & " +
            u"Authorized Apps and copy the API Key to this field."
        ),
        default=u"",
        required=True
    )
    from_email = schema.TextLine(
        title=_(u"Email from address"),
        description=_(u"help_from_email",
                      default=u"Please enter FROM email address."),
        required=True,
        constraint=validate_email)
    from_name = schema.TextLine(
        title=_(u"Email from name"),
        description=_(u"help_from_name",
                      default=u"Please enter FROM email name."),
        required=True)

    @invariant
    def valid_api_key(data):
        """Validate the configured key by pinging the MailChimp API."""
        if len(data.api_key) == 0:
            return
        mailchimp = PostMonkey(data.api_key)
        try:
            return mailchimp.ping()
        # BUGFIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            raise Invalid(
                u"Your MailChimp API key is not valid. Please go " +
                u"to mailchimp.com and check your API key.")
class IMailchimpSlot(Interface):
    """A mapping between a mailchimp slot and Plone content rendering."""
    name = schema.TextLine(
        title=_(u"Mailchimp slot name"),
        required=True)
    def render(objs=None, **kw):
        """Call IMailchimpSlotRenderer to generate the slot's HTML."""
class IMailchimpSlotRenderer(Interface):
    """Produces the rendered HTML for a slot."""
    def render():
        """Return the rendered HTML for this slot."""
from Acquisition import aq_inner
from zope.interface import alsoProvides
from z3c.form.interfaces import IFormLayer
from plone.z3cform.interfaces import IWrappedForm
from plone.z3cform import z2
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.interface import implements
from zope import schema
from z3c.form import field
from plone.memoize.compress import xhtml_compress
from plone.portlets.interfaces import IPortletDataProvider
from plone.app.portlets.portlets import base
from plone.app.z3cform.wysiwyg import WysiwygFieldWidget
from z3cformhelpers import AddForm
from z3cformhelpers import EditForm
from redturtle.monkey import _
from redturtle.monkey.interfaces import INewsletterSubscribe
from redturtle.monkey.browser.newsletter import NewsletterSubscriberForm
class IMailChimpPortlet(IPortletDataProvider):
    """Schema of the MailChimp subscribe portlet configuration."""
    name = schema.TextLine(
        title=_(u'Title'),
        description=_(u'Title of the portlet'))
    list_id = schema.Choice(
        title=_(u'Available lists'),
        description=_(u'Select list to subscribe to.'),
        required=True,
        vocabulary='redturtle.monkey.vocabularies.AllCampaignLists'
    )
    custom_css = schema.TextLine(
        title=_(u'Custom css'),
        required=False,
        description=_(u'Custom css class for portlet wrapper'))
    text = schema.Text(
        title=_(u"Text"),
        description=_(u"The text to render"),
        required=False)
class Assignment(base.Assignment):
    """Persistent portlet configuration chosen by the editor."""
    implements(IMailChimpPortlet)

    def __init__(self, name=u'', list_id=u'', custom_css=u'', text=u''):
        self.text = text
        self.custom_css = custom_css
        self.list_id = list_id
        self.name = name

    @property
    def title(self):
        # Shown in the "manage portlets" screen.
        return _(u"MailChimp")
class Renderer(base.Renderer):
    """Renders the subscription portlet by embedding the z3c.form
    newsletter subscribe form."""
    fields = field.Fields(INewsletterSubscribe)
    _template = ViewPageTemplateFile('subscribe.pt')
    form = NewsletterSubscriberForm
    def __init__(self, *args):
        base.Renderer.__init__(self, *args)
    def render(self):
        return xhtml_compress(self._template())
    @property
    def custom_css(self):
        # Extra CSS class for the portlet wrapper, if configured.
        return self.data.custom_css or ''
    @property
    def name(self):
        return self.data.name or _(u"Subscribe to newsletter")
    def text(self):
        # NOTE(review): plain method while name/custom_css are properties —
        # the template presumably calls it; confirm before unifying.
        return self.data.text or u''
    def update(self):
        # Switch on the z3c.form layer and prepare the wrapped form so it
        # can be rendered inside the portlet.
        super(Renderer, self).update()
        z2.switch_on(self, request_layer=IFormLayer)
        self.form = self.form(aq_inner(self.context), self.request)
        self.form.portlet = self.data
        alsoProvides(self.form, IWrappedForm)
        self.form.update()
class AddForm(AddForm):
    # NOTE: intentionally shadows the imported z3cformhelpers.AddForm,
    # which serves as the base class.
    fields = field.Fields(IMailChimpPortlet)
    fields['text'].widgetFactory = WysiwygFieldWidget
    label = _(u"Add MailChimp Portlet")
    description = _(
        u"This portlet displays a subscription form for a " +
        u"MailChimp newsletter.")
    def create(self, data):
        return Assignment(**data)
class EditForm(EditForm):
    # NOTE: intentionally shadows the imported z3cformhelpers.EditForm,
    # which serves as the base class.
    fields = field.Fields(IMailChimpPortlet)
    fields['text'].widgetFactory = WysiwygFieldWidget
    label = _(u"Edit MailChimp Portlet")
    description = _(
        u"This portlet displays a subscription form for a " +
        u"MailChimp newsletter.")
from zope.interface import Interface
from zope.component import getMultiAdapter
from plone.app.portlets.portlets import base
from plone.app.uuid.utils import uuidToObject
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.CMFCore.utils import getToolByName
from Products.Archetypes.config import REFERENCE_CATALOG
from zope.interface import implements
from redturtle.monkey import _
class IAddToCampaign(Interface):
    """
    Marker interface for the "add to campaign" portlet assignment.
    """
class Renderer(base.Renderer):
    """Renders the "add to campaign" portlet: lists the campaigns the
    current content is (or is not yet) part of."""

    render = ViewPageTemplateFile('addtocampaign.pt')

    @property
    def title(self):
        return _(u"Add item to campaign")

    def list_campaign(self):
        """Return {'related': [...], 'not_related': [...]} campaign objects,
        split on whether they already reference this context."""
        result = {'related': [],
                  'not_related': []}
        reference_catalog = getToolByName(self.context, REFERENCE_CATALOG)
        references = reference_catalog.getBackReferences(self.context,
                                                         relationship="campaignItems")
        related = [a.sourceUID for a in references]
        portal_catalog = getToolByName(self.context, 'portal_catalog')
        brains = portal_catalog(portal_type='Campaign')
        not_related = [a.UID for a in brains if a.UID not in related]
        result['related'] = [uuidToObject(u) for u in related]
        result['not_related'] = [uuidToObject(u) for u in not_related]
        return result

    @property
    def available(self):
        """Hide the portlet from anonymous visitors."""
        portal_state = getMultiAdapter((self.context, self.request),
                                       name=u'plone_portal_state')
        # Simplified from an if/else that returned literal booleans.
        return not portal_state.anonymous()
class Assignment(base.Assignment):
    """
    Assignment for Add to campaign
    """
    implements(IAddToCampaign)

    @property
    def title(self):
        # BUGFIX: the title read "Office information portlet", clearly
        # copy-pasted from another product; use the real portlet name.
        return _(u"Add item to campaign")
class AddForm(base.NullAddForm):
    """
    AddForm for Add to campaign
    """
    def create(self):
        # The portlet has no configurable fields, hence NullAddForm.
        assignment = Assignment()
        return assignment
class EditForm(base.EditForm):
    """
    EditForm for Add to campaign (no fields to edit).
    """
from zope.interface import implementer
from plone.app.portlets.portlets.search import ISearchPortlet, Renderer as baseRenderer, AddForm as BaseAddForm, EditForm as BaseEditForm
from plone.app.portlets.portlets import base
from plone.api import content
from zope import schema
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.vocabularies.catalog import CatalogSource
from redturtle.portlet.contextualsearch import ContextualSearchPortletMessageFactory as _
from zope.component import getMultiAdapter
from Acquisition import aq_inner
class IContextualSearchPortlet(ISearchPortlet):
    """A portlet that allows contextual search, extending the base search
    portlet schema.
    """
    portletTitle = schema.TextLine(title=_(u"portlet_title", default=u"Portlet title"),
                                   description=_(u"Insert the portlet title"),
                                   required=False)
    searchFolder = schema.Choice(title=_(u"Target folder"),
                                 required=False,
                                 description=_(u"Choose the folder to use for searches. If left blank, the search will use the current context as the starting folder"),
                                 source=CatalogSource())
@implementer(IContextualSearchPortlet)
class Assignment(base.Assignment):
    """Portlet assignment: the persistent configuration managed through
    the portlets UI and stored on a column."""

    def __init__(self, portletTitle='', enableLivesearch=True, searchFolder='', showAdvanced=False):
        self.enableLivesearch = enableLivesearch
        self.portletTitle = portletTitle
        self.searchFolder = searchFolder
        self.showAdvanced = showAdvanced

    @property
    def title(self):
        # base.Assignment exposes ``data`` as the assignment itself.
        base_title = "Contextual search portlet"
        if not self.data.portletTitle:
            return base_title
        return "{0}: {1}".format(base_title, self.data.portletTitle)
class Renderer(baseRenderer):
    """Portlet renderer."""
    render = ViewPageTemplateFile('contextualsearchportlet.pt')
    def getPosition(self):
        """returns the actual position for the contextual search"""
        if self.data.searchFolder:
            # A target folder is configured: resolve it by UID.
            rightObject = content.get(UID=self.data.searchFolder)
            root_path = '/'.join(rightObject.getPhysicalPath())
            return root_path
        else:
            # No folder configured: search from the current context
            # (or its parent when the context is a default page).
            folder = self.getRightContext()
            return '/'.join(folder.getPhysicalPath())
    def getPortletTitle(self):
        """return the portlet title"""
        if self.data.portletTitle:
            return self.data.portletTitle
        else:
            return "search"
    def getRightContext(self):
        """Return the folder to search from: the parent when the current
        context is its folder's default page, the context otherwise.
        """
        plone_view = getMultiAdapter((aq_inner(self.context), self.request), name='plone')
        if plone_view.isDefaultPageInFolder():
            return plone_view.getParentObject()
        else:
            return self.context
class AddForm(BaseAddForm):
    """Portlet add form.

    Registered in configure.zcml; ``create()`` builds the assignment
    that is being added.
    """
    schema = IContextualSearchPortlet

    # The former updateWidgets() override only called super() and has been
    # removed; inheritance provides identical behaviour.

    def create(self, data):
        return Assignment(**data)
class EditForm(BaseEditForm):
    """Portlet edit form, registered in configure.zcml."""
    schema = IContextualSearchPortlet

    # The former updateWidgets() override only called super() and has been
    # removed; inheritance provides identical behaviour.
from plone.app.blocks import utils
from plone.app.blocks.tiles import renderTiles
from plone.app.standardtiles import PloneMessageFactory as _
from plone.app.vocabularies.catalog import CatalogSource as CatalogSourceBase
from plone.memoize.view import memoize
from plone.supermodel import model
from plone.tiles import Tile
from plone.uuid.interfaces import IUUID
from Products.CMFCore.utils import getToolByName
from repoze.xmliter.utils import getHTMLSerializer
from zExceptions import Unauthorized
from zope import schema
from zope.browser.interfaces import IBrowserView
from zope.component.hooks import getSite
from plone.api import content
from zope.component import getMultiAdapter
from Acquisition import aq_inner
def uuidToObject(uuid):
    """Resolve *uuid* to a content object, or None when it cannot be
    found.  Raises Unauthorized if the current user may not access it."""
    brain = uuidToCatalogBrainUnrestricted(uuid)
    return brain.getObject() if brain is not None else None
def uuidToCatalogBrainUnrestricted(uuid):
    """Return the catalog brain for *uuid*, bypassing security checks —
    works even when the object is invisible to the current user (e.g.
    during anonymous traversal).  Returns None when the site or catalog
    is unavailable, or when the UID is missing/ambiguous."""
    site = getSite()
    if site is None:
        return None
    catalog = getToolByName(site, 'portal_catalog', None)
    if catalog is None:
        return None
    matches = catalog.unrestrictedSearchResults(UID=uuid)
    if len(matches) == 1:
        return matches[0]
    return None
class CatalogSource(CatalogSourceBase):
    """ExistingContentTile specific catalog source to allow targeted widget
    """
    def __contains__(self, value):
        # Always claim membership so values pointing at removed objects can
        # still be handled lazily instead of breaking validation.
        return True # Always contains to allow lazy handling of removed objs
class IContextualSearchTile(model.Schema):
    """Schema for the Contextual Search tile.
    """
    # NOTE: the user-facing titles/descriptions below are intentionally in
    # Italian (runtime strings) and must not be translated here.
    tile_title = schema.TextLine(
        title=_(u"Titolo tile"),
        required=True,
    )
    content_uid = schema.Choice(
        title=_(u"Seleziona cartella"),
        description=_(u"Seleziona la cartella da usare come partenza per le ricerche. Se si lascia il campo vuoto, verra' usata la posizione corrente come cartella di partenza."),
        required=False,
        source=CatalogSource(),
    )
class ContextualSearchTile(Tile):
"""Contextual Search tile
"""
    @property
    @memoize
    def content_context(self):
        """The content object this tile targets (via ``content_uid``), or
        None when unset, self-referential, or unresolvable."""
        uuid = self.data.get('content_uid')
        # Guard against a tile pointing at its own context.
        if uuid != IUUID(self.context, None):
            try:
                item = uuidToObject(uuid)
            except Unauthorized:
                item = None
                if not self.request.get('PUBLISHED'):
                    raise # Should raise while still traversing
            if item is not None:
                return item
        return None
@property
@memoize
def default_view(self):
context = self.content_context
if context is not None:
item_layout = context.getLayout()
default_view = context.restrictedTraverse(item_layout)
return default_view
return None
@property
def item_macros(self):
default_view = self.default_view
if default_view and IBrowserView.providedBy(default_view):
# IBrowserView
if getattr(default_view, 'index', None):
return default_view.index.macros
elif default_view:
# FSPageTemplate
return default_view.macros
return None
@property
def item_panels(self):
default_view = self.default_view
html = default_view()
if isinstance(html, unicode):
html = html.encode('utf-8')
serializer = getHTMLSerializer([html], pretty_print=False,
encoding='utf-8')
panels = dict(
(node.attrib['data-panel'], node)
for node in utils.panelXPath(serializer.tree)
)
if panels:
request = self.request.clone()
request.URL = self.content_context.absolute_url() + '/'
try:
renderTiles(request, serializer.tree)
except RuntimeError: # maximum recursion depth exceeded
return []
clear = '<div style="clear: both;"></div>'
return [''.join([serializer.serializer(child)
for child in node.getchildren()])
for name, node in panels.items()] + [clear]
return []
def __getattr__(self, name):
# proxy attributes for this view to the selected view of the content
# item so views work
if name in ('data',
'content_context',
'default_view',
'item_macros',
'item_panels',
'getPhysicalPath',
'index_html',
) or name.startswith(('_', 'im_', 'func_')):
return Tile.__getattr__(self, name)
return getattr(self.default_view, name)
# Altra robissima simpaticissima
def getPosition(self):
"""returns the actual position for the contextual search"""
if self.data['content_uid']:
rightObject = content.get(UID=self.data['content_uid'])
root_path = '/'.join(rightObject.getPhysicalPath())
return root_path
else:
folder = self.getRightContext()
return '/'.join(folder.getPhysicalPath())
def getPortletTitle(self):
"""return the portlet title"""
if self.data.portletTitle:
return self.data.portletTitle
else:
return "search"
def getRightContext(self):
"""
"""
plone_view = getMultiAdapter((aq_inner(self.context), self.request), name='plone')
if plone_view.isDefaultPageInFolder():
return plone_view.getParentObject()
else:
return self.context | /redturtle.portlet.contextualsearch-3.0.0.tar.gz/redturtle.portlet.contextualsearch-3.0.0/redturtle/portlet/contextualsearch/tiles/contextualsearchtile.py | 0.661814 | 0.251483 | contextualsearchtile.py | pypi |
from Acquisition import aq_base
from Products.Five import BrowserView
from cStringIO import StringIO
from plone.app.portlets.interfaces import IPortletManager
from plone.memoize.view import memoize
from pprint import PrettyPrinter
from zope.component import getMultiAdapter, getUtilitiesFor
_marker = []
def shasattr(obj, attr):
""" shasattr implementation inspired by Products.Archetypes
https://github.com/plone/Products.Archetypes/blob/master/Products/Archetypes/utils.py # noqa
"""
return getattr(aq_base(obj), attr, _marker) is not _marker
class InspectPortlets(BrowserView):
'''
Base view to inspect portlets
'''
@property
@memoize
def portlet_managers(self):
''' Returns the portlet managers, e.g.:
- plone.leftcolumn
- plone.rightcolumn
- ...
'''
managers = getUtilitiesFor(IPortletManager)
return tuple(managers)
def assignments(self, obj):
'''
Get assignments for object, i.e. the portlets assigned in the context
of obj
'''
all_assignments = {}
for manager_name, manager in self.portlet_managers:
manager_assignments = getMultiAdapter((obj, manager))
try:
keys = manager_assignments.keys()
except AttributeError:
keys = []
if keys:
values = [
(repr(manager_assignments[x]),
repr(manager_assignments[x].__class__))
for x in keys
if manager_assignments[x]
]
if values:
all_assignments[manager_name] = values
return all_assignments
def update_results(self, obj):
'''
Update the results
'''
assignments = self.assignments(obj)
if assignments:
self.results['/'.join(obj.getPhysicalPath())] = assignments
if shasattr(obj, 'listFolderContents'):
for x in obj.listFolderContents():
self.update_results(x)
def __call__(self):
'''
Check the portlets defined here and in some sublevels
'''
self.results = {}
self.update_results(self.context)
printer = PrettyPrinter(stream=StringIO())
printer.pprint(self.results)
return printer._stream.getvalue() | /redturtle.portlets.inspector-1.0.0.zip/redturtle.portlets.inspector-1.0.0/redturtle/portlets/inspector/browser/inspector.py | 0.612194 | 0.193604 | inspector.py | pypi |
from AccessControl import Unauthorized
from App.config import getConfiguration
from DateTime import DateTime
from datetime import datetime
from datetime import timedelta
from dateutil.tz.tz import tzutc
from logging import FileHandler
from logging import Formatter
from logging import getLogger
from plone import api
from plone.api.exc import UserNotFoundError
from plone.app.event.base import default_timezone
from six.moves import map
from zope.i18nmessageid import MessageFactory
from redturtle.prenotazioni.utils import is_migration
import pytz
import dateutil
logger = getLogger("redturtle.prenotazioni")
_ = MessageFactory("redturtle.prenotazioni")
prenotazioniMessageFactory = MessageFactory("redturtle.prenotazioni")
prenotazioniFileLogger = getLogger("redturtle.prenotazioni.file")
def tznow():
"""Return a timezone aware now"""
tz = pytz.timezone(default_timezone())
return datetime.now().astimezone(tz)
def datetime_with_tz(date_str):
"""
Return a datetime timezone aware
"""
if isinstance(date_str, datetime):
date = date_str
elif isinstance(date_str, DateTime):
date = date_str.asdatetime()
else:
try:
date = dateutil.parser.parse(date_str)
except ValueError:
raise ValueError(f"Invalid date: {date_str}")
if date.tzinfo is None or isinstance(date.tzinfo, tzutc):
tz = pytz.timezone(default_timezone())
date = date.astimezone(tz)
return date
def time2timedelta(value):
"""
Transform the value in a timedelta object
value is supposed to be in the format HH(.*)MM
"""
hours, minutes = list(map(int, (value[0:2], value[-2:])))
return timedelta(hours=hours, minutes=minutes)
def get_or_create_obj(folder, key, portal_type):
"""
Get the object with id key from folder
If it does not exist create an object with the given key and portal_type
:param folder: a Plone folderish object
:param key: the key of the child object
:param portal_type: the portal_type of the child object
"""
if key in folder:
return folder[key]
try:
userid = folder.getOwner().getId()
if not userid:
raise UserNotFoundError()
with api.env.adopt_user(userid):
return api.content.create(type=portal_type, title=key, container=folder)
except (UserNotFoundError, Unauthorized):
with api.env.adopt_roles(["Manager"]):
return api.content.create(type=portal_type, title=key, container=folder)
def init_handler():
"""
Protect the namespace
"""
if prenotazioniFileLogger.handlers:
return
product_config = getattr(getConfiguration(), "product_config", {})
config = product_config.get("redturtle.prenotazioni", {})
logfile = config.get("logfile")
if not logfile:
return
hdlr = FileHandler(logfile)
formatter = Formatter("%(asctime)s %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S")
hdlr.setFormatter(formatter)
prenotazioniFileLogger.addHandler(hdlr)
init_handler()
# TODO: Delete this as soon as possible
def monkey_patch_restapi_validation():
"""This is needed this to migrate the data properly"""
from plone.restapi.deserializer.dxcontent import DeserializeFromJson
get_schema_data = DeserializeFromJson.get_schema_data
def get_schema_data_impostor(*args, **kwargs):
if is_migration():
# reject the errors array returned by the `get_schema_data`
return get_schema_data(*args, **kwargs)[0], []
else:
return get_schema_data(*args, **kwargs)
DeserializeFromJson.get_schema_data = get_schema_data_impostor
try:
from redturtle.volto.restapi.deserializer.dxfields import (
DatetimeFieldDeserializer,
)
except ImportError:
return
DatetimeFieldDeserializer___call__ = DatetimeFieldDeserializer.__call__
def DatetimeFieldDeserializer___call___impostor(*args, **kwargs):
if is_migration():
return datetime_with_tz(args[1])
else:
return DatetimeFieldDeserializer___call__(*args, **kwargs)
DatetimeFieldDeserializer.__call__ = DatetimeFieldDeserializer___call___impostor
monkey_patch_restapi_validation() | /redturtle.prenotazioni-2.0.0rc1-py3-none-any.whl/redturtle/prenotazioni/__init__.py | 0.536556 | 0.155976 | __init__.py | pypi |
from plone import api
from plone.restapi.interfaces import ISerializeToJson
from plone.restapi.serializer.converters import json_compatible
from redturtle.prenotazioni.content.prenotazione import IPrenotazione
from redturtle.prenotazioni.interfaces import (
ISerializeToPrenotazioneSearchableItem,
)
from redturtle.prenotazioni import logger
from zope.component import adapter
from zope.i18n import translate
from zope.interface import implementer
from zope.publisher.interfaces import IRequest
@implementer(ISerializeToJson)
@adapter(IPrenotazione, IRequest)
class PrenotazioneSerializer:
def __init__(self, prenotazione, request):
self.prenotazione = prenotazione
self.request = request
def __call__(self, *args, **kwargs):
booking_folder = self.prenotazione.getPrenotazioniFolder()
useful_docs = getattr(booking_folder, "cosa_serve", "")
if self.prenotazione.fiscalcode:
fiscalcode = self.prenotazione.fiscalcode.upper()
else:
fiscalcode = None
status = api.portal.get_tool("portal_workflow").getStatusOf(
"prenotazioni_workflow", self.prenotazione
)
booking_date = self.prenotazione.booking_date
booking_expiration_date = self.prenotazione.booking_expiration_date
if (
booking_expiration_date
and booking_date.date() != booking_expiration_date.date()
):
logger.warning("Booking date and expiration date are different, fixing")
booking_date = booking_date.date()
booking_expiration_date = booking_expiration_date.replace(
booking_date.year, booking_date.month, booking_date.day
)
return {
"UID": self.prenotazione.UID(),
"@type": self.prenotazione.portal_type,
"title": self.prenotazione.title,
"description": self.prenotazione.description,
"gate": self.prenotazione.gate,
"id": self.prenotazione.id,
"phone": self.prenotazione.phone,
"email": self.prenotazione.email,
"fiscalcode": fiscalcode,
"company": self.prenotazione.company,
"staff_notes": self.prenotazione.staff_notes,
"booking_date": json_compatible(booking_date),
"booking_expiration_date": json_compatible(booking_expiration_date),
"booking_status_label": translate(
status["review_state"], context=self.request
),
"booking_type": self.prenotazione.booking_type,
"vacation": self.prenotazione.isVacation(),
"booking_code": self.prenotazione.getBookingCode(),
"cosa_serve": useful_docs,
}
@implementer(ISerializeToPrenotazioneSearchableItem)
@adapter(IPrenotazione, IRequest)
class PrenotazioneSearchableItemSerializer:
def __init__(self, prenotazione, request):
self.prenotazione = prenotazione
self.request = request
def __call__(self, *args, **kwargs):
wf_tool = api.portal.get_tool("portal_workflow")
status = wf_tool.getStatusOf("prenotazioni_workflow", self.prenotazione)
return {
"title": self.prenotazione.Title(),
"booking_id": self.prenotazione.UID(),
"booking_code": self.prenotazione.getBookingCode(),
"booking_url": self.prenotazione.absolute_url(),
"booking_date": json_compatible(self.prenotazione.booking_date),
"booking_expiration_date": json_compatible(
self.prenotazione.booking_expiration_date
),
"booking_type": self.prenotazione.booking_type,
# "booking_room": None,
"booking_gate": self.prenotazione.gate,
"booking_status": status["review_state"],
"booking_status_label": translate(
status["review_state"], context=self.request
),
"booking_status_date": json_compatible(status["time"]),
"booking_status_notes": status["comments"],
"email": self.prenotazione.email,
"fiscalcode": self.prenotazione.fiscalcode,
"phone": self.prenotazione.phone,
"staff_notes": self.prenotazione.staff_notes,
"company": self.prenotazione.company,
"vacation": self.prenotazione.isVacation(),
} | /redturtle.prenotazioni-2.0.0rc1-py3-none-any.whl/redturtle/prenotazioni/restapi/serializers/adapters/prenotazione.py | 0.536313 | 0.310276 | prenotazione.py | pypi |
from DateTime import DateTime
from plone import api
from plone.restapi.services import Service
from redturtle.prenotazioni.interfaces import (
ISerializeToPrenotazioneSearchableItem,
)
from zExceptions import Unauthorized
from zope.component import getMultiAdapter
from zope.interface import implementer
from zope.publisher.interfaces import IPublishTraverse
@implementer(IPublishTraverse)
class BookingsSearch(Service):
"""
Preonotazioni search view
"""
def publishTraverse(self, request, userid):
if not request.get("userid", None):
request.set("userid", userid)
return self
def query(self):
query = {
"portal_type": "Prenotazione",
"sort_on": "Date",
"sort_order": "reverse",
}
if api.user.is_anonymous():
raise Unauthorized("You must be logged in to perform this action")
elif api.user.has_permission("redturtle.prenotazioni: search prenotazioni"):
userid = self.request.get("userid", None)
else:
userid = api.user.get_current().getUserId()
if userid:
query["fiscalcode"] = userid.upper()
start_date = self.request.get("from", None)
end_date = self.request.get("to", None)
gate = self.request.get("gate", None)
booking_type = self.request.get("booking_type", None)
SearchableText = self.request.get("SearchableText", None)
review_state = self.request.get("review_state", None)
# 2023-01-01 -> 2023-01-01T00:00:00
if start_date and len(start_date) == 10:
start_date = f"{start_date}T00:00:00"
if end_date and len(end_date) == 10:
end_date = f"{end_date}T23:59:59"
if start_date or end_date:
query["Date"] = {
"query": [DateTime(i) for i in [start_date, end_date] if i],
"range": f"{start_date and 'min' or ''}{start_date and end_date and ':' or ''}{end_date and 'max' or ''}", # noqa: E501
}
if gate:
query["Subject"] = "Gate: {}".format(gate)
if booking_type:
query["booking_type"] = booking_type
if SearchableText:
query["SearchableText"] = SearchableText
if review_state:
query["review_state"] = review_state
return query
def reply(self):
response = {"id": self.context.absolute_url() + "/@bookings"}
query = self.query()
response["items"] = [
getMultiAdapter(
(i.getObject(), self.request),
ISerializeToPrenotazioneSearchableItem,
)()
for i in api.portal.get_tool("portal_catalog")(**query)
]
response["items_total"] = len(response["items"])
return response
class BookingsSearchFolder(BookingsSearch):
def query(self):
query = super(BookingsSearchFolder, self).query()
query["path"] = "/".join(self.context.getPhysicalPath())
return query | /redturtle.prenotazioni-2.0.0rc1-py3-none-any.whl/redturtle/prenotazioni/restapi/services/bookings/search.py | 0.485844 | 0.228867 | search.py | pypi |
from plone import api
from plone.memoize.view import memoize
from plone.restapi.interfaces import ISerializeToJson
from plone.restapi.services import Service
from zope.component import getMultiAdapter
from zExceptions import BadRequest
from redturtle.prenotazioni.adapters.slot import ISlot
from datetime import datetime, date
from zope.interface import implementer
from zope.publisher.interfaces import IPublishTraverse
@implementer(IPublishTraverse)
class DaySlots(Service):
day = None
def publishTraverse(self, request, day):
if self.day is None:
try:
self.day = datetime.fromisoformat(day).date()
except ValueError:
raise BadRequest("Invalid date")
return self
@property
@memoize
def prenotazioni_context_state(self):
return api.content.get_view(
"prenotazioni_context_state",
context=self.context,
request=self.request,
)
def reply(self):
"""
Finds all the busy slots in a week which is taken in base of
`data` param passed by the request or the current one.
Returns:
dict: contains all the busy slots of the day. Dict format:
`{
"@id": "http://localhost:8080/Plone/prenotazioni_folder/@day/2023-05-22",
"bookings": {
"gate1":
[
{
"booking_code": "17E3E6",
"booking_date": "2023-05-22T09:09:00",
"booking_expiration_date": "2023-05-22T09:10:00+00:00",
"booking_type": "xxx",
"company": null,
"cosa_serve": null,
"description": "",
"email": "mario.rossi@example",
"fiscalcode": "",
"gate": "postazione1",
"id": "mario-rossi-1",
"phone": "",
"staff_notes": null,
"title": "Mario Rossi"
},
...
],
"gate2":
[
{
"booking_code": "17E3E6",
"booking_date": "2023-05-22T09:09:00+00:00",
"booking_expiration_date": "2023-05-22T09:10:00+00:00",
"booking_type": "yyy",
"company": null,
"cosa_serve": null,
"description": "",
"email": "mario.rossi@example",
"fiscalcode": "",
"gate": "postazione2",
"id": "mario-rossi",
"phone": "",
"staff_notes": null,
"title": "Mario Rossi"
},
...
]
},
"pauses": [
{
"start": "2023-05-22T07:15:00+00:00",
"stop": "2023-05-22T08:30:00+00:00"
},
...
]
"daily_schedule": {
"afternoon": {
"start": "2023-05-22T12:00:00+00:00",
"stop": "2023-05-22T16:00:00+00:00"
},
"morning": {
"start": "2023-05-22T05:00:00+00:00",
"stop": "2023-05-22T11:00:00+00:00"
}
}
}`
"""
if self.day is None:
self.day = date.today()
return {
"@id": f"{self.context.absolute_url()}/@day/{self.day.isoformat()}",
"bookings": self.get_bookings(),
"pauses": self.get_pauses(),
"daily_schedule": self.get_daily_schedule(),
"gates": self.get_gates(),
}
def get_bookings(self):
bookings = self.prenotazioni_context_state.get_bookings_in_day_folder(self.day)
bookings_result = {}
for gate in {i.gate for i in bookings}:
bookings_result[gate] = [
{
**getMultiAdapter((i, self.request), ISerializeToJson)(),
}
for i in bookings
if i.gate == gate
]
return bookings_result
def get_pauses(self):
return [
getMultiAdapter((ISlot(i), self.request), ISerializeToJson)()
for i in self.prenotazioni_context_state.get_pauses_in_day_folder(self.day)
]
def get_daily_schedule(self):
intervals = self.prenotazioni_context_state.get_day_intervals(self.day)
if not intervals:
return {}
return {
"morning": getMultiAdapter(
(intervals["morning"], self.request), ISerializeToJson
)(),
"afternoon": getMultiAdapter(
(intervals["afternoon"], self.request), ISerializeToJson
)(),
}
def get_gates(self):
return self.prenotazioni_context_state.get_gates(self.day) | /redturtle.prenotazioni-2.0.0rc1-py3-none-any.whl/redturtle/prenotazioni/restapi/services/day/day.py | 0.698432 | 0.316422 | day.py | pypi |
from datetime import timedelta
from plone import api
from plone.restapi.serializer.converters import json_compatible
from plone.restapi.services import Service
from redturtle.prenotazioni import _
from zExceptions import BadRequest
import calendar
import datetime
class AvailableSlots(Service):
def reply(self):
"""
Finds all the available slots in a month.
If you pass a start and end date, the search will be made between these dates.
If not, the search will start from current date until the end of current month.
"""
prenotazioni_week_view = api.content.get_view(
"prenotazioni_week_view", context=self.context, request=self.request
)
start = self.request.form.get("start", "")
end = self.request.form.get("end", "")
past_slots = self.request.form.get("past_slots", False)
if start:
start = datetime.date.fromisoformat(start)
else:
start = datetime.date.today()
if end:
end = datetime.date.fromisoformat(end)
else:
end = start.replace(day=calendar.monthrange(start.year, start.month)[1])
if start > end:
msg = self.context.translate(
_(
"available_slots_wrong_dates",
default="End date should be greater than start.",
)
)
raise BadRequest(msg)
booking_type = self.request.form.get("booking_type")
if booking_type:
slot_min_size = (
prenotazioni_week_view.prenotazioni.get_booking_type_duration(
booking_type
)
* 60
)
else:
slot_min_size = 0
response = {
"@id": f"{self.context.absolute_url()}/@available-slots",
"items": [],
}
for n in range(int((end - start).days) + 1):
booking_date = start + timedelta(n)
slots = prenotazioni_week_view.prenotazioni.get_anonymous_slots(
booking_date=booking_date
)
for slot in slots.get("anonymous_gate", []):
info = prenotazioni_week_view.prenotazioni.get_anonymous_booking_url(
booking_date, slot, slot_min_size=slot_min_size
)
if not info.get("url", ""):
continue
if not past_slots and not info.get("future"):
continue
response["items"].append(json_compatible(info.get("booking_date", "")))
return response | /redturtle.prenotazioni-2.0.0rc1-py3-none-any.whl/redturtle/prenotazioni/restapi/services/available_slots/get.py | 0.472197 | 0.221424 | get.py | pypi |
from email.utils import formataddr
from email.utils import parseaddr
from logging import getLogger
from plone import api
from plone.app.event.base import default_timezone
from plone.registry.interfaces import IRegistry
from Products.CMFPlone.interfaces.controlpanel import IMailSchema
from redturtle.prenotazioni import _
from redturtle.prenotazioni.utils import is_migration
from redturtle.prenotazioni.adapters.booker import IBooker
from redturtle.prenotazioni.interfaces import IPrenotazioneEmailMessage
from zope.component import getMultiAdapter
from zope.component import getUtility
from zope.i18n import translate
from plone.stringinterp.interfaces import IStringSubstitution
from zope.component import getAdapter
logger = getLogger(__name__)
def reallocate_gate(obj):
"""
We have to reallocate the gate for this object
Skip this step if we have a form.gate parameter in the request
DISABLED: SEEMS ONLY GENERATES PROBLEMS
"""
context = obj.object
if context.REQUEST.form.get("form.gate", "") and getattr(context, "gate", ""):
return
container = context.getPrenotazioniFolder()
booking_date = context.getBooking_date()
new_gate = IBooker(container).get_available_gate(booking_date)
if new_gate:
context.gate = new_gate
def reallocate_container(obj):
"""
If we moved Prenotazione to a new week we should move it
"""
container = obj.object.getPrenotazioniFolder()
IBooker(container).fix_container(obj.object)
def notify_on_after_transition_event(context, event):
"""The messages are being send only if the following flags on the PrenotazioniFolder are set"""
booking_folder = context.getPrenotazioniFolder()
flags = {
i: getattr(booking_folder, f"notify_on_{i}", False)
for i in ("confirm", "submit", "refuse")
}
if flags["confirm"] and flags["submit"]:
flags["submit"] = False
if flags.get(event.transition and event.transition.__name__ or "", False):
if not getattr(context, "email", ""):
# booking does not have an email set
return
adapter = getMultiAdapter(
(context, event),
IPrenotazioneEmailMessage,
name=event.transition.__name__,
)
if adapter:
if adapter.message:
send_email(adapter.message)
def autoconfirm(booking, event):
if api.content.get_state(obj=booking, default=None) == "pending":
if getattr(booking.getPrenotazioniFolder(), "auto_confirm", False):
api.content.transition(obj=booking, transition="confirm")
booking.reindexObject(idxs="review_state")
def notify_on_move(booking, event):
if not getattr(booking.getPrenotazioniFolder(), "notify_on_move", False):
return
if not getattr(booking, "email", ""):
# booking does not have an email set
return
adapter = getMultiAdapter((booking, event), IPrenotazioneEmailMessage)
if adapter:
if adapter.message:
send_email(adapter.message)
def send_email(msg):
if not msg:
logger.error("Could not send email due to no message was provided")
return
host = api.portal.get_tool(name="MailHost")
registry = getUtility(IRegistry)
encoding = registry.get("plone.email_charset", "utf-8")
host.send(msg, charset=encoding)
def get_mail_from_address():
registry = getUtility(IRegistry)
mail_settings = registry.forInterface(IMailSchema, prefix="plone", check=False)
from_address = mail_settings.email_from_address
from_name = mail_settings.email_from_name
if not from_address:
return ""
from_address = from_address.strip()
mfrom = formataddr((from_name, from_address))
if parseaddr(mfrom)[1] != from_address:
mfrom = from_address
return mfrom
def send_email_to_managers(booking, event):
# skip email for vacation/out-of-office
if is_migration():
return
if booking.isVacation():
return
booking_folder = booking.getPrenotazioniFolder()
booking_operator_url = (
getAdapter(booking, IStringSubstitution, "booking_operator_url")(),
)
email_list = getattr(booking_folder, "email_responsabile", "")
if email_list:
mail_template = api.content.get_view(
name="manager_notification_mail",
context=booking,
request=booking.REQUEST,
)
booking_date = getattr(booking, "booking_date", None)
parameters = {
"company": getattr(booking, "company", ""),
"booking_folder": booking_folder.title,
"booking_url": booking_operator_url,
"booking_date": booking_date.astimezone(
default_timezone(as_tzinfo=True)
).strftime("%d/%m/%Y"),
"booking_hour": booking_date.astimezone(
default_timezone(as_tzinfo=True)
).strftime("%H:%M"),
"booking_expiration_date": getattr(booking, "booking_expiration_date", ""),
"description": getattr(booking, "description", ""),
"email": getattr(booking, "email", ""),
"fiscalcode": getattr(booking, "fiscalcode", ""),
"gate": getattr(booking, "gate", ""),
"phone": getattr(booking, "phone", ""),
"staff_notes": getattr(booking, "staff_notes", ""),
"booking_type": getattr(booking, "booking_type", ""),
"title": getattr(booking, "title", ""),
}
mail_text = mail_template(**parameters)
mailHost = api.portal.get_tool(name="MailHost")
subject = translate(
_(
"new_booking_admin_notify_subject",
default="New booking for ${context}",
mapping={"context": booking_folder.title},
),
context=booking.REQUEST,
)
for mail in email_list:
if mail:
mailHost.send(
mail_text,
mto=mail,
mfrom=get_mail_from_address(),
subject=subject,
charset="utf-8",
msg_type="text/html",
immediate=True,
) | /redturtle.prenotazioni-2.0.0rc1-py3-none-any.whl/redturtle/prenotazioni/events/prenotazione.py | 0.408041 | 0.17427 | prenotazione.py | pypi |
from zope.interface import implementer
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
from plone import api
# Message ids (in the "plonelocales" translation domain) for the twelve
# months, in calendar order; index + 1 is the month number.
MONTH_MSGIDS = (
    "month_jan",
    "month_feb",
    "month_mar",
    "month_apr",
    "month_may",
    "month_jun",
    "month_jul",
    "month_aug",
    "month_sep",
    "month_oct",
    "month_nov",
    "month_dec",
)


@implementer(IVocabularyFactory)
class VocMonths(object):
    """Vocabulary factory listing the twelve months.

    Each term uses the month number (1-12) as both value and token, and
    the month name translated through the ``plonelocales`` domain as
    title.
    """

    def __call__(self, context):
        """Return a SimpleVocabulary with one term per month.

        :param context: vocabulary context (unused, required by the
            IVocabularyFactory contract).
        """
        terms = [
            SimpleTerm(
                value=number,
                token=number,
                title=api.portal.translate(msgid=msgid, domain="plonelocales"),
            )
            for number, msgid in enumerate(MONTH_MSGIDS, start=1)
        ]
        return SimpleVocabulary(terms)


VocMonthsFactory = VocMonths()
from plone import api
from plone.memoize.view import memoize
from plone.z3cform.layout import wrap_form
from redturtle.prenotazioni import _
from redturtle.prenotazioni import datetime_with_tz
from redturtle.prenotazioni.adapters.booker import BookerException
from redturtle.prenotazioni.adapters.booker import IBooker
from redturtle.prenotazioni.browser.week import TIPOLOGIA_PRENOTAZIONE_NAME_COOKIE
from redturtle.prenotazioni.browser.z3c_custom_widget import CustomRadioFieldWidget
from redturtle.prenotazioni.config import REQUIRABLE_AND_VISIBLE_FIELDS
from redturtle.prenotazioni.content.prenotazione import IPrenotazione
from redturtle.prenotazioni.utilities.urls import urlify
from z3c.form import button
from z3c.form import field
from z3c.form import form
from z3c.form.interfaces import ActionExecutionError
from z3c.form.interfaces import HIDDEN_MODE
from z3c.form.interfaces import WidgetActionExecutionError
from zope.component import getUtility
from zope.interface import implementer
from zope.interface import Invalid
from zope.schema import Text
from zope.schema import TextLine
from zope.schema.interfaces import IVocabularyFactory
DEFAULT_REQUIRED_FIELDS = []
class IAddForm(IPrenotazione):
    """
    Interface for creating a prenotazione.

    Extends the content-type schema (IPrenotazione) with the two free-text
    fields shown to the visitor; field order here drives the default form
    layout (see AddForm.fields, which later moves ``title``).
    """
    # Booker's full name; mandatory.
    title = TextLine(
        title=_("label_booking_title", "Fullname"), default="", required=True
    )
    # Optional free-text subject/notes for the booking.
    description = Text(
        title=_("label_booking_description", "Subject"), default="", required=False
    )
@implementer(IAddForm)
class AddForm(form.AddForm):
    """Add form used by visitors to create a booking (Prenotazione)."""

    # The surrounding view drives the rendering, not the form itself.
    render_form = False
    # Never read initial widget values from the context: we always create
    # new content here.
    ignoreContext = True
    @property
    def fields_schema(self):
        """Return the z3c.form fields extracted from the IAddForm schema."""
        return field.Fields(IAddForm)
@property
def fields(self):
fields = self.fields_schema
fields["booking_type"].widgetFactory = CustomRadioFieldWidget
# omit some fields
fields = fields.omit("gate").omit("booking_expiration_date").omit("staff_notes")
# move title on top (after the type)
ids = [x for x in fields.keys()]
ids.insert(2, ids.pop(ids.index("title")))
fields = fields.select(*ids)
return fields
    def updateWidgets(self):
        """Tune the auto-generated widgets.

        - hide the ``booking_date`` widget and fill it from the request;
        - toggle the ``required`` flag of the configurable fields
          according to the folder's ``required_booking_fields`` setting;
        - hide configurable fields not listed in
          ``visible_booking_fields``;
        - for authenticated users without edit rights, prefill widgets
          from member properties and make them read-only.
        """
        super(AddForm, self).updateWidgets()
        # The date is chosen in the calendar view, not typed here.
        self.widgets["booking_date"].mode = HIDDEN_MODE
        # The date may arrive under either of two request keys, depending
        # on how the form was reached.
        bookingdate = self.request.form.get(
            "form.booking_date",
            self.request.form.get("form.widgets.booking_date"),
        )
        self.widgets["booking_date"].value = bookingdate
        required_fields_factory = getUtility(
            IVocabularyFactory,
            "redturtle.prenotazioni.requirable_booking_fields",
        )
        required_fields_vocabulary = required_fields_factory(self.context)
        # NOTE(review): this reaches into the private ``_terms`` attribute
        # of the vocabulary; iterating the vocabulary itself would be safer.
        possibly_required_fields = [x.token for x in required_fields_vocabulary._terms]
        for f in self.widgets.values():
            # If a field is required by schema, filling it and then
            # emptying it shows a red alert before the form is even
            # submitted. This way all the possibly required fields share
            # the same behaviour: the red alert frame appears only after a
            # submit.
            name = f.__name__
            if name in DEFAULT_REQUIRED_FIELDS:
                f.required = True
            if name in possibly_required_fields:
                # Zen of Python: "Explicit is better than implicit."
                # We could set this field to not-required in the schema,
                # but part of the code below would be necessary anyway, so
                # we prefer to be explicit about what we are doing.
                if name in self.context.required_booking_fields:
                    f.required = True
                else:
                    f.required = False
            if (
                name in REQUIRABLE_AND_VISIBLE_FIELDS
                and name not in self.context.visible_booking_fields
            ):
                f.mode = "hidden"
        if not api.user.is_anonymous() and not api.user.has_permission(
            "Modify portal content", obj=self.context
        ):
            # Authenticated visitor without edit rights: prefill widgets
            # with the member's stored properties and lock them.
            user = api.user.get_current()
            for field_name in self.widgets:
                if field_name == "title":
                    # the "title" field maps to the member's fullname
                    value = user.getProperty("fullname", "")
                else:
                    value = user.getProperty(field_name, "")
                if value:
                    self.widgets[field_name].value = value
                    self.widgets[field_name].readonly = "readonly"
@property
@memoize
def localized_time(self):
"""Facade for context/@@plone/toLocalizedTime"""
return api.content.get_view("plone", self.context, self.request).toLocalizedTime
@property
@memoize
def label(self):
"""
Check if user is anonymous
"""
booking_date = self.booking_DateTime
if not booking_date:
return ""
localized_date = self.localized_time(booking_date)
return _(
"label_selected_date",
"Selected date: ${date} — Time: ${slot}",
mapping={
"date": localized_date,
"slot": booking_date.strftime("%H:%M"),
},
)
@property
@memoize
def description(self):
"""
Check if user is anonymous
"""
return _("help_prenotazione_add", "")
@property
@memoize
def booking_DateTime(self):
    """The requested booking date as a tz-aware datetime, or ``None``.

    The value may arrive either as ``form.booking_date`` (calendar
    links) or as ``form.widgets.booking_date`` (the z3c.form widget).
    """
    form = self.request.form
    raw_value = form.get("form.booking_date", None) or form.get(
        "form.widgets.booking_date", None
    )
    if not raw_value:
        return None
    return datetime_with_tz(raw_value)
@property
@memoize
def is_anonymous(self):
    """True when the current visitor is not authenticated."""
    return api.user.is_anonymous()
@property
@memoize
def prenotazioni(self):
    """The ``prenotazioni_context_state`` helper view for this folder.

    Everyone should know about this!
    """
    view_name = "prenotazioni_context_state"
    return api.content.get_view(view_name, self.context, self.request)
@property
@memoize
def back_to_booking_url(self):
    """URL of the booking calendar, preserving the selected day."""
    query = {}
    selected = self.booking_DateTime
    if selected:
        query["data"] = selected.strftime("%d/%m/%Y")
    return urlify(self.context.absolute_url(), params=query)
@button.buttonAndHandler(_("action_book", "Book"))
def action_book(self, action):
    """Create the booking (form submit handler).

    Validates the required fields, delegates creation to the
    ``IBooker`` adapter and redirects to the booking print view.
    """
    data, errors = self.extractData()
    if errors:
        self.status = self.formErrorsMessage
        return
    # Work on a copy: appending to the attribute itself would mutate
    # the ``required_booking_fields`` list stored on the folder.
    required = list(self.context.required_booking_fields)
    # booking_type is always mandatory for a booking (ticket: 19131)
    if "booking_type" not in required:
        required.append("booking_type")
    for field_id in self.fields.keys():
        if field_id in required and not data.get(field_id, ""):
            raise WidgetActionExecutionError(
                field_id, Invalid(_("Required input is missing."))
            )
    if not data.get("booking_date"):
        raise WidgetActionExecutionError(
            "booking_date", Invalid(_("Please provide a booking date"))
        )
    booker = IBooker(self.context.aq_inner)
    try:
        obj = booker.book(data=data)
    except BookerException as e:
        api.portal.show_message(e.args[0], self.request, type="error")
        raise ActionExecutionError(Invalid(e.args[0]))
    msg = _("booking_created")
    api.portal.show_message(message=msg, type="info", request=self.request)
    booking_date = getattr(obj, "booking_date", None).strftime("%d/%m/%Y")
    params = {
        "data": booking_date,
        "uid": obj.UID(),
    }
    target = urlify(
        self.context.absolute_url(),
        paths=["@@prenotazione_print"],
        params=params,
    )
    # Forget the remembered booking type: the next booking starts clean.
    self.request.response.expireCookie(
        TIPOLOGIA_PRENOTAZIONE_NAME_COOKIE,
        path="/",
    )
    return self.request.response.redirect(target)
@button.buttonAndHandler(_("action_cancel", default="Cancel"), name="cancel")
def action_cancel(self, action):
    """Abort the booking and go back to the calendar view."""
    return self.request.response.redirect(self.back_to_booking_url)
def show_message(self, msg, msg_type):
    """Facade for :func:`plone.api.portal.show_message`."""
    show_message = api.portal.show_message
    return show_message(msg, request=self.request, type=msg_type)
def redirect(self, target, msg="", msg_type="error"):
    """Redirect the user to *target*.

    :param msg: optional status message flashed before redirecting
    :param msg_type: portal message type (info/warning/error)
    """
    if msg:
        self.show_message(msg, msg_type)
    return self.request.response.redirect(target)
def has_enough_time(self):
    """True when the selected slot still leaves enough time to book."""
    return self.prenotazioni.is_booking_date_bookable(self.booking_DateTime)
def __call__(self):
    """Render the form, or bounce back when the request is not bookable."""
    # A booking date is mandatory: without it, back to the calendar.
    if not self.booking_DateTime:
        return self.redirect(
            self.back_to_booking_url,
            _("please_pick_a_date", "Please select a time slot"),
        )
    # The slot must leave enough room for at least one booking type.
    if not self.has_enough_time():
        return self.redirect(
            self.back_to_booking_url,
            _(
                "time_slot_to_short",
                "You cannot book any booking_type at this time",
            ),
        )
    return super(AddForm, self).__call__()
WrappedAddForm = wrap_form(AddForm) | /redturtle.prenotazioni-2.0.0rc1-py3-none-any.whl/redturtle/prenotazioni/browser/prenotazione_add.py | 0.63861 | 0.256919 | prenotazione_add.py | pypi |
from plone import api
from plone.memoize.view import memoize
from plone.protect.utils import addTokenToUrl
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from redturtle.prenotazioni import _
from redturtle.prenotazioni import datetime_with_tz
from redturtle.prenotazioni import tznow
from redturtle.prenotazioni.adapters.booker import BookerException
from redturtle.prenotazioni.adapters.booker import IBooker
from redturtle.prenotazioni.utilities.urls import urlify
from z3c.form import button
from z3c.form import field
from z3c.form import form
from z3c.form.interfaces import ActionExecutionError
from zope.interface import implementer
from zope.interface import Interface
from zope.interface import Invalid
from zope.schema import Datetime
from zope.schema import TextLine
import logging
logger = logging.getLogger(__name__)
class IMoveForm(Interface):
    """
    Interface for moving a prenotazione
    """

    # Target day/time for the booking being moved.
    booking_date = Datetime(title=_("label_booking_time", "Booking time"), default=None)
    # Target gate; optional because a folder may define no gates.
    gate = TextLine(title=_("label_gate", "Gate"), default="", required=False)
@implementer(IMoveForm)
class MoveForm(form.Form):
    """Controller for moving a booking to another slot/gate."""

    # Values come from the request, not from the context object.
    ignoreContext = True
    template = ViewPageTemplateFile("templates/prenotazione_move.pt")
    hidden_fields = []
    fields = field.Fields(IMoveForm)

    @property
    @memoize
    def prenotazioni_folder(self):
        """
        The PrenotazioniFolder object that contains the context
        """
        return self.context.getPrenotazioniFolder()

    @property
    @memoize
    def prenotazioni_view(self):
        """
        The prenotazioni_context_state view in the context
        of prenotazioni_folder
        """
        return api.content.get_view(
            "prenotazioni_context_state",
            self.prenotazioni_folder,
            self.request,
        )

    # NOTE: @memoize here breaks the view
    def slot_styles(self, slot):
        """
        Return the css classes used to underline the moved slot
        """
        context = slot.context
        if not context:
            return "links"
        styles = [self.prenotazioni_view.get_state(context)]
        if context == self.context:
            # Highlight the slot occupied by this very booking.
            styles.append("links")
        return " ".join(styles)

    @property
    @memoize
    def back_to_booking_url(self):
        """This goes back to booking view."""
        qs = {"data": (self.context.getBooking_date().strftime("%d/%m/%Y"))}
        return urlify(self.prenotazioni_folder.absolute_url(), params=qs)

    # NOTE: @memoize here breaks the view
    def move_to_slot_links(self, day, slot, gate):
        """
        Returns the urls to move the booking inside this slot
        """
        if not self.prenotazioni_view.is_valid_day(day):
            return []
        if self.prenotazioni_view.maximum_bookable_date:
            # Never offer targets beyond the bookable horizon.
            if day > self.prenotazioni_view.maximum_bookable_date.date():
                return []
        date = day.strftime("%Y-%m-%d")
        params = {
            "form.buttons.action_move": "Move",
            "data": self.request.form.get("data", ""),
            "form.widgets.gate": gate,
        }
        # One link every 5 minutes (300 seconds) inside the slot.
        times = slot.get_values_hr_every(300)
        urls = []
        base_url = "/".join((self.context.absolute_url(), "prenotazione_move"))
        now = tznow()
        for t in times:
            booking_date_str = "T".join((date, t))
            booking_date = datetime_with_tz(booking_date_str)
            params["form.widgets.booking_date"] = booking_date_str
            urls.append(
                {
                    "title": t,
                    "url": addTokenToUrl(urlify(base_url, params=params)),
                    "class": t.endswith(":00") and "oclock" or None,
                    "future": (now <= booking_date),
                }
            )
        return urls

    @button.buttonAndHandler(_("action_move", "Move"))
    def action_move(self, action):
        """Move this booking to the selected slot.

        TODO: logic duplicated in services/booking/move.py
        """
        data, errors = self.extractData()
        if errors:
            # Stop on validation errors: without this guard the handler
            # went on and crashed on the missing ``booking_date`` key.
            self.status = getattr(
                self, "formErrorsMessage", _("There were some errors.")
            )
            return
        booker = IBooker(self.prenotazioni_folder)
        try:
            booker.move(booking=self.context, data=data)
        except BookerException as e:
            api.portal.show_message(e.args[0], self.request, type="error")
            raise ActionExecutionError(Invalid(e.args[0]))
        msg = _("booking_moved")
        api.portal.show_message(msg, self.request, type="info")
        booking_date = data["booking_date"].strftime("%d/%m/%Y")
        target = urlify(
            self.prenotazioni_folder.absolute_url(),
            paths=["prenotazioni_week_view"],
            params={"data": booking_date},
        )
        return self.request.response.redirect(target)

    @button.buttonAndHandler(_("action_cancel", "Cancel"))
    def action_cancel(self, action):
        """
        Cancel and go back to the booking view
        """
        target = self.back_to_booking_url
        return self.request.response.redirect(target)
def __call__(self):
    """Disable the portlet columns, then render the move form."""
    for column in ("disable_plone.leftcolumn", "disable_plone.rightcolumn"):
        self.request.set(column, 1)
    return super(MoveForm, self).__call__()
from plone import api
from plone.memoize.view import memoize
from Products.CMFPlone.utils import safe_unicode
from Products.Five.browser import BrowserView
from Products.Five.browser.metaconfigure import ViewMixinForTemplates
from redturtle.prenotazioni import _
from z3c.form import interfaces
from z3c.form import util
from z3c.form.browser.radio import RadioWidget
from z3c.form.interfaces import IRadioWidget
from z3c.form.widget import FieldWidget
from z3c.form.widget import SequenceWidget
from zope.browserpage.viewpagetemplatefile import ViewPageTemplateFile as VPTF
from zope.component import getUtility
from zope.i18n import translate
from zope.pagetemplate.interfaces import IPageTemplate
from zope.schema.interfaces import IVocabularyFactory
from zope.schema.vocabulary import SimpleTerm
import zope
class ICustomRadioFieldWidget(interfaces.IFieldWidget):
    """Marker interface for the custom radio field-widget factory."""
class ICustomRadioWidget(IRadioWidget):
    """Marker interface for the custom booking-type radio widget."""
class RenderWidget(ViewMixinForTemplates, BrowserView):
    """Template helper used to render the custom radio widget.

    Here ``self.context`` is the *widget* being rendered (not a content
    object), so ``self.context.terms`` are the vocabulary terms.
    """

    index = VPTF("templates/booking_type_radio_widget.pt")

    @property
    @memoize
    def prenotazione_add(self):
        """The (bound) prenotazione_add form for the current content."""
        return api.content.get_view(
            "prenotazione_add", self.context.context.aq_inner, self.request
        ).form(self.context.context.aq_inner, self.request)

    @property
    @memoize
    def vocabulary(self):
        """The vocabulary of the rendered field, if any."""
        voc_name = self.context.field.vocabularyName
        if voc_name:
            return getUtility(IVocabularyFactory, name=voc_name)(
                self.context.context.aq_inner
            )

    @property
    @memoize
    def booking_types_bookability(self):
        """Bookable/unbookable booking types for the requested date."""
        booking_date = self.prenotazione_add.booking_DateTime
        prenotazioni = self.prenotazione_add.prenotazioni
        return prenotazioni.booking_types_bookability(booking_date)

    @property
    @memoize
    def unbookable_items(self):
        """Vocabulary terms for the types that cannot be booked."""
        # Removed a no-op list copy (``keys = [key for key in keys]``).
        # NOTE(review): CustomRadioWidget.unbookable_items applies
        # safe_unicode() to the keys at this point -- confirm whether it
        # is needed here as well.
        keys = sorted(self.booking_types_bookability["unbookable"])
        return [
            self.vocabulary.getTerm(key) for key in keys if key in self.context.terms
        ]
@zope.interface.implementer_only(ICustomRadioWidget)
class CustomRadioWidget(RadioWidget):
    """Radio widget that splits booking types into bookable/unbookable."""

    @property
    @memoize
    def prenotazione_add(self):
        """The (bound) prenotazione_add form for the current context."""
        return api.content.get_view(
            "prenotazione_add", self.context, self.request
        ).form(self.context, self.request)

    @property
    @memoize
    def vocabulary(self):
        """The vocabulary backing this widget's field, if any."""
        voc_name = self.field.vocabularyName
        if voc_name:
            return getUtility(IVocabularyFactory, name=voc_name)(self.context)

    @property
    @memoize
    def booking_types_bookability(self):
        """Bookable/unbookable booking types for the requested date."""
        booking_date = self.prenotazione_add.booking_DateTime
        prenotazioni = self.prenotazione_add.prenotazioni
        return prenotazioni.booking_types_bookability(booking_date)

    @property
    @memoize
    def bookable_items(self):
        """Vocabulary terms for the types that can still be booked."""
        keys = sorted(self.booking_types_bookability["bookable"])
        keys = [safe_unicode(key) for key in keys]
        return [self.vocabulary.getTerm(key) for key in keys if key in self.terms]

    @property
    @memoize
    def unbookable_items(self):
        """Vocabulary terms for the types that cannot be booked."""
        keys = sorted(self.booking_types_bookability["unbookable"])
        keys = [safe_unicode(key) for key in keys]
        # Fixed: on the widget ``self.context`` is the content object and
        # has no ``terms``; the widget's own terms are meant here, exactly
        # as in ``bookable_items`` above.
        return [self.vocabulary.getTerm(key) for key in keys if key in self.terms]

    @property
    def items(self):
        """Render data for the bookable radio entries (None when empty)."""
        bookable = self.bookable_items
        if not bookable:
            return
        results = []
        for count, term in enumerate(self.bookable_items):
            checked = self.isChecked(term)
            element_id = "%s-%i" % (self.id, count)
            if zope.schema.interfaces.ITitledTokenizedTerm.providedBy(term):
                label = translate(term.title, context=self.request, default=term.title)
            else:
                label = util.toUnicode(term.value)
            results.append(
                {
                    "id": element_id,
                    "name": self.name,
                    "value": term.token,
                    "label": label,
                    "checked": checked,
                    "index": count,
                }
            )
        return results

    def get_radio_message(self, label):
        """Accessibility text announcing the radio group for *label*."""
        message = "{} {}, {}".format(
            self.context.translate(_("Field", "Campo")),
            self.context.translate(label),
            self.context.translate(
                _(
                    "select-option",
                    "seleziona l'opzione desiderata dal gruppo di radio button seguente",
                )
            ),
        )
        return message

    def renderForValue(self, value, index=0):
        """Render the single radio input for *value*.

        Customized (vs plain z3c.form) because the template needs the
        explicit *index* to build unique element ids.
        """
        terms = list(self.terms)
        try:
            term = self.terms.getTermByToken(value)
        except LookupError:
            if value == SequenceWidget.noValueToken:
                term = SimpleTerm(value)
                terms.insert(0, term)
            else:
                raise
        checked = self.isChecked(term)
        element_id = "{}-{}".format(self.id, index)
        item = {
            "id": element_id,
            "name": self.name,
            "value": term.token,
            "checked": checked,
        }
        template = zope.component.getMultiAdapter(
            (self.context, self.request, self.form, self.field, self),
            IPageTemplate,
            name=self.mode + "_single",
        )
        return template(self, item)
@zope.component.adapter(zope.schema.interfaces.IField, interfaces.IFormLayer)
@zope.interface.implementer(ICustomRadioFieldWidget)
def CustomRadioFieldWidget(field, request):
    """IFieldWidget factory wiring :class:`CustomRadioWidget` to a field."""
    widget = CustomRadioWidget(request)
    return FieldWidget(field, widget)
from plone import api
from plone.memoize.view import memoize
from plone.protect.utils import addTokenToUrl
from Products.CMFCore.utils import getToolByName
from Products.Five.browser import BrowserView
from Products.statusmessages.interfaces import IStatusMessage
from redturtle.prenotazioni import _
from redturtle.prenotazioni.utilities.urls import urlify
class PrenotazionePrint(BrowserView):
    """Printable confirmation page for a single booking.

    The booking is looked up by UID (query-string parameter) so the page
    also works for anonymous users who just created a booking.
    """

    print_action = "javascript:this.print();"

    def get_status_message(self):
        """Human readable message matching the booking review state."""
        messages_mapping = {
            "pending": _(
                "confirm_booking_waiting_message",
                "Your booking has to be confirmed by the administrators.",
            ),
            "confirmed": _(
                "confirm_booking_confirmed_message",
                "Your booking has been confirmed.",
            ),
            "refused": _(
                "confirm_booking_refused_message",
                "Your booking has been refused.",
            ),
        }
        state = api.content.get_state(obj=self.prenotazione)
        return messages_mapping.get(state, "")

    @property
    @memoize
    def label(self):
        """Page heading mentioning the booking folder title."""
        folder_title = self.prenotazione.getPrenotazioniFolder().Title()  # noqa
        return _(
            "reservation_request",
            "Booking request for: ${name}",
            mapping={"name": folder_title},
        )

    @property
    @memoize
    def prenotazione(self):
        """Resolve the ``uid`` request parameter to a Prenotazione object.

        The catalog search is unrestricted because the (possibly
        anonymous) visitor has no View permission on the booking.
        """
        uid = self.request.get("uid")
        if not uid:
            return
        catalog = getToolByName(self.context, "portal_catalog")
        brains = catalog.unrestrictedSearchResults(
            {"portal_type": "Prenotazione", "UID": uid}
        )
        if len(brains) != 1:
            return None
        return brains[0]._unrestrictedGetObject()

    def __call__(self):
        """Render the page; without a booking, bounce back to the folder."""
        if self.prenotazione:
            return super(PrenotazionePrint, self).__call__()
        query = {}
        data = self.request.get("data")
        if data:
            query["data"] = data
        IStatusMessage(self.request).add("Not found", "warning")
        target = urlify(self.context.absolute_url(), params=query)
        return self.request.response.redirect(target)

    def protect_url(self, url):
        """Add a CSRF token to *url* (plone.protect)."""
        return addTokenToUrl(url)
from datetime import datetime
from DateTime import DateTime
from plone import api
from plone.memoize.view import memoize
from Products.Five.browser import BrowserView
from Products.statusmessages.interfaces import IStatusMessage
from redturtle.prenotazioni import _
from redturtle.prenotazioni import logger
from redturtle.prenotazioni import time2timedelta
from redturtle.prenotazioni.adapters.booker import IBooker
from redturtle.prenotazioni.adapters.slot import BaseSlot
from redturtle.prenotazioni.utilities.urls import urlify
from six.moves import map
from z3c.form import button
from z3c.form import field
from z3c.form import form
from z3c.form.interfaces import ActionExecutionError
from zope.interface import implementer
from zope.interface import Interface
from zope.interface import Invalid
from zope.schema import Choice
from zope.schema import Date
from zope.schema import TextLine
from zope.schema import ValidationError
import six
class InvalidDate(ValidationError):
    # ``__doc__`` doubles as the user-facing (translated) error message
    # shown by z3c.form when the constraint fails.
    __doc__ = _("invalid_date")
class InvalidTime(ValidationError):
    # ``__doc__`` doubles as the user-facing (translated) error message
    # shown by z3c.form when the time constraint fails.
    __doc__ = _("invalid_time")
def check_time(value):
    """
    Validate a ``HH:MM`` time string (z3c.form field constraint).

    Empty values are accepted (emptiness is handled by ``required``);
    any other value must parse as a valid 24h clock time, otherwise
    :class:`InvalidTime` is raised.
    """
    if not value:
        return True
    if isinstance(value, str):  # py3-only package: six not needed here
        value = value.strip()
    try:
        hh, mm = [int(chunk) for chunk in value.split(":")]
    except Exception:
        logger.exception("Invalid time: %r", value)
        raise InvalidTime(value)
    # Explicit range checks instead of ``assert``: assertions are stripped
    # when Python runs with -O, which silently disabled this validation.
    if 0 <= hh <= 23 and 0 <= mm <= 59:
        return True
    logger.error("Invalid time: %r", value)
    raise InvalidTime(value)
class IVacationBooking(Interface):
    """Schema of the vacation form: make a gate unavailable for a period."""

    # Shown in the calendar cells covering the vacation.
    title = TextLine(
        title=_("label_title", "Title"),
        description=_(
            "description_title", "This text will appear in the calendar cells"
        ),
        default="",
    )
    gate = Choice(
        title=_("label_gate", "Gate"),
        description=_("description_gate", "The gate that will be unavailable"),
        default="",
        vocabulary="redturtle.prenotazioni.gates",
    )
    start_date = Date(
        title=_("label_start", "Start date "),
        description=_(" format (YYYY-MM-DD)"),
        default=None,
    )
    # Times are free-text HH:MM strings validated by check_time.
    start_time = TextLine(
        title=_("label_start_time", "Start time"),
        description=_("invalid_time"),
        constraint=check_time,
        default="00:00",
    )
    end_time = TextLine(
        title=_("label_end_time", "End time"),
        description=_("invalid_time"),
        constraint=check_time,
        default="23:59",
    )
@implementer(IVacationBooking)
class VacationBooking(form.Form):
    """
    This is a view that allows to book a gate for a certain period
    """

    # Values come from the request, not from the context object.
    ignoreContext = True

    @property
    def fields(self):
        # The gate field is shown only when the folder defines gates.
        fields = field.Fields(IVacationBooking)
        if not self.context.getGates():
            return fields.omit("gate")
        return fields

    def updateWidgets(self):
        # Pre-fill the start date widget with today's date.
        super(VacationBooking, self).updateWidgets()
        self.widgets["start_date"].value = datetime.today().strftime("%Y-%m-%d")

    def get_parsed_data(self, data):
        """
        Return the data already parsed for our convenience

        start_time/end_time become timedeltas from midnight.
        """
        parsed_data = data.copy()
        parsed_data["start_date"] = data["start_date"]  # noqa
        parsed_data["start_time"] = time2timedelta(data["start_time"])
        parsed_data["end_time"] = time2timedelta(data["end_time"])
        return parsed_data

    @property
    @memoize
    def prenotazioni(self):
        """
        The prenotazioni_context_state view in the context
        """
        return api.content.get_view(
            "prenotazioni_context_state", self.context, self.request
        )

    def get_start_time(self, data):
        """The requested start time
        :returns: a datetime
        """
        return (
            datetime(*data["start_date"].timetuple()[:6]) + data["start_time"]
        )  # noqa

    def get_end_time(self, data):
        """The requested end time
        :returns: a datetime
        """
        return datetime(*data["start_date"].timetuple()[:6]) + data["end_time"]

    def get_vacation_slot(self, data):
        """The requested vacation slot"""
        start_time = self.get_start_time(data)
        end_time = self.get_end_time(data)
        return BaseSlot(start_time, end_time)

    def get_slots(self, data):
        """
        Get the slots we want to book!

        Intersects the requested vacation span with the free slots of
        the chosen gate, morning and afternoon.
        """
        start_date = data["start_date"]
        gate = data.get("gate", "")
        vacation_slot = self.get_vacation_slot(data)
        slots = []
        for period in ("morning", "afternoon"):
            free_slots = self.prenotazioni.get_free_slots(start_date, period)
            gate_free_slots = free_slots.get(gate, [])
            [
                slots.append(vacation_slot.intersect(slot))
                for slot in gate_free_slots
                if vacation_slot.overlaps(slot)
            ]
        return slots

    def has_slot_conflicts(self, data):
        """We want the operator to handle conflicts:
        no other booking can be created if we already have stuff
        """
        start_date = data["start_date"]
        busy_slots = self.prenotazioni.get_busy_slots(start_date)
        if not busy_slots:
            return False
        gate_busy_slots = busy_slots.get(data.get("gate", ""), [])
        if not gate_busy_slots:
            return False
        vacation_slot = self.get_vacation_slot(data)
        for slot in gate_busy_slots:
            intersection = vacation_slot.intersect(slot)
            # Zero-length intersections (touching boundaries) don't count.
            if intersection and intersection.lower_value != intersection.upper_value:
                return True
        return False

    def do_book(self, data):
        """
        Execute the multiple booking
        """
        booker = IBooker(self.context.aq_inner)
        slots = self.get_slots(data)
        start_date = DateTime(data["start_date"].strftime("%Y/%m/%d"))
        for slot in slots:
            # lower_value is seconds from midnight; zope DateTime sums
            # fractions of a day (86400 seconds).
            booking_date = start_date + (float(slot.lower_value) / 86400)
            slot.__class__ = BaseSlot
            duration = len(slot) / 60
            slot_data = {k: v for k, v in data.items() if k != "gate"}
            slot_data["booking_date"] = booking_date
            booker.create(slot_data, duration=duration, force_gate=data.get("gate"))
        msg = _("booking_created")
        IStatusMessage(self.request).add(msg, "info")

    @button.buttonAndHandler(_("action_book", default="Book"))
    def action_book(self, action):
        """
        Book this resource
        """
        data, errors = self.extractData()
        parsed_data = self.get_parsed_data(data)
        start_date = data["start_date"]
        if self.has_slot_conflicts(parsed_data):
            msg = _(
                "slot_conflict_error",
                "This gate has some booking schedule in this time " "period.",
            )
            raise ActionExecutionError(Invalid(msg))
        elif not self.prenotazioni.is_valid_day(start_date):
            msg = _("day_error", "This day is not valid.")
            raise ActionExecutionError(Invalid(msg))
        self.do_book(parsed_data)
        qs = {"data": data["start_date"].strftime("%d/%m/%Y")}
        target = urlify(self.context.absolute_url(), params=qs)
        return self.request.response.redirect(target)

    @button.buttonAndHandler(_("action_cancel", default="Cancel"))
    def action_cancel(self, action):
        """
        Cancel
        """
        target = self.context.absolute_url()
        return self.request.response.redirect(target)

    def extra_script(self):
        """The scripts needed for the dateinput"""
        view = api.content.get_view(
            "redturtle.prenotazioni.dateinput.conf.js",
            self.context,
            self.request,
        )
        return view.render() + view.mark_with_class(["#form\\\\.start_date"])
class VacationBookingShow(BrowserView):
    """Feature flag: tells whether the vacation booking UI is enabled."""

    def __call__(self):
        # Currently always enabled; kept as a view so it can be overridden.
        return True
from plone import api
from plone.memoize.view import memoize
from plone.protect.authenticator import createToken
from Products.Five.browser import BrowserView
from redturtle.prenotazioni.config import MIN_IN_DAY
from redturtle.prenotazioni.utilities.urls import urlify
class PrenotazioneView(BrowserView):
    """View for Prenotazione"""

    @property
    @memoize
    def prenotazioni_folder(self):
        """The parent prenotazioni folder"""
        return self.context.getPrenotazioniFolder()

    @property
    @memoize
    def prenotazioni(self):
        """The context state of the parent prenotazioni folder"""
        return api.content.get_view(
            "prenotazioni_context_state",
            self.prenotazioni_folder,
            self.request,
        )

    @property
    def booking_date(self):
        """The booking start date/time of this booking."""
        return self.context.getBooking_date()

    @property
    @memoize
    def back_url(self):
        """Go back to the parent prenotazioni folder in the right day"""
        booking_date = self.booking_date
        target = self.prenotazioni_folder.absolute_url()
        if booking_date:
            qs = {"data": booking_date.strftime("%d/%m/%Y")}
            target = urlify(target, params=qs)
        return target

    @property
    @memoize
    def move_url(self):
        """URL of the move form; empty for users that cannot edit."""
        can_move = api.user.has_permission("Modify portal content", obj=self.context)
        if not can_move:
            return ""
        booking_date = self.booking_date
        target = "/".join((self.context.absolute_url(), "prenotazione_move"))
        if booking_date:
            qs = {"data": booking_date.strftime("%d/%m/%Y")}
            target = urlify(target, params=qs)
        return target

    @property
    @memoize
    def review_state(self):
        """The review_state of this object"""
        return self.prenotazioni.get_state(self.context)

    @property
    def reject_url(self):
        """Link triggering the 'refuse' transition (reviewers only).

        Carries a CSRF token because it changes state via GET.
        """
        can_review = api.user.has_permission("Review portal content", obj=self.context)
        if not can_review:
            return ""
        return "{context_url}/content_status_modify?workflow_action=refuse&_authenticator={token}".format(  # noqa
            context_url=self.context.absolute_url(), token=createToken()
        )
class ResetDuration(PrenotazioneView):
    """Support view that recomputes the booking expiration date."""

    def reset_duration(self):
        """Recompute the expiration date of this booking.

        The duration (in minutes) is read from the request; when absent
        it falls back to the default of the booking type.
        """
        booking_type = self.context.getBooking_type()
        minutes = self.request.form.get("duration", 0)
        if not minutes:
            minutes = self.prenotazioni.get_booking_type_duration(booking_type)
        # Expiration is stored as start date + duration in days.
        days = float(minutes) / MIN_IN_DAY
        self.context.setBooking_expiration_date(self.booking_date + days)

    def __call__(self):
        """Recompute the expiration date, then go back to the calendar."""
        self.reset_duration()
        return self.request.response.redirect(self.back_url)
from BTrees.OOBTree import OOTreeSet
from csv import writer
from datetime import date
from datetime import datetime
from datetime import timedelta
from io import StringIO
from json import dumps
from plone.memoize.view import memoize
from redturtle.prenotazioni import _
from redturtle.prenotazioni import logger
from redturtle.prenotazioni import prenotazioniFileLogger
from redturtle.prenotazioni.browser.base import BaseView as PrenotazioniBaseView
from time import mktime
from zope.annotation.interfaces import IAnnotations
from zope.interface import Interface
from zope.schema import Date
from zope.schema import TextLine
from zope.schema import ValidationError
import six
class InvalidDate(ValidationError):
    # ``__doc__`` doubles as the user-facing (translated) error message
    # shown by z3c.form when the date constraint fails.
    __doc__ = _("invalid_end:search_date", "Invalid start or end date")
def check_date(value):
    """
    Field constraint: accept ``datetime.date`` (or subclass) instances only.
    """
    if not isinstance(value, date):
        raise InvalidDate
    return True
class IQueryForm(Interface):
    """
    Interface for querying stuff
    """

    # Filter on the user that created the bookings.
    user = TextLine(title=_("label_user", "User"), default="", required=False)
    start = Date(
        title=_("label_start", "Start date "),
        description=_(" format (YYYY-MM-DD)"),
        default=None,
        constraint=check_date,
        required=False,
    )
    end = Date(
        title=_("label_end", "End date"),
        description=_(" format (YYYY-MM-DD)"),
        default=None,
        constraint=check_date,
        required=False,
    )
def date2timestamp(value, delta=0):
    """Convert a ``YYYY-MM-DD`` string to a unix timestamp.

    :param value: a date string in ``%Y-%m-%d`` format
    :param delta: days added to the parsed date before converting
    :raises: re-raises any parsing/conversion error after logging it
    """
    try:
        moment = datetime.strptime(value, "%Y-%m-%d") + timedelta(delta)
        return mktime(moment.timetuple())
    except Exception:
        logger.exception("Cannot convert %r to a timestamp", value)
        # Bare ``raise`` re-raises the original exception with its
        # traceback intact (the previous ``raise (e)`` re-raised from
        # this frame instead).
        raise
def timestamp2date(value, date_format="%Y/%m/%d %H:%M"):
    """Render the unix timestamp *value* with *date_format* (local time)."""
    moment = datetime.fromtimestamp(value)
    return moment.strftime(date_format)
class ContextForm(PrenotazioniBaseView):
    """Log/statistics helper aggregating data from the booking folders.

    Entries are appended both to a dedicated logfile and to an
    ``OOTreeSet`` kept in the context annotations.
    """

    logstorage_key = "redturtle.prenotazioni.logstorage"
    file_logger = prenotazioniFileLogger

    @property
    @memoize
    def logstorage(self):
        """The annotation-backed ``OOTreeSet`` holding the log entries."""
        annotations = IAnnotations(self.context)
        if self.logstorage_key not in annotations:
            annotations[self.logstorage_key] = OOTreeSet()
        return annotations[self.logstorage_key]

    def add_entry(self, entry):
        """Store *entry* in the logstorage."""
        return self.logstorage.add(entry)

    def remove_entry(self, entry):
        """Drop *entry* from the logstorage; missing entries are ignored."""
        try:
            return self.logstorage.remove(entry)
        except KeyError:
            pass

    def csvencode(self, data, human_readable=False):
        """Serialize *data* (a list of rows) to a CSV string.

        With ``human_readable`` the timestamp and uid columns are turned
        into readable values (note: rows are modified in place).
        """
        buffer = StringIO()
        csv_writer = writer(buffer)
        for row in data:
            if human_readable:
                row[self._ei_date] = timestamp2date(row[self._ei_date])
                row[4] = self.uid_to_url(row[4])["url"]
                row.pop(3)
            for position, cell in enumerate(row):
                if isinstance(cell, six.text_type):
                    row[position] = cell.encode("utf8")
            csv_writer.writerow(row)
        return buffer.getvalue().strip("\r\n")

    def csvlog(self, data):
        """Log the single row *data* to both the file and the logstorage."""
        encoded = self.csvencode([data])
        self.file_logger.info(encoded)
        self.add_entry(dumps(encoded))
from plone.app.event.base import default_timezone
from plone.stringinterp.adapters import BaseSubstitution
from redturtle.prenotazioni import _
from redturtle.prenotazioni import logger
from zope.component import adapter
from zope.interface import Interface
# plone.app.event's spell_date is optional: without it the
# ${booking_human_readable_start} substitution cannot be rendered and a
# placeholder is returned instead (see BookingHRDateStartSubstitution).
try:
    from plone.app.event.base import spell_date

    have_spell_date = True
except ImportError:
    have_spell_date = False
    logger.exception(
        "\n\nImpossibile importare spell_date da plone.app.event; non si potrà"
        " usare ${booking_human_readable_start} nel markup content rules\n\n"
    )
@adapter(Interface)
class GateSubstitution(BaseSubstitution):
    # String substitution for content-rule templates: the booked gate.
    category = _("Booking")
    description = _("The gate booked.")

    def safe_call(self):
        """Return the booked gate (empty string when missing)."""
        return getattr(self.context, "gate", "")
@adapter(Interface)
class BookingDateSubstitution(BaseSubstitution):
    """Substitution: the booking date, localized via the @@plone view."""

    category = _("Booking")
    description = _("The booked date.")

    def safe_call(self):
        plone_view = self.context.restrictedTraverse("@@plone")
        booking_date = getattr(self.context, "booking_date", "")
        return plone_view.toLocalizedTime(booking_date) if booking_date else ""
@adapter(Interface)
class BookingEndDateSubstitution(BaseSubstitution):
    """Substitution: the booking end date, localized via the @@plone view."""

    category = _("Booking")
    description = _("The booked end date.")

    def safe_call(self):
        plone_view = self.context.restrictedTraverse("@@plone")
        expiration = getattr(self.context, "booking_expiration_date", "")
        return plone_view.toLocalizedTime(expiration) if expiration else ""
@adapter(Interface)
class BookingTimeSubstitution(BaseSubstitution):
    """Substitution: booking start time (HH:MM) in the site timezone."""

    category = _("Booking")
    description = _("The booked time.")

    def safe_call(self):
        booking_date = getattr(self.context, "booking_date", "")
        if not booking_date:
            return ""
        local = booking_date.astimezone(default_timezone(as_tzinfo=True))
        return local.strftime("%H:%M")
@adapter(Interface)
class BookingTimeEndSubstitution(BaseSubstitution):
    """Substitution: booking end time (HH:MM) in the site timezone."""

    category = _("Booking")
    description = _("The booking time end.")

    def safe_call(self):
        expiration = getattr(self.context, "booking_expiration_date", "")
        if not expiration:
            return ""
        local = expiration.astimezone(default_timezone(as_tzinfo=True))
        return local.strftime("%H:%M")
@adapter(Interface)
class BookingTypeSubstitution(BaseSubstitution):
    # String substitution for content-rule templates: the booking type.
    category = _("Booking")
    description = _("The booking type.")

    def safe_call(self):
        """Return the booking type (empty string when missing)."""
        return getattr(self.context, "booking_type", "")
@adapter(Interface)
class BookingCodeSubstitution(BaseSubstitution):
    """Substitution: the booking code."""

    category = _("Booking")
    description = _("The booking code.")

    def safe_call(self):
        # Normalize falsy codes (None, "") to the empty string.
        return self.context.getBookingCode() or ""
@adapter(Interface)
class BookingUrlSubstitution(BaseSubstitution):
    """Substitution: link to the booking print view."""

    category = _("Booking")
    description = _("The booking print url.")

    def safe_call(self):
        folder_url = self.context.getPrenotazioniFolder().absolute_url()
        return "{folder}/@@prenotazione_print?uid={uid}".format(
            folder=folder_url,
            uid=self.context.UID(),
        )
@adapter(Interface)
class BookingPrintUrlWithDeleteTokenSubstitution(BookingUrlSubstitution):
    """
    This is a backward compatibility with old version with token

    Kept only so that old content-rule templates keep working; the
    delete token is no longer appended.
    """

    category = _("Booking")
    description = _("[DEPRECATED] The booking print url with delete token.")
@adapter(Interface)
class BookingUserPhoneSubstitution(BaseSubstitution):
    # String substitution: phone number entered with the reservation.
    category = _("Booking")
    description = _("The phone number of the user who made the reservation.")

    def safe_call(self):
        """Return the phone number (empty string when missing)."""
        return getattr(self.context, "phone", "")
@adapter(Interface)
class BookingUserEmailSubstitution(BaseSubstitution):
    # String substitution: email address entered with the reservation.
    category = _("Booking")
    description = _("The email address of the user who made the reservation.")

    def safe_call(self):
        """Return the email address (empty string when missing)."""
        return getattr(self.context, "email", "")
@adapter(Interface)
class BookingOfficeContactPhoneSubstitution(BaseSubstitution):
    """Substitution: contact phone of the booking office (the folder)."""

    category = _("Booking")
    description = _("The booking office contact phone.")

    def safe_call(self):
        folder = self.context.getPrenotazioniFolder()
        return getattr(folder, "phone", "")
@adapter(Interface)
class BookingOfficeContactPecSubstitution(BaseSubstitution):
category = _("Booking")
description = _("The booking office contact pec address.")
def safe_call(self):
prenotazioni_folder = self.context.getPrenotazioniFolder()
return getattr(prenotazioni_folder, "pec", "")
@adapter(Interface)
class BookingOfficeContactFaxSubstitution(BaseSubstitution):
category = _("Booking")
description = _("The booking office contact fax.")
def safe_call(self):
prenotazioni_folder = self.context.getPrenotazioniFolder()
return getattr(prenotazioni_folder, "fax", "")
@adapter(Interface)
class BookingHowToGetToOfficeSubsitution(BaseSubstitution):
    """Expose directions to reach the booking office.

    NOTE: the typo in the class name ("Subsitution") is kept for backward
    compatibility, since the class may be registered/pickled by name.
    """

    category = _("Booking")
    # Fixed: the two implicitly-concatenated fragments were missing a space,
    # so the default text read "...book a reservation" with an odd join; keep
    # a single-space join between "a" and "reservation".
    description = _(
        "The information to reach the office where user book a reservation"
    )

    def safe_call(self):
        folder = self.context.getPrenotazioniFolder()
        return getattr(folder, "how_to_get_here", "")


@adapter(Interface)
class BookingOfficeCompleteAddressSubstitution(BaseSubstitution):
    """Expose the complete address of the booking office."""

    category = _("Booking")
    # Fixed: "where" "user" concatenated to "whereuser" in the default text.
    description = _(
        "The complete address information of the office where "
        "user book a reservation"
    )

    def safe_call(self):
        folder = self.context.getPrenotazioniFolder()
        return getattr(folder, "complete_address", "")
@adapter(Interface)
class BookingHRDateStartSubstitution(BaseSubstitution):
    """Expose the booking start as a human readable (Italian) date string."""

    category = _("Booking")
    description = _("The booking human readable date")

    def safe_call(self):
        # we need something like martedì 8 settembre 2020 alle ore 11:15
        if not getattr(self.context, "booking_date", ""):
            return ""
        if not have_spell_date:
            return "SPELL_DATE_NOT_AVAILABLE"
        info = spell_date(self.context.booking_date, self.context)
        return (
            "{day_name} {day_number} {month_name} {year}"
            " alle ore {hour}:{minute}".format(
                day_name=info["wkday_name"],
                day_number=info["day"],
                month_name=info["month_name"],
                year=info["year"],
                hour=info["hour"],
                minute=info["minute2"],
            )
        )
@adapter(Interface)
class BookingUrlWithDeleteToken(BaseSubstitution):
    """Expose the URL used to cancel the booking."""

    category = _("Booking")
    description = _("The booking url with delete token")

    def safe_call(self):
        folder = self.context.getPrenotazioniFolder()
        return "{booking_url}/@@delete_reservation?uid={uid}".format(
            booking_url=folder.absolute_url(),
            uid=self.context.UID(),
        )


@adapter(Interface)
class BookingOperatorUrlSubstitution(BaseSubstitution):
    """Expose the absolute URL of the booking object (operator view)."""

    category = _("Booking")
    description = _("The booking operator url")

    def safe_call(self):
        return self.context.absolute_url()
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from zope.component import adapter, getAdapter
from zope.interface import implementer
from Products.DCWorkflow.interfaces import IAfterTransitionEvent
from plone import api
from plone.stringinterp.interfaces import IContextWrapper
from plone.stringinterp.interfaces import IStringInterpolator
from plone.event.interfaces import IICalendar
from redturtle.prenotazioni.interfaces import IPrenotazioneEmailMessage
from redturtle.prenotazioni.prenotazione_event import IMovedPrenotazione
from redturtle.prenotazioni.content.prenotazione import IPrenotazione
from redturtle.prenotazioni import logger
class PrenotazioneEventEmailMessage:
    """Base builder of a MIME e-mail for a booking related event.

    Subclasses must implement ``message_subject`` and ``message_text``.
    """

    prenotazione = None  # the booking content object
    event = None  # the zope event that triggered the notification

    def __init__(self, prenotazione, event):
        self.prenotazione = prenotazione
        self.event = event

    @property
    def message_subject(self) -> str:
        """Subject line of the outgoing e-mail (subclass responsibility)."""
        raise NotImplementedError("The method was not implemented")

    @property
    def message_text(self) -> MIMEText:
        """Body MIME part of the outgoing e-mail (subclass responsibility)."""
        raise NotImplementedError("The method was not implemented")

    @property
    def message(self) -> MIMEMultipart:
        """Assemble the full multipart message, or None if it cannot be built."""
        error_msg = "Could not send notification email due to: {message}"
        sender = api.portal.get_registry_record("plone.email_from_address")
        recipient = self.prenotazione.email
        if not sender:
            logger.error(
                error_msg.format(message="Email from address is not configured")
            )
            return None
        if not recipient:
            logger.error(
                error_msg.format(
                    message="Could not find recipients for the email message"
                )
            )
            return None
        message = MIMEMultipart()
        message.attach(self.message_text)
        message["Subject"] = self.message_subject
        message["From"] = sender
        message["To"] = recipient
        return message
class PrenotazioneEventMessageICalMixIn:
    """Mixin that appends the booking's iCalendar file to the e-mail."""

    @property
    def message(self):
        """Return the multipart message with an .ics attachment, or None.

        Note: the dead ``*args, **kwargs`` on the property getter were
        removed — property getters are only ever called with ``self``.
        """
        message = super().message
        if not message:
            # Fixed: this used to be logger.error(logger.error(...)), which
            # logged the text and then logged the inner call's None return.
            logger.error("Could not compose email due to no message was created")
            return None
        message.add_header("Content-class", "urn:content-classes:calendarmessage")
        ical = getAdapter(object=self.prenotazione, interface=IICalendar)
        name = f"{self.prenotazione.getId()}.ics"
        icspart = MIMEText(ical.to_ical().decode("utf-8"), "calendar")
        icspart.add_header("Filename", name)
        icspart.add_header("Content-Disposition", f"attachment; filename={name}")
        message.attach(icspart)
        return message
@implementer(IPrenotazioneEmailMessage)
@adapter(IPrenotazione, IMovedPrenotazione)
class PrenotazioneMovedICalEmailMessage(
    PrenotazioneEventMessageICalMixIn, PrenotazioneEventEmailMessage
):
    """E-mail (with iCal attachment) sent when a booking is moved.

    Subject/body templates come from the PrenotazioniFolder fields
    ``notify_on_move_subject`` / ``notify_on_move_message``.
    """

    @property
    def message_subject(self) -> str:
        # interpolate the folder's "moved" subject template against the
        # wrapped booking (IContextWrapper(...)() builds the wrapped context)
        return IStringInterpolator(IContextWrapper(self.prenotazione)())(
            getattr(
                self.prenotazione.getPrenotazioniFolder(),
                "notify_on_move_subject",
                "",
            )
        )

    @property
    def message_text(self) -> MIMEText:
        # interpolate the folder's "moved" body template and wrap it as an
        # HTML MIME part
        return MIMEText(
            IStringInterpolator(IContextWrapper(self.prenotazione)())(
                getattr(
                    self.prenotazione.getPrenotazioniFolder(),
                    "notify_on_move_message",
                    None,
                ),
            ),
            "html",
        )
@implementer(IPrenotazioneEmailMessage)
@adapter(IPrenotazione, IAfterTransitionEvent)
class PrenotazioneAfterTransitionEmailMessage(PrenotazioneEventEmailMessage):
    """E-mail built after a workflow transition on a booking.

    Subject and body templates are read from PrenotazioniFolder fields named
    after the fired transition: ``notify_on_<transition>_subject`` and
    ``notify_on_<transition>_message``.
    """

    @property
    def message_subject(self) -> str:
        # interpolate the per-transition subject template against the booking;
        # event.transition may be None, hence the "and" guard in the f-string
        return IStringInterpolator(IContextWrapper(self.prenotazione)())(
            getattr(
                self.prenotazione.getPrenotazioniFolder(),
                f"notify_on_{self.event.transition and self.event.transition.__name__}_subject",
                "",
            )
        )

    @property
    def message_text(self) -> MIMEText:
        # interpolate the per-transition body template and wrap it as HTML
        return MIMEText(
            IStringInterpolator(IContextWrapper(self.prenotazione)())(
                getattr(
                    self.prenotazione.getPrenotazioniFolder(),
                    f"notify_on_{self.event.transition and self.event.transition.__name__}_message",
                    None,
                ),
            ),
            "html",
        )


@implementer(IPrenotazioneEmailMessage)
@adapter(IPrenotazione, IAfterTransitionEvent)
class PrenotazioneAfterTransitionEmailICalMessage(
    PrenotazioneEventMessageICalMixIn, PrenotazioneAfterTransitionEmailMessage
):
    """Transition notification e-mail with the iCalendar file attached."""

    pass
from DateTime import DateTime
from pyinter.interval import Interval
from six.moves import map
from six.moves import range
from zope.component import Interface
from zope.interface import implementer
from plone.app.event.base import default_timezone
def is_intervals_overlapping(intervals):
    """Return True if any two intervals in *intervals* overlap.

    Each interval is an indexable pair ``(start, stop)``.  After sorting by
    start, two consecutive intervals overlap exactly when the later one
    starts before the earlier one ends (touching endpoints do not count).
    """
    if len(intervals) < 2:
        return False
    ordered = sorted(intervals, key=lambda iv: iv[0])
    for previous, current in zip(ordered, ordered[1:]):
        if current[0] < previous[1]:
            return True
    return False
def interval_is_contained(interval, lower_bound, upper_bound):
    """Return True if *interval* lies entirely inside the given bounds.

    :param interval: an indexable pair, e.g. ``['0700', '0800']``
    :param lower_bound: e.g. ``'0700'``
    :param upper_bound: e.g. ``'1300'``

    The start may coincide with *lower_bound* but not with *upper_bound*;
    the end may coincide with *upper_bound* but not with *lower_bound*.
    The interval must be increasing (asserted).
    """
    start = interval[0]
    end = interval[1]
    assert start < end
    return (
        lower_bound <= start < upper_bound
        and lower_bound < end <= upper_bound
    )
def slots_to_points(slots):
    """Return the sorted list of all endpoint values of the given slots."""
    points = []
    for slot in slots:
        points.append(slot.lower_value)
        points.append(slot.upper_value)
    return sorted(points)
class ISlot(Interface):
    """
    Interface for a Slot object
    """


class LowerEndpoint(int):
    """Lower Endpoint"""

    # int subclass used as a marker: BaseSlot.__sub__ checks isinstance()
    # to tell where an interval opens when scanning sorted endpoints


class UpperEndpoint(int):
    """Upper Endpoint"""

    # int subclass used as a marker: BaseSlot.__sub__ checks isinstance()
    # to tell where an interval closes when scanning sorted endpoints
@implementer(ISlot)
class BaseSlot(Interval):
    """Overrides and simplifies pyinter.Interval.

    Endpoint values are seconds from midnight (local time); see
    :meth:`time2seconds`.
    """

    # both endpoints belong to the interval
    _lower = Interval.CLOSED
    _upper = Interval.CLOSED
    # the booking object backing this slot, when any (see Slot below)
    context = None
    gate = ""
    extra_css_styles = []

    def __repr__(self):
        return f"[{self.start()}:{self.stop()}]"

    @staticmethod
    def time2seconds(value):
        """
        Takes a value and converts it into daily seconds (localtime!)
        :param value: a datetime or DateTime object (ints pass through
            unchanged, falsy values return None)
        """
        if isinstance(value, int):
            return value
        if not value:
            return None
        # normalize Zope DateTime to a stdlib datetime first
        if isinstance(value, DateTime):
            value = value.asdatetime()
        value = value.astimezone(default_timezone(as_tzinfo=True))
        return value.hour * 60 * 60 + value.minute * 60 + value.second

    def __init__(self, start, stop, gate="", date=""):
        """
        Initialize an BaseSlot
        :param start: slot start (datetime/DateTime or daily seconds)
        :param stop: slot stop (datetime/DateTime or daily seconds)
        :param gate: the gate this slot belongs to
        :param date: the day this slot belongs to
        """
        # endpoints are wrapped in Lower/UpperEndpoint markers so that
        # __sub__ can tell interval openings from closings
        if start is not None:
            self._lower_value = LowerEndpoint(self.time2seconds(start))
        if stop is not None:
            self._upper_value = UpperEndpoint(self.time2seconds(stop))
        self.gate = gate
        self.date = date

    def __len__(self):
        """The length of this object, in seconds."""
        if self._upper_value is None or self.lower_value is None or self.empty():
            return 0
        return self._upper_value - self.lower_value

    def __nonzero__(self):
        """Check if this should be True"""
        # NOTE(review): __nonzero__ is the Python 2 truthiness protocol; on
        # Python 3 truthiness falls back to __len__ — confirm still intended
        if isinstance(self._lower_value, int) and isinstance(self._upper_value, int):
            return 1
        else:
            return 0

    def __sub__(self, value):
        """Subtract something from this.

        :param value: an Interval or a list of Intervals
        :return: the list of BaseSlots covering the parts of this slot not
            covered by *value*
        """
        if isinstance(value, Interval):
            value = [value]
        # We filter not overlapping intervals
        good_intervals = [x for x in value if x.overlaps(self)]
        points = slots_to_points(good_intervals)
        start = self.lower_value
        intervals = []
        for x in points:
            if isinstance(x, LowerEndpoint) and x > start:
                intervals.append(BaseSlot(start, x))
                # we raise the bar waiting for another stop
                start = self.upper_value
            elif isinstance(x, UpperEndpoint):
                start = x
        # append only valid intervals
        if start < self.upper_value:
            intervals.append(BaseSlot(start, self.upper_value))
        return intervals

    def value_hr(self, value):
        """Format *value* (seconds from midnight) as "HH:MM"."""
        if not value:
            return ""
        hour = str(value // 3600).zfill(2)
        minute = str(int((value % 3600) / 60)).zfill(2)
        return "%s:%s" % (hour, minute)

    def start(self):
        """Return the starting time"""
        return self.value_hr(self._lower_value)

    def stop(self):
        """Return the stopping time"""
        return self.value_hr(self._upper_value)

    def get_offset(self, is_interval):
        """
        We have two case to handle
        In case we have a slot crossing over hours, we need to add a pixel for
        every hour we change. e.g. if we have pause or appointment between 8.45
        and 9.15 we need to add 1px.
        If we have pause or appointment between 8.55 and 10.05, we need to add
        2px.
        This is caused by the border we add when we draw under every free hour
        We check if we have context, so we are sure we are dealing with a pause
        or with a reserveation. Then we take start and stop that are human
        readable hours (like 8:00). With this we can take just hours splitting
        the string and then make a difference between stop and start.
        Second case. If we have BaseSlot we are drawing the gate columns. We
        need to add a pixel offset 'cause if we don't stop at the end of the
        hour, in the table the box with the hour is half cutted
        """
        if self.context:
            start = int(self.start().split(":")[0])
            stop = int(self.stop().split(":")[0])
            return (stop - start) * 1.0
        if is_interval:
            stop = self.stop().split(":")[1]
            if stop in ("15", "30", "45"):
                offset = {"15": 45.0, "30": 30.0, "45": 15.0}[stop]
                return offset
        return 0.0

    def css_styles(self, is_interval=False):
        """the css styles for this slot
        The height of the interval in pixel is equal
        to the interval length in minutes
        """
        styles = []
        if self._upper_value and self._lower_value:
            # we add 1px for each hour to account for the border
            # between the slots
            height = len(self) / 60 * 1.0 + len(self) / 3600
            offset = self.get_offset(is_interval)
            height = height + offset
            styles.append("height:%dpx" % height)
        styles.extend(self.extra_css_styles)
        return ";".join(styles)

    def get_values_hr_every(self, width, slot_min_size=0):
        """This partitions this slot if pieces of length width and
        return the human readable value of the starts
        If slot is [0, 1000]
        calling this with width 300 will return
        ["00:00", "00:05", "00:10"]
        If slot_min_size is passed it will not return values whose distance
        from slot upper value is lower than this
        """
        if slot_min_size > len(self):
            return []
        number_of_parts = int(len(self) / width)
        values = set([])
        start = self.lower_value
        end = self.upper_value
        for i in range(number_of_parts):
            value = start + width * i
            # skip starts too close to the slot end
            if (end - value) >= slot_min_size:
                values.add(value)
        return list(map(self.value_hr, sorted(values)))
@implementer(ISlot)
class Slot(BaseSlot):
    """A BaseSlot built from a Prenotazione (booking) object."""

    def __eq__(self, other):
        # two slots are equal only when they wrap the same booking AND
        # share the same interval boundaries
        if not (self.context == other.context):
            return False
        return super(Slot, self).__eq__(other)

    def __init__(self, context):
        """
        :param context: a Prenotazione object
        """
        self.context = context
        super().__init__(
            context.getBooking_date(),
            context.getBooking_expiration_date(),
            getattr(self.context, "gate", ""),
            getattr(self.context, "date", ""),
        )
from Acquisition import aq_inner
from plone.app.event.base import default_timezone
from plone.app.event.ical.exporter import ICalendarEventComponent
from plone.app.event.ical.exporter import PRODID
from plone.app.event.ical.exporter import VERSION
from plone.event.interfaces import IICalendar
from plone.event.interfaces import IICalendarEventComponent
from plone.registry.interfaces import IRegistry
from plone.stringinterp.interfaces import IStringSubstitution
from redturtle.prenotazioni import _
from zope.component import getAdapter
from zope.component import getUtility
from zope.i18n import translate
from zope.interface import implementer
import icalendar
def construct_icalendar(context, bookings):
    """Returns an icalendar.Calendar object.

    :param context: a content object used for calendar details (e.g. the
        timezone); usually a container, collection or the booking itself.
    :param bookings: the booking object(s) included in this calendar; a
        single object is wrapped in a list automatically.
    """
    calendar = icalendar.Calendar()
    calendar.add("prodid", PRODID)
    calendar.add("version", VERSION)
    timezone_id = default_timezone(context)
    if timezone_id:
        calendar.add("x-wr-timezone", timezone_id)
    # a single booking (no __getitem__) is treated as a one-element list
    if not getattr(bookings, "__getitem__", False):
        bookings = [bookings]
    for booking in bookings:
        component = IICalendarEventComponent(booking)
        calendar.add_component(component.to_ical())
    return calendar
@implementer(IICalendarEventComponent)
class ICalendarBookingComponent(ICalendarEventComponent):
    """Build the iCalendar VEVENT component for a booking (Prenotazione)."""

    def __init__(self, context):
        self.context = context
        # the base class works on self.event; for bookings they coincide
        self.event = self.context
        self.ical = icalendar.Event()

    @property
    def parent(self):
        # the PrenotazioniFolder this booking belongs to
        return self.context.getPrenotazioniFolder()

    @property
    def dtstart(self):
        # event start = booking start
        return {"value": self.context.booking_date}

    @property
    def dtend(self):
        # event end = booking expiration
        return {"value": self.context.booking_expiration_date}

    @property
    def summary(self):
        # "Booking for <folder title>", translated
        title = translate(
            _(
                "ical_booking_label",
                default="Booking for {}".format(self.parent.title),
            )
        )
        return {"value": title}

    @property
    def url(self):
        # public print URL, reusing the string substitution adapter
        return {
            "value": getAdapter(
                self.context, IStringSubstitution, "booking_print_url"
            )()
        }

    @property
    def location(self):
        return {"value": getattr(self.parent, "complete_address", "")}

    @property
    def contact(self):
        # prefer the folder's PEC address, fall back to the portal-wide
        # "from" address stored in the registry
        email = getattr(self.parent, "pec", "")
        if not email:
            registry = getUtility(IRegistry)
            record = registry.records.get("plone.email_from_address", None)
            if record:
                email = record.value
        return {"value": email}

    @property
    def uid(self):
        return {"value": self.context.UID()}

    @property
    def organizer(self):
        return {
            "value": "MAILTO:{}".format(self.contact.get("value", "")),
            "parameters": {"CN": self.parent.title},
        }

    def to_ical(self):
        """Populate and return the icalendar.Event for this booking."""
        # TODO: event.text
        ical_add = self.ical_add
        ical_add("dtstamp", self.dtstamp)
        ical_add("created", self.created)
        ical_add("last-modified", self.last_modified)
        ical_add("uid", self.uid)
        ical_add("url", self.url)
        ical_add("summary", self.summary)
        ical_add("description", self.description)
        ical_add("dtstart", self.dtstart)
        ical_add("dtend", self.dtend)
        ical_add("location", self.location)
        ical_add("contact", self.contact)
        ical_add("organizer", self.organizer)
        return self.ical
@implementer(IICalendar)
def calendar_from_booking(context):
    """Booking adapter. Returns an icalendar.Calendar object from a Booking
    context.
    """
    booking = aq_inner(context)
    return construct_icalendar(booking, [booking])
from datetime import datetime
from io_tools.storage import Storage as Base
from pony import orm
db = orm.Database()
class Message_io(db.Entity):
    """PonyORM entity storing one AppIO message and its delivery status."""

    # message id assigned by IO once accepted; empty until then
    msgid = orm.Optional(str, nullable=True)
    # unique local key, used to avoid submitting the same message twice
    key = orm.Required(str, unique=True)
    fiscal_code = orm.Required(str)
    subject = orm.Required(str)
    body = orm.Required(str)
    # optional payment section (all None when the message has no payment)
    amount = orm.Optional(int, nullable=True)
    notice_number = orm.Optional(str, nullable=True)
    invalid_after_due_date = orm.Optional(bool, nullable=True)
    due_date = orm.Optional(datetime, nullable=True)
    # local delivery status (e.g. "created"; see Storage.create_message)
    status = orm.Required(str)
    info = orm.Optional(str, nullable=True)

    def __repr__(self):
        return '<{} : {} "{}" {}>'.format(
            self.__class__.__name__,
            self.fiscal_code,
            self.subject,
            self.status,
        )
class Storage(Base):
    """Relational-database backed storage for AppIO messages (PonyORM)."""

    def __init__(self, config):
        # "create_tables" is our own flag; the rest of the mapping is
        # forwarded verbatim to pony's Database.bind()
        create_tables = config.pop("create_tables", False)
        db.bind(**config)
        db.generate_mapping(create_tables=create_tables)

    @staticmethod
    @orm.db_session
    def get_data(query, params=None):
        """
        Return the data used to build the messages, via a raw query on
        the db.
        """
        return db.select(query, params)

    @staticmethod
    @orm.db_session
    def get_message(key):
        """Given a message key, return the message saved in the storage,
        or None.

        Args:
            key (string): unique key for a message, built from the message
                data; it is NOT the msgid registered on IO. It is used to
                avoid sending the same message twice.

        Returns:
            [Message_io|None]: a message object with the attributes defined
                in the reference implementation io_tools.rdbms.Message_io
        """
        return orm.select(m for m in Message_io if m.key == key).first()

    @staticmethod
    @orm.db_session
    def update_message(key, **kwargs):
        """Update the stored message identified by *key* with **kwargs."""
        msg = orm.select(m for m in Message_io if m.key == key).first()
        for k, v in kwargs.items():
            setattr(msg, k, v)
        return msg

    @staticmethod
    @orm.db_session
    def create_message(key, fiscal_code, subject, body, payment_data, due_date):
        """Persist a new message row with status "created"."""
        msg = Message_io(
            key=key,
            fiscal_code=fiscal_code,
            subject=subject,
            body=body,
            amount=payment_data["amount"] if payment_data else None,
            notice_number=payment_data["notice_number"] if payment_data else None,
            invalid_after_due_date=payment_data["invalid_after_due_date"]
            if payment_data
            else None,
            due_date=due_date,
            status="created",
        )
        return msg
from datetime import datetime
from datetime import time
from datetime import timedelta
from plone.app.event.base import default_timezone
from redturtle.prenotazioni import tznow
import six
def hm2handm(hm):
    """Split an "HHMM" string into its hour and minute parts.

    :param hm: a string in the format "%H%M" (e.g. "0730"), or a
        one-element tuple wrapping such a string
    :return: a ``(hour, minute)`` tuple of two-character strings
    :raises ValueError: if ``hm`` is empty, not a string or not 4 chars long

    Note: the ``six.string_types`` check was replaced with plain ``str`` —
    this file already uses Python-3-only syntax (f-strings), so the six
    shim was dead weight.
    """
    # some callers pass the value wrapped in a tuple, e.g. ("0700",)
    if hm and isinstance(hm, tuple):
        hm = hm[0]
    if (not hm) or (not isinstance(hm, str)) or (len(hm) != 4):
        raise ValueError(hm)
    return (hm[:2], hm[2:])
def hm2DT(day, hm, tzinfo=None):
    """Combine *day* and the time string *hm* into a localized datetime.

    :param day: a datetime date
    :param hm: a string in the format "%H%M" or "%H:%M"
    :param tzinfo: a timezone object (default: plone's local timezone)
    :return: a localized datetime, or None for empty/"--NOVALUE--" input
    """
    zone = tzinfo if tzinfo is not None else default_timezone(as_tzinfo=True)
    if not hm or hm in ("--NOVALUE--", ("--NOVALUE--",)):
        return None
    text = hm
    # normalize the compact "HHMM" form to "HH:MM"
    if len(text) == 4 and ":" not in text:
        text = text[:2] + ":" + text[2:]
    naive = datetime.combine(day, time.fromisoformat(text))
    return zone.localize(naive)
def hm2seconds(hm):
    """Convert an "HHMM" string to seconds from midnight (None if empty)."""
    if not hm:
        return None
    hours, minutes = hm2handm(hm)
    return int(hours) * 3600 + int(minutes) * 60
def exceedes_date_limit(data, future_days):
    """Return True if data["booking_date"] is more than *future_days* ahead.

    A falsy *future_days* disables the limit; a missing or non-datetime
    booking_date never exceeds it.
    """
    if not future_days:
        return False
    booking_date = data.get("booking_date", None)
    if not isinstance(booking_date, datetime):
        return False
    date_limit = tznow() + timedelta(future_days)
    # make a naive booking date comparable with the aware limit
    if not booking_date.tzinfo:
        tzinfo = date_limit.tzinfo
        if tzinfo:
            booking_date = tzinfo.localize(booking_date)
    return booking_date > date_limit
from Acquisition import aq_base
from Acquisition import aq_inner
from collective.contentrules.mailfromfield import logger
from collective.contentrules.mailfromfield.actions.mail import IMailFromFieldAction
from collective.contentrules.mailfromfield.actions.mail import (
MailActionExecutor as BaseExecutor,
)
from plone.contentrules.rule.interfaces import IExecutable
from plone.dexterity.interfaces import IDexterityContainer
from plone.event.interfaces import IICalendar
from Products.CMFPlone.interfaces.siteroot import IPloneSiteRoot
from redturtle.prenotazioni.prenotazione_event import IMovedPrenotazione
from six.moves import filter
from zope.component import adapter
from zope.interface import implementer
from zope.interface import Interface
import six
class MailActionExecutor(BaseExecutor):
    """The executor for this action.

    Specializes the base mail-from-field executor for Prenotazione objects:
    recipients come from a field of a target object, and an iCalendar file
    is attached on confirm/move events.

    Modernized: the Python-2-era ``type(x) == str or type(x) == six.text_type``
    check became ``isinstance(x, str)`` and ``hasattr(attr, "__call__")``
    became ``callable(attr)`` — this file already uses f-strings, so it is
    Python-3 only.
    """

    def get_target_obj(self):
        """Get's the target object, i.e. the object that will provide the field
        with the email address
        """
        event_obj = self.event.object
        # non-booking content keeps the stock behaviour
        if event_obj.portal_type != "Prenotazione":
            return super().get_target_obj()
        target = self.element.target
        if target == "object":
            obj = self.context
        elif target == "parent":
            # this is the patch: for bookings, "parent" means the booking's
            # own container, not the rule context's parent
            return event_obj.aq_parent
        elif target == "target":
            obj = event_obj
        else:
            raise ValueError(target)
        return aq_base(aq_inner(obj))

    def get_recipients(self):
        """
        The recipients of this mail, read from the configured field of the
        target object (either an attribute or a zero-argument method).
        """
        if self.event.object.portal_type != "Prenotazione":
            return super().get_recipients()
        # Try to load data from the target object
        fieldName = str(self.element.fieldName)
        obj = self.get_target_obj()
        attr = getattr(obj, fieldName)
        if callable(attr):
            recipients = attr()
            logger.debug("getting e-mail from %s method" % fieldName)
        else:
            recipients = attr
            logger.debug("getting e-mail from %s attribute" % fieldName)
        # normalize a single address to a list, then drop empty entries
        if isinstance(recipients, str):
            recipients = [recipients]
        if not recipients:
            return []
        return [recipient for recipient in recipients if recipient]

    def manage_attachments(self, msg):
        """Attach the booking's iCalendar file on confirm/move events."""
        booking = self.event.object
        action = getattr(self.event, "action", "")
        if (
            not (action == "confirm" or IMovedPrenotazione.providedBy(self.event))
            or booking.portal_type != "Prenotazione"
        ):
            return
        cal = IICalendar(booking)
        ical = cal.to_ical()
        name = f"{booking.getId()}.ics"
        msg.add_attachment(
            ical,
            maintype="text",
            subtype="calendar",
            filename=name,
        )
# the same executor is registered twice so the rule works both at the site
# root and inside any folderish content
@implementer(IExecutable)
@adapter(IPloneSiteRoot, IMailFromFieldAction, Interface)
class MailActionExecutorRoot(MailActionExecutor):
    """Registered for site root"""


@implementer(IExecutable)
@adapter(IDexterityContainer, IMailFromFieldAction, Interface)
class MailActionExecutorFolder(MailActionExecutor):
    """Registered for folderish content"""
from .prenotazioni_folder import IPrenotazioniFolder
from DateTime import DateTime
from plone import api
from plone.app.event.base import default_timezone
from plone.app.z3cform.widget import DatetimeFieldWidget
from plone.autoform import directives
from plone.dexterity.content import Item
from plone.supermodel import model
from redturtle.prenotazioni import _
from redturtle.prenotazioni import datetime_with_tz
from redturtle.prenotazioni import tznow
from redturtle.prenotazioni.utils import is_migration
from zope import schema
from zope.interface import implementer
from zope.schema import ValidationError
import hashlib
import re
import six
VACATION_TYPE = "out-of-office"
TELEPHONE_PATTERN = re.compile(r"^(\+){0,1}([0-9]| )*$")
class InvalidPhone(ValidationError):
    # shown to the user when the phone number does not match TELEPHONE_PATTERN
    __doc__ = _("invalid_phone_number", "Invalid phone number")


class InvalidEmailAddress(ValidationError):
    # shown to the user when portal_registration rejects the address
    __doc__ = _("invalid_email_address", "Invalid email address")


class IsNotfutureDate(ValidationError):
    # shown to the user when the booking date is in the past
    __doc__ = _("is_not_future_date", "This date is past")


class InvalidFiscalcode(ValidationError):
    # shown to the user when the fiscal code fails checksum validation
    __doc__ = _("invalid_fiscalcode", "Invalid fiscal code")
def check_phone_number(value):
    """Validate an optional phone number.

    :param value: the phone number (may be empty/None: the field is optional)
    :return: True when empty or matching TELEPHONE_PATTERN
    :raises InvalidPhone: otherwise

    Note: the ``six.string_types`` check was replaced with plain ``str``;
    the package is Python-3 only (f-strings are used elsewhere in it).
    """
    if not value:
        # the field is optional: empty values are fine
        return True
    if isinstance(value, str):
        value = value.strip()
    if TELEPHONE_PATTERN.match(value) is not None:
        return True
    raise InvalidPhone(value)
def check_valid_email(value):
    """Check if value is a valid email address.

    Empty values are accepted (the field is optional); otherwise the
    portal_registration tool decides, and failures raise
    InvalidEmailAddress.
    """
    if not value:
        return True
    registration = api.portal.get_tool(name="portal_registration")
    if not registration.isValidEmail(value):
        raise InvalidEmailAddress
    return True
# TODO: validare considerando anche TINIT-XXX...
# (vedi https://it.wikipedia.org/wiki/Codice_fiscale#Codice_fiscale_ordinario)
def check_valid_fiscalcode(value):
    """Validate an Italian fiscal code (16-char form with checksum).

    Empty values and the development placeholder "AAAAAA00A00A000A" are
    accepted as-is.

    :raises InvalidFiscalcode: on wrong length, invalid characters or a
        checksum mismatch
    """
    # special value used in development/test environments
    if value == "AAAAAA00A00A000A":
        return True
    if not value:
        return True
    value = value.upper()
    if len(value) != 16:
        raise InvalidFiscalcode(value)
    allowed = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    if any(char not in allowed for char in value):
        raise InvalidFiscalcode(value)
    # set1/set2 map each character to its canonical letter ('0'->'A' ... '9'->'J')
    set1 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    set2 = "ABCDEFGHIJABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # per-position value tables of the official checksum algorithm
    evens = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    odds = "BAKPLCQDREVOSFTGUHMINJWZYX"
    total = 0
    for position in range(1, 14, 2):
        total += evens.find(set2[set1.find(value[position])])
    for position in range(0, 15, 2):
        total += odds.find(set2[set1.find(value[position])])
    # the 16th character is the check letter
    if total % 26 != ord(value[15]) - ord("A"):
        raise InvalidFiscalcode(value)
    return True
def check_is_future_date(value):
    """Validate that *value* is a date in the future.

    :raises IsNotfutureDate: when the date is in the past or cannot be
        normalized to an aware datetime
    """
    # Do not check the value if we are performing a migration
    if is_migration():
        return True
    if not value:
        return True
    try:
        if datetime_with_tz(value) >= tznow():
            return True
        # past dates fall through to the handler below
        raise IsNotfutureDate
    except Exception:
        raise IsNotfutureDate
class IPrenotazione(model.Schema):
    """Marker interface and Dexterity Python Schema for Prenotazione"""

    # read-only in forms: the date is assigned by the booking machinery
    directives.mode(booking_date="display")
    booking_date = schema.Datetime(
        title=_("label_booking_time", "Booking time"),
        required=True,
        default=None,
        constraint=check_is_future_date,
    )

    booking_type = schema.Choice(
        title=_("label_booking_type", default="Booking type"),
        vocabulary="redturtle.prenotazioni.booking_types",
        required=True,
    )

    email = schema.TextLine(
        title=_("label_booking_email", default="Email"),
        constraint=check_valid_email,
        default="",
        required=False,
    )

    phone = schema.TextLine(
        title=_("label_booking_phone", default="Phone number"),
        required=False,
        default="",
        constraint=check_phone_number,
    )

    # NOTE: the fiscal-code constraint is intentionally disabled
    fiscalcode = schema.TextLine(
        title=_("label_booking_fiscalcode", default="Fiscal code"),
        default="",
        # constraint=check_valid_fiscalcode,
        required=False,
    )

    company = schema.TextLine(
        title=_("label_booking_company", default="Company"),
        description=_(
            "description_company",
            default="If you work for a company, please specify its name.",
        ),
        required=False,
    )

    # read-only in forms: the gate is assigned automatically
    directives.mode(gate="display")
    gate = schema.TextLine(
        title=_("Gate"),
        description=_("Sportello a cui presentarsi"),
    )

    # read-only in forms: computed from booking_date + duration
    directives.mode(booking_expiration_date="display")
    booking_expiration_date = schema.Datetime(
        title=_("Expiration date booking"), required=True
    )

    staff_notes = schema.Text(
        required=False, title=_("label_booking_staff_notes", "Staff notes")
    )

    # render booking_date with the timezone-aware datetime widget
    directives.widget(
        "booking_date",
        DatetimeFieldWidget,
        default_timezone=default_timezone,
        klass="booking_date",
    )
@implementer(IPrenotazione)
class Prenotazione(Item):
    """Dexterity content class for a single booking."""

    def isVacation(self):
        """Return True when this booking is an out-of-office slot.

        Fixed: the comparison result was computed but never returned, so
        this method always returned None (falsy for every booking).
        """
        return self.getBooking_type() == VACATION_TYPE

    def getBooking_date(self):
        """Return the booking start datetime."""
        return self.booking_date

    def setBooking_date(self, date):
        self.booking_date = date
        return

    def getBooking_expiration_date(self):
        """Return the booking end datetime."""
        return self.booking_expiration_date

    def setBooking_expiration_date(self, date):
        self.booking_expiration_date = date
        return

    def getBooking_type(self):
        return self.booking_type

    def getCompany(self):
        return self.company

    def getFiscalcode(self):
        return self.fiscalcode

    def getPhone(self):
        return self.phone

    def getEmail(self):
        return self.email

    def getStaff_notes(self):
        return self.staff_notes

    def getPrenotazioniFolder(self):
        """Return the PrenotazioniFolder this booking lives in."""
        for parent in self.aq_chain:
            if IPrenotazioniFolder.providedBy(parent):
                return parent
        raise Exception(
            "Could not find Prenotazioni Folder " "in acquisition chain of %r" % self
        )

    def getDuration(self):
        """Return current duration"""
        start = self.getBooking_date()
        end = self.getBooking_expiration_date()
        if start and end:
            return end - start
        else:
            # NOTE(review): the fallback is the int 1 while the happy path
            # returns a timedelta; callers appear to rely on this — left as-is
            return 1

    def Subject(self):
        """Reuse plone subject to do something useful"""
        return ""

    def Date(self):
        """
        Dublin Core element - default date
        """
        # Return reservation date
        return DateTime(self.getBooking_date())

    def getBookingCode(self):
        """Return a short human-friendly code derived from the booking UID."""
        hash_obj = hashlib.blake2b(bytes(self.UID(), encoding="utf8"), digest_size=3)
        return hash_obj.hexdigest().upper()

    def canAccessBooking(self):
        """Return True if the current user may view this booking.

        Anonymous bookings (no creator) are visible to anonymous visitors;
        otherwise the viewer must be the creator or have the
        ManagePrenotazioni permission.
        """
        creator = self.Creator()
        if api.user.is_anonymous():
            if creator:
                return False
        else:
            current_user = api.user.get_current()
            if (
                not api.user.has_permission("redturtle.prenotazioni.ManagePrenotazioni")
                and creator != current_user.getUserName()
            ):
                return False
        return True

    def canDeleteBooking(self):
        """Return True if the current user may delete this booking.

        Anonymous bookings can be deleted by anonymous visitors or managers;
        authenticated bookings only by managers or their creator.
        """
        creator = self.Creator()
        if not creator:
            if api.user.is_anonymous():
                return True
            if api.user.has_permission("redturtle.prenotazioni.ManagePrenotazioni"):
                return True
        else:
            if api.user.is_anonymous():
                return False
            current_user = api.user.get_current()
            if (
                api.user.has_permission("redturtle.prenotazioni.ManagePrenotazioni")
                or creator == current_user.getUserName()
            ):
                return True
        return False
try:
    from plone.app.blob.migrations import migrate as migrateBlob
except ImportError:
    # without plone.app.blob should never be possible that someone called the "migrateBlob" function
    pass
from redturtle.smartlink.interfaces import ISmartLink


def migrateSmartLink(context):
    # run the plone.app.blob migration over Link/ATLink objects and return
    # its textual report as a list of lines
    return migrateBlob(context, 'Link', 'ATLink').splitlines()
def isATLink(oldObject, **kwargs):
    """Test if the object is a simple ATLink (i.e: is not a Smart Link)"""
    # used as the walker's callBefore filter in migrateLinkToSmartLink
    return not ISmartLink.providedBy(oldObject)


def isSmartLink(oldObject, **kwargs):
    """Test if the object is a Smart Link"""
    # used as the walker's callBefore filter in migrateSmartLinkToLink
    return ISmartLink.providedBy(oldObject)
# helper to build custom blob migrators for the given type
# some code stolen from the migration of plone.app.blob
def makeMigrator(context, portal_type, meta_type):
    """ generate a migrator for the given at-based portal type """
    from Products.contentmigration.archetypes import InplaceATItemMigrator

    class ATLinkMigrator(InplaceATItemMigrator):
        # migrate in place: source and destination types coincide
        src_portal_type = portal_type
        src_meta_type = meta_type
        dst_portal_type = portal_type
        dst_meta_type = meta_type

        # "last_migrate_*" hooks run at the end of each object migration
        def last_migrate_externalLink(self):
            # copy the remote URL into Smart Link's externalLink field
            if self.old.getRemoteUrl() and ISmartLink.providedBy(self.new):
                self.new.setExternalLink(self.old.getRemoteUrl())
                self.new.reindexObject()

    return ATLinkMigrator
def migrateLinkToSmartLink(context):
    """Migrate every plain ATLink to Smart Link; return the migration log."""
    from Products.contentmigration.walker import CustomQueryWalker
    link_migrator = makeMigrator(context, 'Link', 'ATLink')
    walker = CustomQueryWalker(
        context, link_migrator, callBefore=isATLink, use_savepoint=True)
    walker.go()
    log = walker.getOutput()
    return log.splitlines()
def migrateSmartLinkToLink(context):
    """Migrate every Smart Link back to a plain Link; return the log lines."""
    from Products.contentmigration.walker import CustomQueryWalker
    migrator = makeMigrator(context, 'Link', 'ATLink')
    # Only objects passing the isSmartLink filter are migrated.
    walker = CustomQueryWalker(context, migrator, callBefore=isSmartLink, use_savepoint=True)
    walker.go()
return walker.getOutput().splitlines() | /redturtle.smartlink-1.3.2.tar.gz/redturtle.smartlink-1.3.2/redturtle/smartlink/migrator.py | 0.5794 | 0.265244 | migrator.py | pypi |
from zope.interface import Interface
from zope import schema
from redturtle.smartlink import smartlinkMessageFactory as _
from zope.interface import invariant, Invalid
from plone.app.controlpanel.interfaces import IPloneControlPanelForm
class ISmartLinkControlPanelForm(IPloneControlPanelForm):
    """Marker interface for the Smart Link configuration control panel."""
class ILinkNormalizerUtility(Interface):
    """Utility that rewrites URLs so they use valid internal hostnames."""

    def toFrontEnd(remote):
        """Transform *remote* into the proper front-end URL."""

    def toCurrent(remote):
        """Transform *remote* into a URL compatible with the current host."""
class ISmartlinkConfig(Interface):
    """Schema for the Smart Link URL-normalization settings."""

    # Store internal links relative to the portal root instead of absolute.
    relativelink = schema.Bool(
        title=_(u"Relative links"),
        description=_(u'help_relativelink',
                      default=(u'If selected, all internal links in the site will store URLs relative to the portal root, '
                               u'rather than absolute save the complete absolute ones. '
                               u'For example: no "http://myhost/plone/foo" but "/plone/foo"')),
        required=False
    )

    # Public site URL substituted for the portal's own URL in links.
    frontend_main_link = schema.TextLine(
        title=_(u"Front-end main URL"),
        description=_(u'help_frontend_main_link',
                      default=(u'Put there the site main URL you want to expone to visitors. '
                               u'All Smart Link that starts with the current URL of the portal '
                               u'will be transformed to use this URL.')),
        default=u'',
        required=False
    )

    # Back-office URLs; each entry pairs with the same-index frontendlink.
    backendlink = schema.List(
        title=_(u"Back-end URLs"),
        description=_(u'help_backendlink',
                      default=(u'Put there all your possible back-office URLs you want to transform. '
                               u'URLs there must be unique')),
        value_type=schema.TextLine(),
        default=[],
        unique=True,
        required=False
    )

    frontendlink = schema.List(
        title=_(u"Front-end URLs"),
        description=_(u'help_frontendlink',
                      default=u'Fill there URLs in which you want to change the relative back-end ones.'),
        value_type=schema.TextLine(),
        default=[],
        unique=False,
        required=False
    )

    proxy_enabled = schema.Bool(
        title=_(u"Globally enable proxy feature for title and description"),
        description=_(u'help_enable_proxy_feature',
                      default=u"Disable this if you don't want to use the \"Use referenced content's data\" feature.\n"
                              u"Please note: changing this configuration will not update existings links automatically."),
        default=False
    )

    @invariant
    def otherFilledIfSelected(smartlink):
        # Front-end and back-end URL lists must pair up one-to-one.
        if len(smartlink.frontendlink) != len(smartlink.backendlink):
raise Invalid(_(u"Front-end link must correspond to a single back-end link")) | /redturtle.smartlink-1.3.2.tar.gz/redturtle.smartlink-1.3.2/redturtle/smartlink/interfaces/utility.py | 0.714329 | 0.16654 | utility.py | pypi |
from Products.Five.browser import BrowserView
from Products.statusmessages.interfaces import IStatusMessage
from plone.memoize.view import memoize
from redturtle.sqlcontents import sqlcontentsLogger as logger, \
sqlcontentsMessageFactory as _, sqlcontentsLogger
from redturtle.sqlcontents.adapters.engine import ISQLFolderEngine
class View(BrowserView):
    '''
    The view for SQLQuery objects.

    On instantiation the SQL query is executed once (results are memoized)
    so the template can render keys/rows without re-running it.
    '''

    def __init__(self, context, request):
        '''
        We need also the engine, taken from the adapted parent.
        '''
        super(View, self).__init__(context, request)
        # Property accesses, not no-ops: each is memoized, so touching them
        # here runs the query eagerly and caches the results.  Order
        # matters: ``execute`` sets ``self.sqlstatus``, which ``sqlkeys``
        # and ``sqlrows`` read.
        self.execute
        self.sqlkeys
        self.sqlrows

    @memoize
    def declareEncodingProblem(self):
        '''
        Declare we have an encoding problem, logging the exception.
        Memoized so the problem is logged only once per request.
        '''
        msg = 'Encoding problem %s' % self.context.UID()
        sqlcontentsLogger.exception(msg)

    def safedisplay(self, value):
        '''
        Take a raw cell value and try to convert it to unicode for
        injecting it in the template; return u'' when conversion fails.
        '''
        # NOTE: Python 2 code (``unicode``/``str`` builtins).
        if value is None:
            return u''
        try:
            if not isinstance(value, unicode):
                value = str(value)
            return unicode(value)
        except:
            # NOTE(review): bare except — any conversion error is swallowed
            # on purpose (best-effort display); the problem is logged once.
            self.declareEncodingProblem()
            return u''

    @property
    @memoize
    def sqlengine(self):
        '''
        Adapt the parent folder of this object to ISQLFolderEngine.
        '''
        return ISQLFolderEngine(self.context.aq_inner.aq_parent)

    @property
    @memoize
    def sqlkeys(self):
        '''
        Return the result column ids (empty tuple when the query failed).
        '''
        if not self.sqlstatus:
            return ()
        return self.execute.keys()

    @property
    @memoize
    def column_names_map(self):
        '''
        Map known column ids to display names, as configured in the
        ``column_names`` DataGrid field.
        '''
        names_map = {}
        for data_grid_line in self.context.getColumn_names():
            key, value = data_grid_line['key'], data_grid_line['value']
            names_map[key] = value
        return names_map

    @property
    @memoize
    def column_names(self):
        '''
        Translate, if possible, the column ids into human readable names.
        If no name is configured, fall back to the column id itself.
        '''
        names = []
        names_map = self.column_names_map
        for key in self.sqlkeys:
            names.append(names_map.get(key, key))
        return names

    @property
    @memoize
    def sqlrows(self):
        '''
        Return all result rows (empty tuple when the query failed).
        '''
        if not self.sqlstatus:
            return ()
        return self.execute.fetchall()

    def on_error(self):
        '''
        Error handler: flag the failure, log it and show a status message.
        '''
        self.sqlstatus = False
        msg = _("Error executing query")
        logger.exception(msg)
        sm = IStatusMessage(self.request)
        sm.addStatusMessage(msg, 'error')

    @property
    @memoize
    def execute(self):
        '''
        Execute the SQLQuery query with the engine; sets ``sqlstatus``.
        '''
        self.sqlstatus = True
        try:
            return self.sqlengine.execute(self.context.getQuery())
        except:
            # Any failure (bad SQL, connection error, ...) ends up here.
self.on_error() | /redturtle.sqlcontents-0.9.1.zip/redturtle.sqlcontents-0.9.1/redturtle/sqlcontents/browser/sqlquery_view.py | 0.564819 | 0.182517 | sqlquery_view.py | pypi |
from Products.ATContentTypes.content import base, schemata
from Products.Archetypes import atapi
from Products.DataGridField import DataGridField, DataGridWidget, Column
from redturtle.sqlcontents import sqlcontentsMessageFactory as _
from redturtle.sqlcontents.config import PROJECTNAME
from redturtle.sqlcontents.interfaces.sqlquery import ISQLQuery
from zope.interface import implements
# Archetypes schema: base ATContentType fields plus the SQL-specific ones.
SQLQuerySchema = schemata.ATContentTypeSchema.copy() + atapi.Schema((
    # -*- Your Archetypes field definitions here ... -*-

    # The raw SQL statement run against the parent folder's engine.
    atapi.TextField(
        'query',
        storage=atapi.AnnotationStorage(),
        required=True,
        widget=atapi.TextAreaWidget(
            label=_(u"The query that should be executed"),
            description=_("help_query",
                          (u"For example "
                           u"SELECT * FROM TABLE LIMIT 10")),
        ),
    ),

    # Optional mapping column-id -> human readable name used at render time.
    DataGridField(
        'column_names',
        storage=atapi.AnnotationStorage(),
        required=False,
        searchable=False,
        allow_delete=True,
        allow_insert=True,
        allow_reorder=True,
        columns=("key", "value"),
        widget=DataGridWidget(
            label=_("Column names"),
            description=_('help_column_names',
                          (u"Assign human readable names to "
                           u"the columns, they will be used for "
                           u"presenting data in "
                           u"a table inside the content view")),
            visible={'view': 'hidden', 'edit': 'visible'},
            columns={'key': Column(_('Column')),
                     'value': Column(_('Name'))},
        ),
    ),

    # Pagination size for the results table.
    atapi.IntegerField('batch_size',
                       widget=atapi.IntegerWidget(label=_("label_batch_size",
                                                          "Rows per page"),
                                                  description=_("help_batch_size",
                                                                u"The maximum number of rows "
                                                                u"displayed in each page"),
                                                  ),
                       default=20,
                       ),

    # Rich text displayed before the query results.
    atapi.TextField('introduction',
                    searchable=False,
                    default_output_type='text/x-html-safe',
                    widget=atapi.RichWidget(
                        label=_('label_introduction',
                                default=u'Introductive text'),
                        description=_('help_introduction',
                                      default=(u'This text will be displayed before the '
                                               u'query results')
                                      ),
                        rows=5,
                    ),
                    ),

    # Rich text displayed after the query results.
    atapi.TextField('footer',
                    searchable=False,
                    default_output_type='text/x-html-safe',
                    widget=atapi.RichWidget(
                        label=_('label_footer', default=u'Footer text'),
                        description=_('help_footer',
                                      default=(u'This text will be displayed after the '
                                               u'query results')
                                      ),
                        rows=5,
                    ),
                    ),
))
# Set storage on fields copied from ATContentTypeSchema, making sure
# they work well with the python bridge properties.
SQLQuerySchema['title'].storage = atapi.AnnotationStorage()
SQLQuerySchema['description'].storage = atapi.AnnotationStorage()

schemata.finalizeATCTSchema(SQLQuerySchema, moveDiscussion=False)


class SQLQuery(base.ATCTContent):
    """SQLQuery content type: an SQL statement executed through the parent
    SQL folder's engine and rendered as a results table."""
    implements(ISQLQuery)

    meta_type = "SQLQuery"
    schema = SQLQuerySchema

    title = atapi.ATFieldProperty('title')
    description = atapi.ATFieldProperty('description')

    # -*- Your ATSchema to Python Property Bridges Here ... -*-
atapi.registerType(SQLQuery, PROJECTNAME) | /redturtle.sqlcontents-0.9.1.zip/redturtle.sqlcontents-0.9.1/redturtle/sqlcontents/content/sqlquery.py | 0.46563 | 0.237764 | sqlquery.py | pypi |
from Products.PythonScripts.standard import url_quote
from Products.ResourceRegistries.browser.styles import StylesView as BaseStylesView
from plone.app.layout.navigation.root import getNavigationRoot
class StylesView(BaseStylesView):
    """ Information for style rendering for a specific subsite section """

    def _getRegistryURL(self, registry):
        """Service method to get the right registry URL.

        The URL is built on the navigation root (the subsite) when we are
        in a subsite environment; otherwise the registry's own URL is used.
        """
        subsite_path = getNavigationRoot(self.context)
        # depth 0: only the navigation-root object itself.
        brains = self.context.portal_catalog(path={'query':subsite_path, 'depth':0})
        if brains:
            subsite = brains[0]
            registry_url = subsite.getURL() + '/portal_css'
        else:
            # Fallback when the catalog does not know the root.
            registry_url = registry.absolute_url()
        return registry_url

    def styles(self):
        """Return the list of style dicts for the template, with ``src``
        rebuilt on top of the subsite-aware registry URL."""
        registry = self.registry()
        registry_url = self._getRegistryURL(registry)
        styles = registry.getEvaluatedResources(self.context)
        skinname = url_quote(self.skinname())
        result = []
        for style in styles:
            rendering = style.getRendering()
            if rendering == 'link':
                src = "%s/%s/%s" % (registry_url, skinname, style.getId())
                data = {'rendering': rendering,
                        'media': style.getMedia(),
                        'rel': style.getRel(),
                        'title': style.getTitle(),
                        'conditionalcomment' : style.getConditionalcomment(),
                        'src': src}
            elif rendering == 'import':
                src = "%s/%s/%s" % (registry_url, skinname, style.getId())
                data = {'rendering': rendering,
                        'media': style.getMedia(),
                        'conditionalcomment' : style.getConditionalcomment(),
                        'src': src}
            elif rendering == 'inline':
                content = registry.getInlineResource(style.getId(),
                                                     self.context)
                data = {'rendering': rendering,
                        'media': style.getMedia(),
                        'conditionalcomment' : style.getConditionalcomment(),
                        'content': content}
            else:
                # NOTE: Python 2 raise syntax — this module targets old Plone.
                raise ValueError, "Unkown rendering method '%s' for style '%s'" % (rendering, style.getId())
            result.append(data)
return result | /redturtle.subsites-2.1.0.zip/redturtle.subsites-2.1.0/redturtle/subsites/frontend/browser/styles.py | 0.568655 | 0.175291 | styles.py | pypi |
from redturtle.tiles.management import _
from zope import schema
from zope.interface import Interface
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
class IRedturtleTilesManagementLayer(IDefaultBrowserLayer):
    """Marker interface that defines a browser layer."""


class IRedturtleTilesManagementView(Interface):
    """Marker interface that defines a tiles management view.

    NOTE(review): the original ICreatedTile docstring below ended
    mid-sentence ("This is used to"); presumably it marks tiles created
    through the management view — confirm against the registration code.
    """


class ICreatedTile(Interface):
    """Marker interface for tiles. This is used to"""
class IRedturtleTilesManagementSettings(Interface):
    """Registry schema for redturtle.tiles.management settings."""

    # Vocabulary-driven whitelist of tile types users may add.
    enabled_tiles = schema.List(
        title=_(u'enabled_tiles_label',
                default=u'Enabled tiles'),
        description=_(u'enabled_tiles_help',
                      default=u'Select a list of tiles to add.'),
        required=False,
        default=[],
        missing_value=[],
        value_type=schema.Choice(
            vocabulary='tiles.management.vocabularies.RegisteredTiles',
        ),
    )

    # "display_name|css_class" pairs used by the tile-size button.
    tile_size_css_class = schema.List(
        title=_(u'size_css_classes',
                default=u'CSS Classes for tile sizes'),
        description=_(u'size_css_classes_descriptions',
                      default=u'List of CSS classes to resize the tile. '
                              u'These are used in the size button in tile '
                              u'management.\n'
                              u'The default style is "reset". It will add an '
                              u'empty string as CSS class and the tile will '
                              u'take the whole width.\n'
                              u'Insert a list of values (one per row) in the '
                              u'following form: display_name|css_class where '
                              u'display_name is the string to show to the user'
                              u' and css_class is the class will be applied '
                              u'to the tile'),
        required=False,
        default=[
            'reset|',
            'two tiles in a row|half-width',
        ],
        missing_value=[],
        value_type=schema.TextLine(),
) | /redturtle.tiles.management-3.0.1.tar.gz/redturtle.tiles.management-3.0.1/src/redturtle/tiles/management/interfaces.py | 0.754373 | 0.251016 | interfaces.py | pypi |
from zope.interface import implements
from zope.component import getMultiAdapter
from plone.portlets.interfaces import IPortletDataProvider
from plone.app.portlets.portlets import base
from zope import schema
from zope.formlib import form
from plone.memoize.instance import memoize
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from plone.app.vocabularies.catalog import SearchableTextSourceBinder
from plone.app.form.widgets.uberselectionwidget import UberSelectionWidget
from Products.ATContentTypes.interface import IATTopic, IATFolder
from redturtle.video.interfaces import IRTVideo
from redturtle.video import videoMessageFactory as _
from Products.CMFCore.utils import getToolByName
from Acquisition import aq_inner
def getImageUrl(resource):
    """Return the thumbnail URL for *resource*, falling back to the
    flowplayer "play" icon when no splash screen image is available.

    *resource* may be an RTVideo object (``hasSplashScreenImage`` is a
    method, URL via ``absolute_url()``) or, presumably, a catalog brain
    (``hasSplashScreenImage`` is an attribute, URL via ``getURL()``).
    """
    is_video_object = IRTVideo.providedBy(resource)
    if is_video_object:
        has_splash = resource.hasSplashScreenImage()
        base_url = resource.absolute_url()
    else:
        has_splash = resource.hasSplashScreenImage
        base_url = resource.getURL()
    if has_splash:
        return base_url + '/image_thumb'
    portal = getToolByName(resource, 'portal_url').getPortalObject()
    return portal.absolute_url() + \
        "/++resource++collective.flowplayer.css/play.png"
class IRTVideoPortlet(IPortletDataProvider):
    """A portlet which can display video gallery"""

    # Optional title rendered in the portlet header.
    header = schema.TextLine(title = _(u"label_portlet_header",
                                       default = u"Portlet header"),
                             description = _(u"help_portlet_header",
                                             default = u"Title of the rendered portlet"),
                             required = False)

    # A single video, or a folder/collection containing videos.
    target = schema.Choice(title = _(u"label_target_object",
                                     default = u"Target object"),
                           description = _(u"help_target_object",
                                           default = u"This can be a file "\
                                                      "containing an video content, "\
                                                      "or a folder or collection "\
                                                      "containing videos"),
                           required = True,
                           source = SearchableTextSourceBinder(
                                {'object_provides': [IATTopic.__identifier__,
                                                     IATFolder.__identifier__,
                                                     IRTVideo.__identifier__]},
                                default_query = 'path:'))

    # 0 means "no limit".
    limit = schema.Int(title = _(u"label_number_of_videos_to_show",
                                 default = u"Number of videos to show"),
                       description = _(u"help_number_of_videos_to_show",
                                       default = u"Enter a number greater than 0 "\
                                                  "to limit the number of items displayed"),
                       required = False,
                       default = 0)

    show_more = schema.Bool(title = _(u"label_show_more_link",
                                      default=u'Show "more..." link'),
                            description = _(u"help_show_more_link",
                                            default = u"If enabled, a more... "\
                                                       "link will appear in the footer of "\
                                                       "the portlet, "
                                                       "linking to the underlying data."),
                            required = True,
                            default = True)
class Assignment(base.Assignment):
    """Portlet assignment: persistent storage of the portlet configuration."""
    implements(IRTVideoPortlet)

    # Class-level defaults for instances created before a field existed.
    # NOTE(review): ``show_more`` defaults to False here while the schema
    # default is True — confirm whether this asymmetry is intended.
    header = u""
    target = None
    limit = 0
    show_more = False

    def __init__(self, header=u"", target=None, limit=None, show_more=False):
        self.header = header
        self.target = target
        self.limit = limit
        self.show_more = show_more

    @property
    def title(self):
        # Title shown in the "manage portlets" screen.
        if self.header:
            return _(u"Video gallery")+': ' + self.header
        return _(u"Video gallery")
class Renderer(base.Renderer):
    """Portlet renderer.

    ``videos()`` returns a normalized list of dicts (title, description,
    url, path, year, duration, image_url) whatever the target is: a
    single RTVideo object, a folder of videos or a collection.
    """

    render = ViewPageTemplateFile('portlet.pt')

    @property
    def available(self):
        # Hide the portlet entirely when there is nothing to show.
        return len(self.videos()) > 0

    def target_url(self):
        """Return the URL of the configured target (None when unset),
        appending ``/view`` for non-structural-folder contents."""
        target = self.target()
        if target is None:
            return None
        plone_view = getMultiAdapter((aq_inner(target), self.request), name='plone')
        if plone_view.isStructuralFolder():
            return target.absolute_url()
        else:
            return "%s/view" % target.absolute_url()

    def _brain_info(self, brain):
        """Map a catalog brain to the dict consumed by the template.

        Refactor: this mapping was duplicated verbatim in the folder and
        collection branches of ``videos()``.
        """
        return dict(title=brain.Title,
                    description=brain.Description,
                    url=brain.getURL(),
                    path=brain.getPath(),
                    year=brain.getYear,
                    duration=brain.getDuration,
                    image_url=getImageUrl(brain))

    @memoize
    def videos(self):
        """Return the (possibly limit-truncated) list of video info dicts."""
        target = self.target()
        limit = self.data.limit
        if target is None:
            return []
        if IRTVideo.providedBy(target):
            # Single video: build the info dict from the object itself.
            return [dict(title=target.Title(),
                         description=target.Description(),
                         url=target.absolute_url(),
                         path='/'.join(target.getPhysicalPath()),
                         year=target.getYear(),
                         duration=target.getDuration(),
                         image_url=getImageUrl(target)),
                    ]
        content_filter = {'object_provides': IRTVideo.__identifier__}
        if IATFolder.providedBy(target):
            brains = target.getFolderContents(contentFilter=content_filter)
        elif IATTopic.providedBy(target):
            brains = target.queryCatalog(contentFilter=content_filter)
        else:
            return []
        values = [self._brain_info(v) for v in brains]
        return (limit and values[:limit]) or values

    @memoize
    def target(self):
        """Resolve the stored target path to an object (or None)."""
        target_path = self.data.target
        if not target_path:
            return None
        if target_path.startswith('/'):
            target_path = target_path[1:]
        if not target_path:
            return None
        portal_state = getMultiAdapter((self.context, self.request),
                                       name=u'plone_portal_state')
        portal = portal_state.portal()
        return portal.restrictedTraverse(target_path, default=None)
class AddForm(base.AddForm):
    # Add form for the video portlet assignment.
    form_fields = form.Fields(IRTVideoPortlet)
    form_fields['target'].custom_widget = UberSelectionWidget
    label = _(u"label_add_video_portlet",
              default = u"Add Video Portlet")
    description = _(u"help_add_video_portlet",
                    default = u"This portlet display a video gallery.")

    def create(self, data):
        """Build the persistent Assignment from the validated form data."""
        return Assignment(**data)
class EditForm(base.EditForm):
    # Edit form for the video portlet assignment (mirrors AddForm).
    form_fields = form.Fields(IRTVideoPortlet)
    form_fields['target'].custom_widget = UberSelectionWidget
    label = _(u"label_edit_video_portlet",
              default = u"Edit Video Portlet")
    description = _(u"help_edit_video_portlet",
default = u"This portlet display a video gallery.") | /redturtle.video-1.1.1.tar.gz/redturtle.video-1.1.1/redturtle/video/portlets/portlet.py | 0.583203 | 0.212293 | portlet.py | pypi |
from Acquisition import aq_base
from plone.app.caching import purge
from plone.app.event.base import dt_start_of_day
from plone.app.event.recurrence import Occurrence
from plone.event.interfaces import IEventAccessor
from plone.event.recurrence import recurrence_sequence_ical
from plone.event.utils import pydt
from Products.CMFPlone.interfaces import IConstrainTypes
from zope.globalrequest import getRequest
import datetime
def occurrences(self, range_start=None, range_end=None):
    """Return all occurrences of an event, possibly within a start and end
    limit.

    :param range_start: Optional start datetime, from which you want
                        occurrences be returned.
    :type range_start: Python datetime
    :param range_end: Optional start datetime, from which you want
                      occurrences be returned.
    :type range_end: Python datetime
    :returns: List of occurrences, including the start event.
    :rtype: IEvent or IOccurrence based objects

    Please note: Events beginning before range_start but ending afterwards
    won't be found.
    TODO: really?
    TODO: test with event start = 21st feb, event end = start+36h,
    recurring for 10 days, range_start = 1st mar, range_end = last Mark
    """
    event = IEventAccessor(self.context)
    # We try to get IEventBasic start without including recurrence
    event_start = getattr(self.context, "start", None)
    if not event_start:
        event_start = event.start
    elif getattr(event, "whole_day", None):
        event_start = dt_start_of_day(event_start)
    # We get event ends by adding a duration to the start. This way, we
    # prevent that the start and end lists are of different size if an
    # event starts before range_start but ends afterwards.
    if getattr(event, "whole_day", None) or getattr(event, "open_end", None):
        duration = datetime.timedelta(hours=23, minutes=59, seconds=59)
    else:
        event_end = getattr(self.context, "end", None)
        # THIS IS THE PATCH
        if getattr(event, "recurrence", None):
            # For recurring events, compute the duration on the *start*
            # day, so multi-day events do not stretch every occurrence.
            recurrence_end = datetime.datetime.combine(
                event_start.date(), event_end.time(), event_start.tzinfo
            )
            duration = recurrence_end - event_start
        else:
            duration = event_end - event_start
        # END OF PATCH
    starts = recurrence_sequence_ical(
        event_start,
        recrule=event.recurrence,
        from_=range_start,
        until=range_end,
        duration=duration,
    )

    # XXX potentially occurrence won't need to be wrapped anymore
    # but doing it for backwards compatibility as views/templates
    # still rely on acquisition-wrapped objects.
    def get_obj(start):
        if pydt(event_start.replace(microsecond=0)) == start:
            # If the occurrence date is the same as the event object, the
            # occurrence is the event itself. return it as such.
            # Dates from recurrence_sequence_ical are explicitly without
            # microseconds, while event.start may contain it. So we have to
            # remove it for a valid comparison.
            return self.context
        return Occurrence(
            id=str(start.date()), start=start, end=start + duration
        ).__of__(self.context)

    for start in starts:
        yield get_obj(start)
def _verifyObjectPaste(self, obj, validate_src=True):
    """Patched ``_verifyObjectPaste`` that also enforces type constraints.

    Bugfix: the original patch always called the stock implementation
    with ``validate_src=True``, silently ignoring the caller's argument;
    the parameter is now forwarded as given.

    :raises ValueError: when *obj*'s portal_type is not allowed here.
    """
    self._old__verifyObjectPaste(obj, validate_src=validate_src)
    portal_type = getattr(aq_base(obj), "portal_type", None)
    constrains = IConstrainTypes(self, None)
    if constrains:
        allowed_ids = [i.getId() for i in constrains.allowedContentTypes()]
        if portal_type not in allowed_ids:
            raise ValueError("Disallowed subobject type: %s" % portal_type)
# PURGE/BAN EVERYTHING (NO TYPE CHECKING)
def isPurged(obj):
    # Monkey patch: any object is considered purgeable while a request is
    # in flight.  NOTE(review): returns None (falsy) when there is no
    # request — confirm callers treat None as "not purged".
    if getRequest() is not None:
        return True


purge.isPurged = isPurged
def plone_volto_deserializer_call(self, value):
    """No-op deserializer: hand *value* back untouched."""
    untouched = value
    return untouched
def plone_volto_serializer_call(self, value):
    """No-op serializer: hand *value* back untouched."""
    untouched = value
    return untouched
# IGNORE USELESS "No such index: 'show_inactive'" warnings
try:
    from plone.restapi.search.query import ZCatalogCompatibleQueryAdapter

    # These query parameters are not catalog indexes; skip them silently.
    ZCatalogCompatibleQueryAdapter.ignore_query_params = [
        "metadata_fields",
        "show_inactive",
        "skipNull",
    ]
except ImportError:
pass | /redturtle.volto-5.2.2.tar.gz/redturtle.volto-5.2.2/src/redturtle/volto/monkey.py | 0.542621 | 0.296489 | monkey.py | pypi |
"""Init and utils."""
from plone.app.content.browser.vocabulary import PERMISSIONS
from plone.folder.nogopip import GopipIndex
from Products.ZCatalog.Catalog import Catalog
from redturtle.volto.catalogplan import Catalog_sorted_search_indexes
from zope.i18nmessageid import MessageFactory
from ZTUtils.Lazy import LazyCat
from ZTUtils.Lazy import LazyMap
from plone.restapi.serializer import utils
import logging
import re
logger = logging.getLogger(__name__)

_ = MessageFactory("redturtle.volto")

# Relax the permission required to query the Keywords vocabulary to "View".
PERMISSIONS["plone.app.vocabularies.Keywords"] = "View"

# CATALOG PATCHES
logger.info(
    "install monkey patch for Products.ZCatalog.Catalog.Catalog._sorted_search_indexes #### WORK IN PROGRESS ####"
)
Catalog._orig_sorted_search_indexes = Catalog._sorted_search_indexes
Catalog._sorted_search_indexes = Catalog_sorted_search_indexes

# Above this many results, Gopip (manual folder order) sorting is skipped.
MAX_SORTABLE = 5000
def Catalog_sortResults(
    self,
    rs,
    sort_index,
    reverse=False,
    limit=None,
    merge=True,
    actual_result_count=None,
    b_start=0,
    b_size=None,
):
    """Monkey patch for ``Catalog.sortResults``.

    When the result set is huge (>= MAX_SORTABLE) and the sort index is a
    GopipIndex (manual folder ordering), the sort is skipped and the
    unsorted batch is returned, avoiding a very expensive operation.
    """
    if MAX_SORTABLE > 0:
        if actual_result_count is None:
            actual_result_count = len(rs)
        if actual_result_count >= MAX_SORTABLE and isinstance(sort_index, GopipIndex):
            logger.warning(
                "too many results %s disable GopipIndex sorting", actual_result_count
            )
            # Serve the batch from the far end when it is closer to it.
            switched_reverse = bool(
                b_size and b_start and b_start > actual_result_count / 2
            )
            if hasattr(rs, "keys"):
                sequence, slen = self._limit_sequence(
                    rs.keys(), actual_result_count, b_start, b_size, switched_reverse
                )
                return LazyMap(
                    self.__getitem__,
                    sequence,
                    len(sequence),
                    actual_result_count=actual_result_count,
                )
            else:
                # Result set without .keys(): give up and return nothing.
                logger.error(
                    "too many results %s disable GopipIndex sorting results %s has no key",
                    actual_result_count,
                    type(rs),
                )
                return LazyCat([], 0, actual_result_count)
    # Every other case falls back to the stock implementation.
    return self._orig_sortResults(
        rs, sort_index, reverse, limit, merge, actual_result_count, b_start, b_size
    )
logger.info("install monkey patch for Products.ZCatalog.Catalog.Catalog.sortResults")
Catalog._orig_sortResults = Catalog.sortResults
Catalog.sortResults = Catalog_sortResults

# Patch plone.restapi's regexp so resolveuid links are matched even when
# preceded by an arbitrary path prefix.
utils.RESOLVEUID_RE = re.compile("^(?:|.*/)resolve[Uu]id/([^/]*)/?(.*)$") | /redturtle.volto-5.2.2.tar.gz/redturtle.volto-5.2.2/src/redturtle/volto/__init__.py | 0.510496 | 0.20828 | __init__.py | pypi |
from copy import deepcopy
from plone.restapi.behaviors import IBlocks
from plone.restapi.deserializer.blocks import path2uid
from plone.restapi.interfaces import IBlockFieldDeserializationTransformer
from Products.CMFPlone.interfaces import IPloneSiteRoot
from redturtle.volto.interfaces import IRedturtleVoltoLayer
from zope.component import adapter
from zope.interface import implementer
# Keys whose values must never be rewritten to resolveuid form.
EXCLUDE_KEYS = ["@type", "token", "value", "@id", "query"]
# Block types the generic transformer leaves untouched.
EXCLUDE_TYPES = ["title", "listing", "calendar", "searchEvents"]
class GenericResolveUIDDeserializer(object):
    """
    Generic deserializer: parse all block data and try to change urls to
    resolveuids.
    This potentially handle all text fields and complex blocks.
    """

    order = 200  # after standard ones
    block_type = None  # None: applies to every block type

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __call__(self, value):
        # Work on a copy: the incoming block dict must not be mutated.
        return self.fix_urls_in_block(block=deepcopy(value))

    def fix_urls_in_block(self, block):
        """Recursively rewrite URLs in *block* into resolveuid form."""
        if isinstance(block, str):
            return self.get_uid_from_path(link=block)
        if block.get("@type", "") in EXCLUDE_TYPES:
            return block
        if "UID" in block.keys():
            # we store only uid, because other infos can change.
            return {"UID": block["UID"]}
        for key, val in block.items():
            if not val:
                continue
            if key in EXCLUDE_KEYS:
                continue
            if isinstance(val, str):
                block[key] = self.get_uid_from_path(link=val)
            elif isinstance(val, list):
                block[key] = [self.fix_urls_in_block(x) for x in val]
            elif isinstance(val, dict):
                if "entityMap" in val.keys():
                    # draft-js rich text: rewrite LINK entity URLs.
                    # NOTE: the loop variable shadows ``entity_map``.
                    entity_map = val.get("entityMap", {})
                    for entity_map in entity_map.values():
                        # NOTE(review): strip("/") removes leading *and*
                        # trailing slashes — confirm that is intended.
                        url = entity_map["data"].get("url", "").strip("/")
                        entity_map["data"]["url"] = self.get_uid_from_path(link=url)
                else:
                    block[key] = self.fix_urls_in_block(block=val)
        return block

    def get_uid_from_path(self, link):
        """get_uid_from_path.

        Convert *link* to ``resolveuid/<uid>`` form; return it unchanged
        when it is not a resolvable path.

        :param link: URL or path to convert.
        """
        try:
            return path2uid(context=self.context, link=link)
        except IndexError:
            # the value (link) is not a valid path
            return link
class TableResolveUIDDeserializer(object):
    """Deserializer for "table" blocks: rewrite every LINK entity URL
    inside each cell into resolveuid form."""

    order = 210  # after standard ones
    block_type = "table"

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __call__(self, block):
        rows = block.get("table", {}).get("rows", [])
        for row in rows:
            for cell in row.get("cells", []):
                entities = cell.get("value", {}).get("entityMap", {})
                for entity in entities.values():
                    if entity.get("type") != "LINK":
                        continue
                    href = entity.get("data", {}).get("url", "")
                    entity["data"]["url"] = path2uid(
                        context=self.context, link=href
                    )
        return block
# Adapter registrations: each transformer is registered both for contents
# with the IBlocks behavior and for the Plone site root.
@implementer(IBlockFieldDeserializationTransformer)
@adapter(IBlocks, IRedturtleVoltoLayer)
class GenericResolveUIDDeserializerContents(GenericResolveUIDDeserializer):
    """Deserializer for content-types that implements IBlocks behavior"""


@implementer(IBlockFieldDeserializationTransformer)
@adapter(IPloneSiteRoot, IRedturtleVoltoLayer)
class GenericResolveUIDDeserializerRoot(GenericResolveUIDDeserializer):
    """Deserializer for site-root"""


@implementer(IBlockFieldDeserializationTransformer)
@adapter(IBlocks, IRedturtleVoltoLayer)
class TableResolveUIDDeserializerContents(TableResolveUIDDeserializer):
    """Deserializer for content-types that implements IBlocks behavior"""


@implementer(IBlockFieldDeserializationTransformer)
@adapter(IPloneSiteRoot, IRedturtleVoltoLayer)
class TableResolveUIDDeserializerRoot(TableResolveUIDDeserializer):
"""Deserializer for site-root""" | /redturtle.volto-5.2.2.tar.gz/redturtle.volto-5.2.2/src/redturtle/volto/restapi/deserializer/blocks.py | 0.620392 | 0.188473 | blocks.py | pypi |
from plone.app.contenttypes.interfaces import ILink
from plone.app.contenttypes.utils import replace_link_variables_by_paths
from plone.app.dexterity.behaviors.metadata import IPublication
from plone.dexterity.interfaces import IDexterityContent
from plone.restapi.interfaces import IFieldSerializer
from plone.restapi.serializer.converters import json_compatible
from plone.restapi.serializer.dxfields import DefaultFieldSerializer
from plone.restapi.serializer.utils import uid_to_url
from redturtle.volto.interfaces import IRedturtleVoltoLayer
from zope.component import adapter
from zope.interface import implementer
from zope.schema.interfaces import IDatetime
from zope.schema.interfaces import ITextLine
import re
RESOLVEUID_RE = re.compile(".*?/resolve[Uu]id/([^/]*)/?(.*)$")
@adapter(ITextLine, ILink, IRedturtleVoltoLayer)
class TextLineFieldSerializer(DefaultFieldSerializer):
    """Serializer for Link text-line fields.

    Only ``remoteUrl`` gets special handling: link variables are expanded
    to paths and resolveuid references converted to real URLs; every
    other field falls back to the default serializer.
    """

    def __call__(self):
        if self.field.getName() != "remoteUrl":
            # Idiom: zero-argument super() (the file already uses
            # Python-3-only class syntax).
            return super().__call__()
        value = self.get_value()
        # Expand ${portal_url}/${navigation_root_url} variables first,
        # then resolve any resolveuid link to an absolute URL.
        path = replace_link_variables_by_paths(context=self.context, url=value)
        url = uid_to_url(path)
        return json_compatible(url)
@adapter(IDatetime, IDexterityContent, IRedturtleVoltoLayer)
@implementer(IFieldSerializer)
class DateTimeFieldSerializer:
    """Serializer for datetime fields that keeps full timezone info for
    IPublication (effective/expires) dates."""

    def __init__(self, field, context, request):
        self.context = context
        self.request = request
        self.field = field

    def __call__(self):
        return json_compatible(self.get_value())

    def get_value(self, default=None):
        value = getattr(
            self.field.interface(self.context), self.field.__name__, default
        )
        if value and self.field.interface == IPublication:
            # the patch: we want the dates with full tz infos
            # default value is taken from
            # plone.app.dexterity.behaviors.metadata.Publication that escape
            # timezone
            return getattr(self.context, self.field.__name__)()
return value | /redturtle.volto-5.2.2.tar.gz/redturtle.volto-5.2.2/src/redturtle/volto/restapi/serializer/dxfields.py | 0.623377 | 0.190253 | dxfields.py | pypi |
from copy import deepcopy
from plone import api
from plone.restapi.behaviors import IBlocks
from plone.restapi.interfaces import IBlockFieldSerializationTransformer
from plone.restapi.interfaces import ISerializeToJsonSummary
from plone.restapi.serializer.blocks import uid_to_url
from Products.CMFPlone.interfaces import IPloneSiteRoot
from redturtle.volto.interfaces import IRedturtleVoltoLayer
from zope.component import adapter
from zope.component import getMultiAdapter
from zope.globalrequest import getRequest
from zope.interface import implementer
# Keys whose values are never rewritten by the generic transformer.
EXCLUDE_KEYS = ["@type"]
# Block types the generic transformer leaves untouched.
EXCLUDE_TYPES = ["title", "listing"]
class GenericResolveUIDSerializer(object):
    """
    Generic serializer: parse all block data and try to convert uids into
    proper urls.
    This potentially handle all text fields and complex blocks.
    """

    order = 200  # after standard ones
    block_type = None  # None: applies to every block type

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __call__(self, value):
        # Work on a copy: the stored block dict must not be mutated.
        new_value = deepcopy(value)
        return self.resolve_uids(block=new_value)

    def resolve_uids(self, block):
        """Recursively replace resolveuid references with real URLs."""
        if isinstance(block, str):
            return uid_to_url(block)
        if block.get("@type", "") in EXCLUDE_TYPES:
            return block
        if isinstance(block, dict) and "UID" in block.keys():
            # expand internal relations
            return self.get_item_from_uid(block=block)
        for key, val in block.items():
            if not val:
                continue
            if key in EXCLUDE_KEYS:
                continue
            if isinstance(val, str):
                block[key] = uid_to_url(val)
            elif isinstance(val, list):
                # Entries resolving to a falsy value (e.g. a UID no
                # longer found) are dropped from the list.
                new_val = []
                for x in val:
                    fixed_block = self.resolve_uids(block=x)
                    if fixed_block:
                        new_val.append(fixed_block)
                block[key] = new_val
            elif isinstance(val, dict):
                if "entityMap" in val.keys():
                    # draft-js rich text: expand LINK entity URLs.
                    # NOTE: the loop variable shadows ``entity_map``.
                    entity_map = val.get("entityMap", {})
                    for entity_map in entity_map.values():
                        url = entity_map["data"].get("url", "").strip("/")
                        new = uid_to_url(url)
                        entity_map["data"]["url"] = new
                else:
                    block[key] = self.resolve_uids(block=val)
        return block

    def get_item_from_uid(self, block):
        """
        Return serialized item from uid.
        We return the summary one because we want to avoid recursion and too much complex data returned here.
        For example if we serialize the whole context, we will have also all its blocks.
        This could lead to a huge amount of data returned.
        We need to wrap the item with IIndexableObject to be able to get all metadata like it was a brain.
        """
        items = api.content.find(UID=block["UID"], show_inactive=False)
        if len(items) == 0:
            return {}
        item = items[0]
        adapter = getMultiAdapter((item, getRequest()), ISerializeToJsonSummary)
        return adapter(force_all_metadata=True)
class TableResolveUIDSerializer(object):
    """Serialization transformer for "table" blocks: resolve uids in the
    LINK entities stored inside each cell's draftjs entityMap."""
    order = 210  # after standard ones
    block_type = "table"
    def __init__(self, context, request):
        self.context = context
        self.request = request
    def __call__(self, value):
        # NOTE: mutates `value` in place (GenericResolveUIDSerializer works
        # on a deepcopy instead).
        for row in value.get("table", {}).get("rows", []):
            for cell in row.get("cells", []):
                for entity in cell.get("value", {}).get("entityMap", {}).values():
                    if entity.get("type") == "LINK":
                        url = entity.get("data", {}).get("url", "")
                        entity["data"]["url"] = uid_to_url(url)
        return value
@implementer(IBlockFieldSerializationTransformer)
@adapter(IBlocks, IRedturtleVoltoLayer)
class GenericResolveUIDSerializerContents(GenericResolveUIDSerializer):
    """Serializer for content-types that implement the IBlocks behavior"""
@implementer(IBlockFieldSerializationTransformer)
@adapter(IPloneSiteRoot, IRedturtleVoltoLayer)
class GenericResolveUIDSerializerRoot(GenericResolveUIDSerializer):
    """Serializer for the site root"""
@implementer(IBlockFieldSerializationTransformer)
@adapter(IBlocks, IRedturtleVoltoLayer)
class TableResolveUIDSerializerContents(TableResolveUIDSerializer):
    """Serializer for content-types that implement the IBlocks behavior"""
@implementer(IBlockFieldSerializationTransformer)
@adapter(IPloneSiteRoot, IRedturtleVoltoLayer)
class TableResolveUIDSerializerRoot(TableResolveUIDSerializer):
    """Serializer for the site root"""
from plone.app.layout.navigation.navtree import buildFolderTree
from plone.app.layout.navigation.navtree import NavtreeStrategyBase
from plone.dexterity.interfaces import IDexterityContainer
from plone.restapi.interfaces import IExpandableElement
from plone.restapi.services import Service
from zope.component import adapter
from zope.interface import implementer
from zope.interface import Interface
@implementer(IExpandableElement)
@adapter(IDexterityContainer, Interface)
class ContextNavigation(object):
    """Expandable element returning the folderish children of the current
    context as a navigation tree."""
    def __init__(self, context, request):
        self.context = context
        self.request = request
    def __call__(self, expand=False):
        # Tree depth, overridable with ?expand.navigation.depth=N
        if self.request.form.get("expand.navigation.depth", False):
            depth = int(self.request.form["expand.navigation.depth"])
        else:
            depth = 1
        result = {
            "navigation": {
                "@id": "{}/@context-navigation".format(self.context.absolute_url())
            }
        }
        if not expand:
            return result
        root = self.context
        class Strategy(NavtreeStrategyBase):
            # restrict the tree to the current context subtree
            rootPath = "/".join(root.getPhysicalPath())
            showAllParents = False
        strategy = Strategy()
        query = {
            "path": {
                "query": "/".join(root.getPhysicalPath()),
                "depth": depth,
            },
            "is_folderish": True,
            "sort_on": "getObjPositionInParent",
        }
        # NOTE(review): passes the adapter itself (not self.context) as the
        # context argument of buildFolderTree — confirm this is intended.
        navtree = buildFolderTree(self, query=query, strategy=strategy)
        # NOTE(review): the expanded branch returns the tree directly rather
        # than nesting it under result["navigation"] like the unexpanded one;
        # NavigationGet below relies on this shape — confirm it is intended.
        items = {
            "@id": self.context.absolute_url(),
            "items": [self.generateNode(x) for x in navtree["children"]],
        }
        return items
    def generateNode(self, root):
        # root["item"] is a catalog brain (Title/getURL/Description).
        item = root["item"]
        res = {
            "title": item.Title,
            "@id": item.getURL(),
            "description": item.Description,
        }
        children = root.get("children", [])
        if children:
            res["items"] = [self.generateNode(x) for x in children]
        return res
class NavigationGet(Service):
    """@context-navigation endpoint: always returns the expanded tree."""
    def reply(self):
        navigation = ContextNavigation(self.context, self.request)
        return navigation(expand=True)
from asyncio import new_event_loop as loop
from typing import Optional
import click
from reduct_cli.export_impl.bucket import export_to_bucket
from reduct_cli.export_impl.folder import export_to_folder
from reduct_cli.utils.error import error_handle
from reduct_cli.utils.helpers import (
parse_path,
build_client,
)
# Shortcut: run a coroutine to completion on a fresh event loop.
run = loop().run_until_complete
# Reusable click options shared by the export subcommands.
start_option = click.option(
    "--start",
    help="Export records with timestamps newer than this time point in ISO format"
    " or Unix timestamp in microseconds",
)
stop_option = click.option(
    "--stop",
    help="Export records with timestamps older than this time point in ISO format"
    " or Unix timestamp in microseconds",
)
entries_option = click.option(
    "--entries",
    "-e",
    help="Export only these entries, separated by comma",
    default="",
)
include_option = click.option(
    "--include",
    "-I",
    help="Export only these records which have these labels with given values, "
    "separated by comma. Example: --include label1=values1,label2=value2",
    default="",
)
# Negative label filter shared by the export subcommands.
exclude_option = click.option(
    "--exclude",
    "-E",
    # typo fix in the user-visible help text: "DON NOT" -> "DO NOT"
    help="Export only these records which DO NOT have these labels with given values, "
    "separated by comma. Example: --exclude label1=values1,label2=value2",
    default="",
)
# Maximum number of records to copy (no limit when omitted).
limit_option = click.option(
    "--limit", "-l", help="Limit the number of records to export"
)
@click.group()
def export():
    """Export data from a bucket somewhere else"""
    # Command group only; the work happens in the `folder`/`bucket` subcommands.
@export.command()
@click.argument("src")
@click.argument("dest")
@stop_option
@start_option
@entries_option
@include_option
@exclude_option
@limit_option
@click.option(
    "--ext",
    help="Extension for exported files, if not specified, will be guessed from content type",
)
@click.option(
    "--with-metadata/--no-with-metadata",
    help="Export metadata along with the data",
    default=False,
)
@click.pass_context
def folder(
    ctx,
    src: str,
    dest: str,
    start: Optional[str],
    stop: Optional[str],
    entries: str,
    include: str,
    exclude: str,
    ext: Optional[str],
    with_metadata: bool,
    limit: Optional[int],
): # pylint: disable=too-many-arguments
    """Export data from SRC bucket to DEST folder
    SRC should be in the format of ALIAS/BUCKET_NAME.
    DEST should be a path to a folder.
    As result, the folder will contain a folder for each entry in the bucket.
    Each entry folder will contain a file for each record
    in the entry with the timestamp as the name.
    """
    alias_name, src_bucket = parse_path(src)
    client = build_client(
        ctx.obj["config_path"], alias_name, timeout=ctx.obj["timeout"]
    )
    # Shared context (parallel/timeout) comes from the top-level CLI group.
    with error_handle():
        run(
            export_to_folder(
                client,
                dest,
                src_bucket,
                parallel=ctx.obj["parallel"],
                start=start,
                stop=stop,
                entries=entries.split(","),
                include=include.split(","),
                exclude=exclude.split(","),
                ext=ext,
                timeout=ctx.obj["timeout"],
                with_metadata=with_metadata,
                limit=limit,
            )
        )
@export.command()  # parentheses added for consistency with the `folder` command
@click.argument("src")
@click.argument("dest")
@stop_option
@start_option
@entries_option
@include_option
@exclude_option
@limit_option
@click.pass_context
def bucket(
    ctx,
    src: str,
    dest: str,
    start: Optional[str],
    stop: Optional[str],
    entries: str,
    include: str,
    exclude: str,
    limit: Optional[int],
): # pylint: disable=too-many-arguments
    """Copy data from SRC to DEST bucket
    SRC and DEST should be in the format of ALIAS/BUCKET_NAME
    If the destination bucket doesn't exist, it is created with
    the settings of the source bucket."""
    with error_handle():
        # Source and destination may live on different servers (aliases).
        alias_name, src_bucket = parse_path(src)
        src_instance = build_client(
            ctx.obj["config_path"], alias_name, timeout=ctx.obj["timeout"]
        )
        alias_name, dest_bucket = parse_path(dest)
        dest_instance = build_client(
            ctx.obj["config_path"], alias_name, timeout=ctx.obj["timeout"]
        )
        run(
            export_to_bucket(
                src_bucket,
                dest_bucket,
                src_instance,
                dest_instance,
                parallel=ctx.obj["parallel"],
                start=start,
                stop=stop,
                entries=entries.split(","),
                include=include.split(","),
                exclude=exclude.split(","),
                timeout=ctx.obj["timeout"],
                limit=limit,
            )
        )
# pylint:disable=too-many-return-statements
from datetime import datetime, timezone
from typing import Union, Optional
# Time-interval thresholds, in seconds (a month is approximated as 30 days,
# a year as 12 such months).
MINUTE = 60
HOUR = MINUTE * 60
DAY = HOUR * 24
WEEK = DAY * 7
MONTH = DAY * 30
YEAR = MONTH * 12
def pretty_time_interval(seconds: Union[int, float]) -> str:
    """Print readable time interval ('---' for negative values)."""
    if seconds < 0:
        return "---"
    # (upper bound, divisor, unit) checked from the smallest scale upwards;
    # values exactly on a boundary stay in the smaller unit.
    scales = (
        (MINUTE, 1, "second"),
        (HOUR, MINUTE, "minute"),
        (DAY, HOUR, "hour"),
        (WEEK, DAY, "day"),
        (MONTH, WEEK, "week"),
        (YEAR, MONTH, "month"),
    )
    for upper_bound, divisor, unit in scales:
        if seconds <= upper_bound:
            return f"{round(seconds / divisor)} {unit}(s)"
    return f"{round(seconds / YEAR)} year(s)"
# Decimal (SI) size units, in bytes.
KB = 1000
MB = KB * 1000
GB = MB * 1000
TB = GB * 1000
def pretty_size(size: Union[int, float]) -> str:
    """Return human-readable size using decimal (SI) units.

    Raises:
        ValueError: if the size is negative.
    """
    size = int(size)
    if size < 0:
        raise ValueError("Size must be positive")
    # (upper bound, divisor, unit) from bytes upwards; sizes exactly on a
    # boundary are reported in the smaller unit.
    for upper_bound, divisor, unit in (
        (KB, 1, "B"),
        (MB, KB, "KB"),
        (GB, MB, "MB"),
        (TB, GB, "GB"),
    ):
        if size <= upper_bound:
            return f"{round(size / divisor)} {unit}"
    return f"{round(size / TB)} TB"
def print_datetime(time_stamp: int, valid: bool):
    """Format a microsecond UNIX timestamp as an ISO string in UTC,
    or '---' when *valid* is false."""
    if not valid:
        return "---"
    moment = datetime.fromtimestamp(time_stamp / 1000_000, tz=timezone.utc)
    return moment.strftime("%Y-%m-%dT%H:%M:%S")
def parse_ci_size(size: Optional[str]) -> Optional[int]:
    """Parse a case-insensitive size string (e.g. '10MB') into bytes.

    Returns None for None input; raises ValueError when no unit suffix
    (TB/GB/MB/KB/B) is found.
    """
    if size is None:
        return None
    size = size.strip().upper()
    # Largest suffix first so "TB" is not mistaken for a bare "B".
    for suffix, factor in (("TB", TB), ("GB", GB), ("MB", MB), ("KB", KB), ("B", 1)):
        if suffix in size:
            return int(size.replace(suffix, "")) * factor
    raise ValueError(f"Failed to parse {size}")
import asyncio
import signal
import time
from asyncio import Semaphore, Queue
from datetime import datetime
from pathlib import Path
from typing import Tuple, List
from click import Abort
from reduct import EntryInfo, Bucket, Client
from rich.progress import Progress
from reduct_cli.config import read_config, Alias
from reduct_cli.utils.consoles import error_console
from reduct_cli.utils.humanize import pretty_size
signal_queue = Queue()
def get_alias(config_path: Path, name: str) -> Alias:
    """Return the alias *name* from the config file.

    An ``ALIAS/BUCKET`` path is also accepted; only the part before the
    first "/" is used.

    Raises:
        click.Abort: if the alias is not found in the config.
    """
    conf = read_config(config_path)
    # Use the same key for the existence check and the lookup: the original
    # checked only the first path segment but indexed with the full name,
    # which raised an unhandled KeyError for "ALIAS/..." inputs.
    key = name.split("/")[0]
    if key not in conf.aliases:
        error_console.print(f"Alias '{name}' doesn't exist")
        raise Abort()
    alias_: Alias = conf.aliases[key]
    return alias_
def build_client(config_path: Path, alias: str, timeout: float) -> Client:
    """Build a reduct ``Client`` from the alias stored in the config file.

    Raises click.Abort (via get_alias) when the alias does not exist.
    """
    alias_ = get_alias(config_path, alias)
    return Client(alias_.url, api_token=alias_.token, timeout=timeout)
def parse_path(path) -> Tuple[str, str]:
    """Split an ``ALIAS/RESOURCE`` path into its two components.

    Raises:
        RuntimeError: if the path does not have exactly one "/".
    """
    parts = path.split("/")
    if len(parts) != 2:
        raise RuntimeError(
            f"Path {path} has wrong format. It must be 'ALIAS/BUCKET_NAME'"
        )
    alias, resource = parts
    return alias, resource
async def read_records_with_progress(
    entry: EntryInfo,
    bucket: Bucket,
    progress: Progress,
    sem: Semaphore,
    **kwargs,
): # pylint: disable=too-many-locals
    """Read records from entry and show progress
    Args:
        entry (EntryInfo): Entry to read records from
        bucket (Bucket): Bucket to read records from
        progress (Progress): Progress bar to show progress
        sem (Semaphore): Semaphore to limit parallelism
    Keyword Args:
        start (Optional[str]): Start time point (ISO string or microseconds)
        stop (Optional[str]): Stop time point (ISO string or microseconds)
        timeout (int): Timeout for read operation
        parallel (int): Number of parallel tasks
        include (List[str]): "label=value" filters a record must match
        exclude (List[str]): "label=value" filters a record must not match
        limit (Optional[int]): maximum number of records to read
    Yields:
        Record: Record from entry
    """
    def _to_timestamp(date: str) -> int:
        # Accept either a raw microsecond timestamp or an ISO date string.
        try:
            return int(date)
        except ValueError:
            return int(
                datetime.fromisoformat(date.replace("Z", "+00:00")).timestamp()
                * 1000_000
            )
    params = {
        "start": _to_timestamp(kwargs["start"])
        if kwargs["start"]
        else entry.oldest_record,
        "stop": _to_timestamp(kwargs["stop"])
        if kwargs["stop"]
        else entry.latest_record,
        "include": {},
        "exclude": {},
        # Query TTL is scaled by parallelism so queries don't expire while
        # other workers hold the semaphore.
        "ttl": kwargs["timeout"] * kwargs["parallel"],
    }
    if "limit" in kwargs and kwargs["limit"]:
        params["limit"] = int(kwargs["limit"])
    # include/exclude items come as "label=value" strings.
    for item in kwargs["include"]:
        if item:
            key, value = item.split("=")
            params["include"][key] = value
    for item in kwargs["exclude"]:
        if item:
            key, value = item.split("=")
            params["exclude"][key] = value
    last_time = params["start"]
    task = progress.add_task(
        f"Entry '{entry.name}' waiting", total=params["stop"] - params["start"]
    )
    async with sem:
        exported_size = 0
        count = 0
        stats = []
        speed = 0
        def stop_signal():
            signal_queue.put_nowait("stop")
        # Graceful stop on Ctrl-C / SIGTERM: the queue is checked before
        # handing out every record below.
        asyncio.get_event_loop().add_signal_handler(signal.SIGINT, stop_signal)
        asyncio.get_event_loop().add_signal_handler(signal.SIGTERM, stop_signal)
        async for record in bucket.query(
            entry.name,
            **params,
        ):
            if signal_queue.qsize() > 0:
                # stop signal received
                progress.update(
                    task,
                    description=f"Entry '{entry.name}' "
                    f"(copied {count} records ({pretty_size(exported_size)}), stopped",
                    refresh=True,
                )
                return
            exported_size += record.size
            stats.append((record.size, time.time()))
            # Throughput estimated over a sliding window of recent records.
            if len(stats) > 100:
                speed = sum(s[0] for s in stats) / (stats[-1][1] - stats[0][1])
                stats = stats[-50:]
            yield record
            # count is incremented after the update, so the description shows
            # the number of records fully processed before this one.
            progress.update(
                task,
                description=f"Entry '{entry.name}' "
                f"(copied {count} records ({pretty_size(exported_size)}), "
                f"speed {pretty_size(speed) if speed else '? B'}/s)",
                advance=record.timestamp - last_time,
                refresh=True,
            )
            last_time = record.timestamp
            count += 1
    progress.update(task, total=1, completed=True)
def filter_entries(entries: List[EntryInfo], names: List[str]) -> List[EntryInfo]:
    """Keep only the entries whose name matches one of *names*.

    An empty list — or a single empty string, as produced by "".split(",")
    — disables filtering. A trailing "*" in a name acts as a prefix
    wildcard; names are stripped of surrounding whitespace.
    """
    if not names or len(names) == 0:
        return entries
    if len(names) == 1 and names[0] == "":
        return entries
    def _matches(entry):
        for pattern in names:
            pattern = pattern.strip()
            if pattern == entry.name:
                return True
            if pattern.endswith("*") and entry.name.startswith(pattern[:-1]):
                return True
        return False
    return [entry for entry in entries if _matches(entry)]
from datetime import datetime
from typing import Optional, List, Dict
from aiohttp import ClientSession
from pydantic import BaseModel
from reduct.bucket import BucketInfo, BucketSettings, Bucket
from reduct.http import HttpClient
from reduct.error import ReductError
class Defaults(BaseModel):
    """Default server settings"""
    # Mirrors the "defaults" section of the server /info payload.
    bucket: BucketSettings
    """settings for a new bucket"""
class ServerInfo(BaseModel):
    """Server stats"""
    # Parsed from GET /info (see Client.info).
    version: str
    """version of the storage in x.y.z format"""
    bucket_count: int
    """number of buckets in the storage"""
    usage: int
    """stored data in bytes"""
    uptime: int
    """storage uptime in seconds"""
    oldest_record: int
    """UNIX timestamp of the oldest record in microseconds"""
    latest_record: int
    """UNIX timestamp of the latest record in microseconds"""
    defaults: Defaults
    """Default server settings"""
class BucketList(BaseModel):
    """List of buckets"""
    # Payload of GET /list (see Client.list).
    buckets: List[BucketInfo]
class Permissions(BaseModel):
    """Token permission"""
    # Serialized as the body of POST /tokens/<name> (see Client.create_token).
    full_access: bool
    """full access to manage buckets and tokens"""
    read: Optional[List[str]]
    """list of buckets with read access"""
    write: Optional[List[str]]
    """list of buckets with write access"""
class Token(BaseModel):
    """Token for authentication"""
    # Summary entry as returned by GET /tokens (see Client.get_token_list).
    name: str
    """name of token"""
    created_at: datetime
    """creation time of token"""
class FullTokenInfo(Token):
    """Full information about token with permissions"""
    # Returned by GET /tokens/<name> and GET /me.
    permissions: Permissions
    """permissions of token"""
class TokenList(BaseModel):
    """List of tokens"""
    # Payload of GET /tokens (see Client.get_token_list).
    tokens: List[Token]
class TokenCreateResponse(BaseModel):
    """Response from creating a token"""
    # Payload of POST /tokens/<name> (see Client.create_token).
    value: str
    """token for authentication"""
class Client:
    """HTTP Client for Reduct Storage HTTP API"""
    def __init__(
        self,
        url: str,
        api_token: Optional[str] = None,
        timeout: Optional[float] = None,
        extra_headers: Optional[Dict[str, str]] = None,
        **kwargs,
    ):
        """
        Constructor
        Args:
            url: URL to connect to the storage
            api_token: API token if the storage uses it for authorization
            timeout: total timeout for connection, request and response in seconds
            extra_headers: extra headers to send with each request
        Kwargs:
            session: an external aiohttp session to use for requests
        Examples:
            >>> client = Client("http://127.0.0.1:8383")
            >>> info = await client.info()
        """
        self._http = HttpClient(
            url.rstrip("/"), api_token, timeout, extra_headers, **kwargs
        )
    async def __aenter__(self):
        # NOTE(review): reaches into HttpClient private attributes to open
        # the session lazily — consider a public open/close API on HttpClient.
        self._http._session = ClientSession(timeout=self._http._timeout)
        return self
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # NOTE(review): relies on ClientSession.close() returning a falsy
        # value so exceptions are not suppressed — confirm.
        return await self._http._session.close()
    async def info(self) -> ServerInfo:
        """
        Get high level server info
        Returns:
            ServerInfo:
        Raises:
            ReductError: if there is an HTTP error
        """
        return ServerInfo.parse_raw(await self._http.request_all("GET", "/info"))
    async def list(self) -> List[BucketInfo]:
        """
        Return a list of all buckets on server
        Returns:
            List[BucketInfo]
        Raises:
            ReductError: if there is an HTTP error
        """
        return BucketList.parse_raw(
            await self._http.request_all("GET", "/list")
        ).buckets
    async def get_bucket(self, name: str) -> Bucket:
        """
        Load a bucket to work with
        Args:
            name: name of the bucket
        Returns:
            Bucket
        Raises:
            ReductError: if there is an HTTP error
        """
        # The GET only validates that the bucket exists before wrapping it.
        await self._http.request_all("GET", f"/b/{name}")
        return Bucket(name, self._http)
    async def create_bucket(
        self,
        name: str,
        settings: Optional[BucketSettings] = None,
        exist_ok: bool = False,
    ) -> Bucket:
        """
        Create a new bucket
        Args:
            name: a name for the bucket
            settings: settings for the bucket If None, the server
                default settings is used.
            exist_ok: the client raises no exception if the bucket
                already exists and returns it
        Returns:
            Bucket: created bucket
        Raises:
            ReductError: if there is an HTTP error
        """
        data = settings.json() if settings else None
        try:
            await self._http.request_all("POST", f"/b/{name}", data=data)
        except ReductError as err:
            # 409 (already exists) is tolerated when exist_ok is set
            if err.status_code != 409 or not exist_ok:
                raise err
        return Bucket(name, self._http)
    async def get_token_list(self) -> List[Token]:
        """
        Get a list of all tokens
        Returns:
            List[Token]
        Raises:
            ReductError: if there is an HTTP error
        """
        return TokenList.parse_raw(
            await self._http.request_all("GET", "/tokens")
        ).tokens
    async def get_token(self, name: str) -> FullTokenInfo:
        """
        Get a token by name
        Args:
            name: name of the token
        Returns:
            Token
        Raises:
            ReductError: if there is an HTTP error
        """
        return FullTokenInfo.parse_raw(
            await self._http.request_all("GET", f"/tokens/{name}")
        )
    async def create_token(self, name: str, permissions: Permissions) -> str:
        """
        Create a new token
        Args:
            name: name of the token
            permissions: permissions for the token
        Returns:
            str: token value
        Raises:
            ReductError: if there is an HTTP error
        """
        return TokenCreateResponse.parse_raw(
            await self._http.request_all(
                "POST", f"/tokens/{name}", data=permissions.json()
            )
        ).value
    async def remove_token(self, name: str) -> None:
        """
        Delete a token
        Args:
            name: name of the token
        Raises:
            ReductError: if there is an HTTP error
        """
        await self._http.request_all("DELETE", f"/tokens/{name}")
    async def me(self) -> FullTokenInfo:
        """
        Get information about the current token
        Returns:
            FullTokenInfo
        Raises:
            ReductError: if there is an HTTP error
        """
        return FullTokenInfo.parse_raw(await self._http.request_all("GET", "/me"))
import asyncio
from dataclasses import dataclass
from functools import partial
from typing import Dict, Callable, AsyncIterator, Awaitable
from aiohttp import ClientResponse
@dataclass
class Record:
    """Record in a query"""
    # Plain data holder: the read/read_all callables perform the actual I/O.
    timestamp: int
    """UNIX timestamp in microseconds"""
    size: int
    """size of data"""
    last: bool
    """last record in the query. Deprecated: doesn't work for some cases"""
    content_type: str
    """content type of data"""
    read_all: Callable[[None], Awaitable[bytes]]
    """read all data"""
    read: Callable[[int], AsyncIterator[bytes]]
    """read data in chunks"""
    labels: Dict[str, str]
    """labels of record"""
# Header prefix used to transport record labels.
LABEL_PREFIX = "x-reduct-label-"
# Chunk size for streaming reads, in bytes.
CHUNK_SIZE = 512_000
def parse_record(resp: ClientResponse, last=True) -> Record:
    """Build a Record from a single-record HTTP response.

    Labels are decoded from the "x-reduct-label-*" headers; the payload is
    read lazily through the response object.
    """
    headers = resp.headers
    labels = {}
    for header_name, header_value in headers.items():
        if header_name.startswith(LABEL_PREFIX):
            labels[header_name[len(LABEL_PREFIX) :]] = header_value
    return Record(
        timestamp=int(headers["x-reduct-time"]),
        size=int(headers["content-length"]),
        last=last,
        read_all=resp.read,
        read=resp.content.iter_chunked,
        labels=labels,
        content_type=headers.get("content-type", "application/octet-stream"),
    )
def _parse_header_as_csv_row(row: str) -> (int, str, Dict[str, str]):
    """Parse a batched-record header row.

    The row has the form ``<content-length>,<content-type>,<label>=<value>,...``
    where an item that itself contains commas is wrapped in double quotes,
    e.g. ``"x=a,b"``.

    The original implementation appended the opening quoted fragment to the
    accumulator twice, dropped the separating commas lost by split(","),
    and never appended the closing fragment, so quoted items parsed wrong.

    Returns:
        (content_length, content_type, labels)
    """
    items = []
    quoted = None  # accumulator for a quoted item split apart by ","
    for part in row.split(","):
        if quoted is None:
            if part.startswith('"'):
                if part.endswith('"') and len(part) >= 2:
                    # quoted item without commas inside
                    items.append(part[1:-1])
                else:
                    quoted = part[1:]
            else:
                items.append(part)
        elif part.endswith('"'):
            # closing fragment: restore the commas removed by split()
            items.append(quoted + "," + part[:-1])
            quoted = None
        else:
            quoted += "," + part
    content_length = int(items[0])
    content_type = items[1]
    labels = {}
    for label in items[2:]:
        if "=" in label:
            name, value = label.split("=", 1)
            labels[name] = value
    return content_length, content_type, labels
async def _read(buffer: bytes, n: int):
    """Yield *buffer* in chunks of at most *n* bytes.

    Yields control cooperatively via sleep(0) between chunks. For an empty
    buffer a single empty chunk is yielded before stopping.
    """
    count = 0
    size = len(buffer)
    n = min(n, size)
    while True:
        chunk = buffer[count : count + n]
        count += len(chunk)
        # shrink the last chunk to the remaining bytes
        n = min(n, size - count)
        yield chunk
        await asyncio.sleep(0)
        if count == size:
            break
async def _read_all(buffer):
    """Return the whole *buffer* by draining _read() in CHUNK_SIZE pieces."""
    data = b""
    async for chunk in _read(buffer, CHUNK_SIZE):
        data += chunk
    return data
async def parse_batched_records(resp: ClientResponse) -> AsyncIterator[Record]:
    """Parse batched records from response"""
    # Each record is described by one "x-reduct-time-<ts>" header whose
    # value is a CSV row: content-length, content-type, label=value, ...
    records_total = sum(
        1 for header in resp.headers if header.startswith("x-reduct-time-")
    )
    records_count = 0
    head = resp.method == "HEAD"
    for name, value in resp.headers.items():
        if name.startswith("x-reduct-time-"):
            timestamp = int(name[14:])
            content_length, content_type, labels = _parse_header_as_csv_row(value)
            last = False
            records_count += 1
            if records_count == records_total:
                # last record in batched records read in client code
                read_func = resp.content.iter_chunked
                read_all_func = resp.read
                if resp.headers.get("x-reduct-last", "false") == "true":
                    # last record in query
                    last = True
            else:
                # batched records must be read in order, so it is safe to read them here
                # instead of reading them in the user code with an async iterator.
                # The batched records are small if they are not the last.
                # The last batched record is read in the async generator in chunks.
                if head:
                    # HEAD responses carry no body
                    buffer = b""
                else:
                    buffer = await _read_response(resp, content_length)
                read_func = partial(_read, buffer)
                read_all_func = partial(_read_all, buffer)
            record = Record(
                timestamp=timestamp,
                size=content_length,
                last=last,
                content_type=content_type,
                labels=labels,
                read_all=read_all_func,
                read=read_func,
            )
            yield record
async def _read_response(resp, content_length):
    """Read exactly *content_length* bytes of the response body in
    CHUNK_SIZE pieces.

    NOTE(review): if the stream ends early, resp.content.read() returns
    b"" and this loop never terminates — confirm the server guarantees
    the advertised length.
    """
    buffer = b""
    count = 0
    while True:
        n = min(CHUNK_SIZE, content_length - count)
        chunk = await resp.content.read(n)
        buffer += chunk
        count += len(chunk)
        if count == content_length:
            break
    return buffer
# Reducto
**Reducto** is a command line utility to extract statistical features from your
_python_ source code.
- Count the number of lines in a project (_.py files only_)
- Number of functions/methods.
- Average function length.
- Docstring lines.
- Comment lines.
- Blank lines.
- Source files (number of .py files).
- Source lines (number of lines corresponding to python code, total number of lines minus docstrings, comments and blank lines).
*For the average package the code should be fast enough: the average run time is about 0.5 seconds against the
top PyPI packages by number of downloads.*
A simple analysis run on the 4000 most-downloaded PyPI packages is available in the following repository:
[top_pypi_source_code_stats](https://github.com/plaguss/top_pypi_source_code_stats).
### Development status

[](https://github.com/psf/black)
[](https://codecov.io/gh/plaguss/reducto)
[](https://reducto.readthedocs.io/en/latest/?badge=latest)
### Installation
**reducto** is available in [PyPI](https://pypi.org/project/reducto/), run from your terminal:
$ pip install reducto
Install with extras to print tables via [tabulate](https://pypi.org/project/tabulate/):
$ pip install reducto[tabulate]
_Currently tested on python 3.8 only_.
### Usage
To start with the default example:
$ reducto {source_file_or_directory}
The default mode will print the base `json` report to the command line using `pprint.pprint`.
Let's see an example running on the reducto source code (for the current version):
```sh
$ reducto reducto
{'reducto': {'average_function_length': 6,
'blank_lines': 208,
'comment_lines': 20,
'docstring_lines': 803,
'lines': 1973,
'number_of_functions': 108,
'source_files': 7,
'source_lines': 942}}
```
The formats include `json` as a default, but when installing _tabulate_ dependency,
the formats defined there are available too. For example:
```sh
$ reducto reducto --format="rst"
========= ======= =========== ======== =========== ========= ======= ========== ========
package lines number source docstring comment blank average source
of lines lines lines lines function files
functions length
========= ======= =========== ======== =========== ========= ======= ========== ========
reducto 1973 108 942 803 20 208 6 7
========= ======= =========== ======== =========== ========= ======= ========== ========
```
Or copying directly the output from executing `reducto reducto/ --format "github" --percentage`
to this README.md:
| package | lines | number_of_functions | source_lines | docstring_lines | comment_lines | blank_lines | average_function_length | source_files |
|-----------|---------|-----------------------|----------------|-------------------|-----------------|---------------|---------------------------|----------------|
| reducto | 1973 | 108 | 48% | 41% | 1% | 11% | 6 | 7 |
Typing the help command may show the different formats defined currently, but for more
info, the [documentation](#Documentation) may be more helpful.
```sh
$ reducto --help
usage: reducto [-h] [-v]
[-f {json,simple,plain,grid,fancy_grid,github,pipe,orgtbl,jira,presto,pretty,psql,rst,mediawiki,moinmoin,youtrack,html,unsafehtml,latex,latex_raw,latex_booktabs,latex_longtable,tsv,textile}]
[--grouped | --ungrouped] [-o OUTPUT] [-p]
[target]
positional arguments:
target Path to execute the program into. Must be either a python package (directory containing an __init__.py) or a python source file {SRC.py}
optional arguments:
-h, --help show this help message and exit
-v, --version Show the version of the program.
-f {json,simple,plain,grid,fancy_grid,github,pipe,orgtbl,jira,presto,pretty,psql,rst,mediawiki,moinmoin,youtrack,html,unsafehtml,latex,latex_raw,latex_booktabs,latex_longtable,tsv,textile}, --format {json,simple,plain,grid,fancy_grid,github,pipe,orgtbl,jira,presto,pretty,psql,rst,mediawiki,moinmoin,youtrack,html,unsafehtml,latex,latex_raw,latex_booktabs,latex_longtable,tsv,textile}
Format for the report type.
--grouped Return the results separated by source files, or grouped for the whole package. Only used when the target path is a package.
--ungrouped Opposite of --grouped.
-o OUTPUT, --output OUTPUT
Full path of the report to be generated. If not given, redirects to stdout.
-p, --percentage Report the number of lines as percentage.
```
### Documentation
Read the documentation on [ReadTheDocs](https://reducto.readthedocs.io/en/latest/).
### License
[MIT License](https://github.com/plaguss/reducto/blob/main/LICENSE)
| /reducto-1.0.3.tar.gz/reducto-1.0.3/README.md | 0.61555 | 0.879043 | README.md | pypi |
from numpy import pi, arange, arctan2, sqrt, meshgrid, linspace, sin, cos, array, log, ones, histogram2d, logical_and, zeros_like, ones_like, degrees, mod
def ConvertToCylindrical(array_in, x_min, x_max, y_min, y_max, theta_offset = 0.0, min_r = None, oversample_th = 1.0, oversample_r = 1.0):
    """Resample a 2-D Cartesian array onto a cylindrical (theta, r) grid.

    Both a forward mapping (input pixels binned into output cells) and a
    reverse mapping (output cells looking up their source input pixel) are
    accumulated, so cells missed by one mapping are filled by the other.

    Args:
        array_in: 2-D data array on a regular x/y grid.
        x_min, x_max, y_min, y_max: extent of array_in.
        theta_offset: start angle of the output theta axis, in degrees.
        min_r: minimum radius of the output grid; defaults to the smallest
            radial step found in the input.
        oversample_th, oversample_r: factors shrinking the automatically
            chosen theta/r steps.

    Returns:
        (output_grid, output_norm, normalized, extent): accumulated weighted
        data, accumulated weights, their ratio where the reverse mapping is
        defined, and the extent [theta_min, theta_max, r_min, r_max].
    """
    x_axis = linspace(x_min, x_max, array_in.shape[1])
    y_axis = linspace(y_min, y_max, array_in.shape[0])
    x_stepsize = float(x_max - x_min) / (array_in.shape[1] - 1)
    y_stepsize = float(y_max - y_min) / (array_in.shape[0] - 1)
    # x, y have the same shape as array_in, holding the coordinates of
    # every data point.
    x, y = meshgrid(x_axis, y_axis)
    r_in = sqrt(x**2 + y**2)
    theta_in = arctan2(y, x)
    # dtheta is approximately (1/r^2) * sqrt(x^2 dy^2 + y^2 dx^2).
    # Bug fix: the second term used x_stepsize instead of x_stepsize**2,
    # which is dimensionally inconsistent.
    dtheta_min = (1.0/r_in[r_in>0]**2 * sqrt(x[r_in>0]**2 * y_stepsize**2 + y[r_in>0]**2 * x_stepsize**2)).min() * 180.0/pi
    th_step = dtheta_min / oversample_th
    # dr is (1/r) * (|x| dx + |y| dy)
    dr_min = (1.0/r_in[r_in>0] * ( abs(x[r_in>0] * x_stepsize) + abs(y[r_in>0] * y_stepsize) )).min()
    r_step = dr_min / oversample_r
    if min_r is None:  # identity check instead of "== None"
        min_r = dr_min
    th_out_min = theta_offset
    th_out_max = theta_offset + 360.0 + th_step
    th_out_axis = arange(th_out_min, th_out_max, th_step)
    r_in_min = r_in.min()
    r_in_max = r_in.max()
    r_out_min = max(r_in.min(), min_r)
    r_out_max = r_in_max
    r_out_axis = arange(r_out_min, r_out_max, r_step)
    th_out, r_out = meshgrid(th_out_axis, r_out_axis)
    output_grid = zeros_like(th_out)
    output_norm = zeros_like(th_out)
    # Forward mapping: polar coordinates of every input pixel.
    forward_r_list = (r_in - r_in_min).flatten()
    forward_th_list = (mod(degrees(theta_in) - theta_offset, 360.0) + theta_offset).flatten()
    # Reverse mapping: Cartesian coordinates of every output cell.
    reverse_x = (r_out * cos(th_out*pi/180.0))
    reverse_y = (r_out * sin(th_out*pi/180.0))
    inshape = r_in.shape
    reverse_norm = (ones_like(th_out)).flatten()
    hist2d, xedges, yedges = histogram2d(reverse_x.flatten(), reverse_y.flatten(), \
        bins = (inshape[1],inshape[0]), range=((x_min, x_max + x_stepsize),(y_min,y_max+y_stepsize)), weights=reverse_norm)
    # How many output cells map back to each input pixel (reverse), plus one
    # for the forward mapping, so each input pixel's total weight is 1.
    input_count = hist2d.T
    input_count += 1.0
    forward_weights = (array_in/input_count).flatten()
    forward_norm = (1.0 / input_count).flatten()
    outshape = th_out.shape
    # Forward pass: counts and weights of every input pixel added to the
    # corresponding output cell.
    hist2d, xedges, yedges = histogram2d(forward_r_list, forward_th_list, \
        bins = (outshape[0],outshape[1]), range=((r_out_min,r_out_max+r_step),(th_out_min,th_out_max+th_step)), weights=forward_weights)
    output_grid += hist2d
    hist2d, xedges, yedges = histogram2d(forward_r_list, forward_th_list, \
        bins = (outshape[0],outshape[1]), range=((r_out_min,r_out_max+r_step),(th_out_min,th_out_max+th_step)), weights=forward_norm)
    output_norm += hist2d
    # Reverse pass: index of the input pixel under each output cell, masked
    # to cells that fall inside the input grid.
    reverse_x_lookup = ((reverse_x - x_min) / x_stepsize).astype(int)
    reverse_y_lookup = ((reverse_y - y_min) / y_stepsize).astype(int)
    reverse_mask = logical_and((reverse_x_lookup >= 0), (reverse_x_lookup < r_in.shape[1]))
    reverse_mask = logical_and(reverse_mask, (reverse_y_lookup >= 0))
    reverse_mask = logical_and(reverse_mask, (reverse_y_lookup < r_in.shape[0]))
    reverse_x_lookup = reverse_x_lookup[reverse_mask]
    reverse_y_lookup = reverse_y_lookup[reverse_mask]
    reverse_weights = (array_in/input_count)[reverse_y_lookup, reverse_x_lookup]
    reverse_norm = (1.0/input_count)[reverse_y_lookup, reverse_x_lookup]
    output_grid[reverse_mask] += reverse_weights
    output_norm[reverse_mask] += reverse_norm
    normalized = zeros_like(output_grid)
    normalized[reverse_mask] = output_grid[reverse_mask] / output_norm[reverse_mask]
    extent = [th_out.min(), th_out.max(), r_out.min(), r_out.max()]
    return output_grid, output_norm, normalized, extent
if __name__ == '__main__':
    # Visual smoke test: build sin^2(r) on a rectangular grid, convert it to
    # cylindrical (r, theta) coordinates, and display the raw transform, the
    # normalization weights, the normalized result, and the original data.
    from pylab import imshow, show, xlabel, ylabel, colorbar, figure, title
    x, y = meshgrid(linspace(-50., 150., 201), linspace(-20., 30., 51))
    r = sqrt(x**2 + y**2)
    th = arctan2(y, x)
    d = sin(r*pi/25.0)**2
    #d = cos(th)**2 * 1.0/(r + 1.0)**2
    array_out, data_mask, normalized, extent = ConvertToCylindrical(d, -50, 150, -20, 30)
    # transposing the meshgrid because meshgrid(x,y) has shape (len(y), len(x))
    # Figure 1: unnormalized forward+reverse binned counts
    figure()
    imshow(array_out, aspect = 'auto', origin='lower', extent = extent, interpolation="nearest")
    xlabel('$\\theta ({}^{\circ})$', size='large')
    ylabel('$r$', size='large')
    title('$sin^2(r)$ in cylindrical coords', size='large')
    colorbar()
    # Figure 2: the accumulated per-pixel weights (normalization mask)
    figure()
    imshow(data_mask, aspect = 'auto', origin='lower', extent=extent, interpolation="nearest")
    xlabel('$\\theta ({}^{\circ})$', size='large')
    ylabel('$r$', size='large')
    title('norm for $sin^2(r)$ in cylindrical coords', size='large')
    colorbar()
    # Figure 3: counts divided by weights (the physically meaningful map)
    figure()
    imshow(normalized, aspect = 'auto', origin='lower', extent=extent, interpolation="nearest")
    xlabel('$\\theta ({}^{\circ})$', size='large')
    ylabel('$r$', size='large')
    title('normalized transform for $sin^2(r)$ in cylindrical coords', size='large')
    colorbar()
    # Figure 4: the untransformed input for comparison
    figure()
    imshow(d, origin='lower', extent=[-50,150,-20,30], interpolation="nearest")
    xlabel('$x$', size='large')
    ylabel('$y$', size='large')
    title('$sin^2(r)$ in rectangular coords', size='large')
    colorbar()
    # Repeat the transform with a -90 degree theta offset to check that the
    # angular origin is handled correctly
    a2, m2, n2, e2 = ConvertToCylindrical(d, -50, 150, -20, 30, theta_offset=-90.0)
    figure()
    imshow(a2, aspect = 'auto', origin='lower', extent=e2, interpolation="nearest")
    xlabel('$\\theta ({}^{\circ})$', size='large')
    ylabel('$r$', size='large')
    title('$sin^2(r)$ in cylindrical coords with -90.0 deg offset', size='large')
    colorbar()
    print("show?")
    figure()
    imshow(n2, aspect = 'auto', origin='lower', extent=e2, interpolation="nearest")
    #imshow(m2.T, origin='lower', extent=e2, interpolation="nearest")
    xlabel('$\\theta ({}^{\circ})$', size='large')
    ylabel('$r$', size='large')
    title('normalized $sin^2(r)$ in cylindrical coords with -90.0 deg offset', size='large')
    colorbar()
    show()
from PIL import Image, ImageDraw
import numpy
def annular_mask_antialiased(shape, center, inner_radius, outer_radius,
                             background_value=0.0, mask_value=1.0,
                             oversampling=8):
    # type: (Tuple[int, int], Tuple[float, float], float, float, float, float, int) -> numpy.ndarray
    """
    Draw an antialiased annulus (ring) mask as a float array.

    Takes the following:

    * *shape* tuple: (x, y) - this is the size of the output image
    * *center* tuple: (x, y)
    * *inner_radius*: float
    * *outer_radius*: float
    * *background_value*: float (the image is initialized to this value)
    * *mask_value*: float (the annulus is drawn with this value)
    * *oversampling*: int (the mask is drawn on a canvas this many times bigger
       than the final size, then resampled down to give smoother edges)

    Returns a numpy array of shape (shape[0], shape[1]).

    NOTE(review): PIL's ``Image.new`` takes (width, height) but
    ``numpy.asarray`` returns (height, width); the final reshape assumes the
    two agree, which is only guaranteed for square shapes — confirm before
    relying on non-square output.
    """
    # Create a 32-bit float image
    intermediate_shape = (shape[0]*int(oversampling), shape[1]*int(oversampling))
    im = Image.new('F', intermediate_shape, color=background_value)
    # Making a handle to the drawing tool
    draw = ImageDraw.Draw(im)
    # Have to scale everything in the problem by the oversampling
    outer_radius_r = outer_radius * oversampling
    inner_radius_r = inner_radius * oversampling
    center_r = (center[0] * oversampling, center[1] * oversampling)
    # Calculate bounding box for outer circle
    x_outer_min = center_r[0] - outer_radius_r
    x_outer_max = center_r[0] + outer_radius_r
    y_outer_min = center_r[1] - outer_radius_r
    y_outer_max = center_r[1] + outer_radius_r
    outer_bbox = [x_outer_min, y_outer_min, x_outer_max, y_outer_max]
    # Calculate bounding box for inner circle
    x_inner_min = center_r[0] - inner_radius_r
    x_inner_max = center_r[0] + inner_radius_r
    y_inner_min = center_r[1] - inner_radius_r
    y_inner_max = center_r[1] + inner_radius_r
    inner_bbox = [x_inner_min, y_inner_min, x_inner_max, y_inner_max]
    # Draw the circles: outer one first
    draw.ellipse(outer_bbox, fill=mask_value)
    # Now overlay the inner circle (punches the hole out of the disk)
    draw.ellipse(inner_bbox, fill=background_value)
    # Now bring it back to size, with antialiasing
    #im.thumbnail(shape, Image.ANTIALIAS)
    # This produced artifacts - output.max() was > mask_value by 10% or more!
    # Using numpy reshape instead (rebinning) - see Scipy cookbook
    output = numpy.asarray(im)
    # block-average each oversampling x oversampling tile down to one pixel
    output = output.reshape(shape[0], oversampling, shape[1], oversampling).mean(1).mean(2)
    return output
def pie_bounding_box(center, radius, start_angle, end_angle):
    """
    Bounding box ``[xmin, ymin, xmax, ymax]`` of a pie slice.

    Angles are Euclidean radians, counter-clockwise; pass negative angles
    when feeding PIL's clockwise ``pieslice``.  The extrema are found among
    the center, the two arc endpoints, and any cardinal (axis-aligned)
    directions that fall inside the angular range.
    """
    cx, cy = center[0], center[1]

    def rim_point(angle):
        # point on the circle's rim at the given angle
        return [cx + numpy.cos(angle) * radius, cy + numpy.sin(angle) * radius]

    quarter = numpy.pi / 2.0  # angular spacing of the cardinal points
    angles = [start_angle, end_angle]
    angles.extend(numpy.arange(quarter * numpy.ceil(start_angle / quarter),
                               end_angle, quarter))
    candidates = numpy.array([list(center)] + [rim_point(a) for a in angles],
                             dtype='float')
    xmin = candidates[:, 0].min()
    xmax = candidates[:, 0].max()
    ymin = candidates[:, 1].min()
    ymax = candidates[:, 1].max()
    return [xmin, ymin, xmax, ymax]
def sector_cut_antialiased(shape, center, inner_radius, outer_radius, start_angle=0.0, end_angle=numpy.pi,
                           mirror=True, background_value=0.0, mask_value=1.0,
                           oversampling=8):
    # type: (Tuple[int, int], Tuple[float, float], float, float, float, float, int) -> numpy.ndarray
    """
    Draw an antialiased annular sector ("pie slice with a hole") mask.

    Takes the following:

    * *shape* tuple: (x, y) - this is the size of the output image
    * *center* tuple: (x, y)
    * *inner_radius*: float
    * *outer_radius*: float
    * *start_angle*: float (radians) start of sector cut range
    * *end_angle*: float (radians) end of sector cut range (with defaults,
      covers full circle)
    * *mirror*: bool (take cut on both sides of origin)
    * *background_value*: float (the image is initialized to this value)
    * *mask_value*: float (the sector is drawn with this value)
    * *oversampling*: int (the mask is drawn on a canvas this many times
      bigger than the final size, then resampled down to give smoother edges)
    """
    # PIL's pieslice measures angles in degrees, clockwise: negate to convert
    start_deg = -numpy.degrees(start_angle)
    end_deg = -numpy.degrees(end_angle)

    # Work on an oversampled 32-bit float canvas for smooth edges
    scale = int(oversampling)
    canvas_size = (shape[0] * scale, shape[1] * scale)
    im = Image.new('F', canvas_size, color=background_value)
    draw = ImageDraw.Draw(im)

    cx = center[0] * oversampling
    cy = center[1] * oversampling

    def circle_bbox(radius):
        # axis-aligned bounding box of the scaled circle, as PIL wants it
        r = radius * oversampling
        return [cx - r, cy - r, cx + r, cy + r]

    # Fill the outer pie slice(s)
    outer_box = circle_bbox(outer_radius)
    draw.pieslice(outer_box, end_deg, start_deg, fill=mask_value)
    if mirror:
        # same slice rotated 180 degrees about the center
        draw.pieslice(outer_box, end_deg - 180.0, start_deg - 180.0, fill=mask_value)

    # Punch out the inner circle
    draw.ellipse(circle_bbox(inner_radius), fill=background_value)

    # Downsample by block-averaging (numpy rebin).  im.thumbnail with
    # ANTIALIAS was tried but produced overshoot artifacts: output.max()
    # exceeded mask_value by 10% or more.
    sampled = numpy.asarray(im)
    return sampled.reshape(shape[0], oversampling, shape[1], oversampling).mean(1).mean(2)
def check_sector_cut_antialiased(shape, center, inner_radius, outer_radius, start_angle=0.0, end_angle=numpy.pi,
                         mirror=True, background_value=0.0, mask_value=1.0,
                         oversampling=8):
    # type: (Tuple[int, int], Tuple[float, float], float, float, float, float, int) -> numpy.ndarray
    """
    Debug variant of sector_cut_antialiased: draws the same oversampled
    sector mask but displays it with im.show() instead of downsampling and
    returning an array.

    Takes the following:

    * *shape* tuple: (x, y) - this is the size of the output image
    * *center* tuple: (x, y)
    * *inner_radius*: float
    * *outer_radius*: float
    * *start_angle*: float (radians) start of sector cut range
    * *end_angle*: float (radians) end of sector cut range (with defaults, covers full circle)
    * *mirror*: bool (take cut on both sides of origin)
    * *background_value*: float (the image is initialized to this value)
    * *mask_value*: float (the annulus is drawn with this value)
    * *oversampling*: int (the mask is drawn on a canvas this many times bigger
      than the final size, then resampled down to give smoother edges)
    """
    # PIL angles are degrees, clockwise; negate to convert from radians CCW
    d_start = -numpy.degrees(start_angle)
    d_end = -numpy.degrees(end_angle)
    # Create a 32-bit float image
    intermediate_shape = (shape[0]*int(oversampling), shape[1]*int(oversampling))
    im = Image.new('F', intermediate_shape, color=background_value)
    # Making a handle to the drawing tool
    draw = ImageDraw.Draw(im)
    # Have to scale everything in the problem by the oversampling
    outer_radius_r = outer_radius * oversampling
    inner_radius_r = inner_radius * oversampling
    center_r = (center[0] * oversampling, center[1] * oversampling)
    # Calculate bounding box for outer circle
    x_outer_min = center_r[0] - outer_radius_r
    x_outer_max = center_r[0] + outer_radius_r
    y_outer_min = center_r[1] - outer_radius_r
    y_outer_max = center_r[1] + outer_radius_r
    outer_bbox = [x_outer_min, y_outer_min, x_outer_max, y_outer_max]
    draw.pieslice(outer_bbox, d_end, d_start, fill=mask_value)
    if mirror:
        # mirrored slice 180 degrees away
        draw.pieslice(outer_bbox, d_end-180.0, d_start-180.0, fill=mask_value)
    # Calculate bounding box for inner circle
    x_inner_min = center_r[0] - inner_radius_r
    x_inner_max = center_r[0] + inner_radius_r
    y_inner_min = center_r[1] - inner_radius_r
    y_inner_max = center_r[1] + inner_radius_r
    inner_bbox = [x_inner_min, y_inner_min, x_inner_max, y_inner_max]
    # Now overlay the inner circle
    draw.ellipse(inner_bbox, fill=background_value)
    # display the (still oversampled) mask for visual inspection; no return
    im.show()
def check():
    # Visual smoke test: draw a single 45-90 degree sector (no mirror) with a
    # 10..20 pixel annular range on a 128x128 canvas; opens a viewer window.
    check_sector_cut_antialiased(
        (128,128),
        (64, 60),
        10,
        20,
        start_angle=numpy.pi/4,
        end_angle=numpy.pi/2,
        mirror=False,
        background_value=0.0,
        mask_value=100.0,
        oversampling=8
    )

if __name__ == "__main__":
    check()
from dataflow.lib import hzf_readonly_stripped as hzf
from dataflow.lib import unit
from .sansdata import SansData
# Map from short reduction metadata keys to the path inside the NeXus/HDF
# file where the corresponding value is stored.  Every key listed here is
# looked up for each entry by readSANSNexuz below; missing paths produce
# a None value in the resulting metadata dict.
metadata_lookup = {
    "det.dis": "DAS_logs/detectorPosition/softPosition",
    "resolution.lmda" : "instrument/monochromator/wavelength",
    "resolution.dlmda": "instrument/monochromator/wavelength_error",
    "det.beamx": "instrument/detector/beam_center_x",
    "det.beamy": "instrument/detector/beam_center_y",
    "det.pixeloffsetx": "instrument/detector/x_offset",
    "det.pixelsizex": "instrument/detector/x_pixel_size",
    "det.pixeloffsety": "instrument/detector/y_offset",
    "det.pixelsizey": "instrument/detector/y_pixel_size",
    "analysis.intent": "DAS_logs/trajectoryData/intent",
    "analysis.filepurpose": "DAS_logs/trajectoryData/filePurpose",
    "sample.name": "DAS_logs/sample/name",
    "sample.description": "DAS_logs/sample/description",
    "sample.labl": "DAS_logs/sample/description", # compatibility
    "polarization.front": "DAS_logs/frontPolarization/direction",
    "polarization.back": "DAS_logs/backPolarization/direction",
    "run.filename": "DAS_logs/trajectoryData/fileName",
    "run.filePrefix": "DAS_logs/trajectoryData/filePrefix",
    "run.experimentScanID": "DAS_logs/trajectory/experimentScanID",
    "run.instrumentScanID": "DAS_logs/trajectory/instrumentScanID",
    "run.detcnt": "control/detector_counts",
    "run.rtime": "control/count_time",
    "run.moncnt": "control/monitor_counts",
    "run.atten": "instrument/attenuator/index",
    "analysis.groupid": "DAS_logs/trajectoryData/groupid",
    "run.configuration": "DAS_logs/configuration/key",
    "sample.thk": "DAS_logs/sample/thickness",
    "adam.voltage": "DAS_logs/adam4021/voltage",
    "sample.temp": "DAS_logs/temp/primaryNode/average_value",
    "resolution.ap1": "DAS_logs/geometry/sourceAperture",
    "resolution.ap2": "instrument/sample_aperture/size",
    "resolution.ap12dis": "instrument/source_aperture/distance",
    "sample.position": "instrument/sample_aperture/distance",
    "rfflipperpowersupply.voltage": "DAS_logs/RFFlipperPowerSupply/actualVoltage/average_value",
    "rfflipperpowersupply.frequency": "DAS_logs/RFFlipperPowerSupply/frequency",
    "huberRotation.softPosition": "DAS_logs/huberRotation/softPosition",
    "start_time":"start_time",
    "end_time":"end_time",
    }
# Units to which the corresponding raw fields are converted on load (see
# data_as / readSANSNexuz); keys not listed here keep their native units.
# Note: the "sample.thk" entry was previously listed twice; the duplicate
# has been removed (dict literals silently keep only the last occurrence).
unit_specifiers = {
    "det.dis": "cm",
    "det.pixelsizex": "cm",
    "det.pixeloffsetx": "cm",
    "det.pixelsizey": "cm",
    "det.pixeloffsety": "cm",
    "sample.thk": "cm",
    "resolution.ap1": "cm",
    "resolution.ap2": "cm"
}
def process_sourceAperture(field, units):
    """
    Convert a sourceAperture field to the requested *units*.

    The field values are stored as strings such as "5.0 cm"; the numeric
    part is parsed from each value and the unit token (if present) from the
    first value is used as the source unit for the conversion.
    """
    import numpy as np
    def handler(v):
        # np.float was removed in numpy >= 1.20 (AttributeError); the builtin
        # float parses the leading numeric token identically.
        return float(v.split()[0])
    handle_values = np.vectorize(handler)
    value = handle_values(field.value)
    units_from = ""
    v0 = field.value[0].split()
    if len(v0) > 1:
        # e.g. "5.0 cm" -> source unit "cm"; bare numbers leave it empty
        units_from = v0[1]
    converter = unit.Converter(units_from)
    return converter(value, units)
def data_as(field, units):
    """
    Return the value of *field* converted to the requested *units*.

    The sourceAperture field is stored as strings like "5.0 cm" and needs
    special parsing; every other field carries its source unit in its
    "units" attribute.
    """
    leaf = field.name.split('/')[-1]
    if leaf == 'sourceAperture':
        return process_sourceAperture(field, units)
    converter = unit.Converter(field.attrs.get('units', ''))
    return converter(field.value, units)
def readSANSNexuz(input_file, file_obj=None):
    """
    Load all entries from a NeXus file into SansData sets.

    *input_file* is the file name; if *file_obj* is given it is an open
    file-like object to read from instead.  Returns a list of SansData
    objects, one per frame of each entry's area detector.
    """
    datasets = []
    # 'nexus' instead of 'file' to avoid shadowing the builtin
    nexus = hzf.File(input_file, file_obj)
    for entryname, entry in nexus.items():
        areaDetector = entry['data/areaDetector'].value
        shape = areaDetector.shape
        if len(shape) < 2 or len(shape) > 3:
            # (an unreachable "return" previously followed this raise)
            raise ValueError("areaDetector data must have dimension 2 or 3")
        if len(shape) == 2:
            # promote a single frame to a 1-frame stack so the loop below
            # handles both cases uniformly
            shape = (1,) + shape
            areaDetector = areaDetector.reshape(shape)
        for i in range(shape[0]):
            metadata = {}
            for mkey in metadata_lookup:
                field = entry.get(metadata_lookup[mkey], None)
                if field is not None:
                    if mkey in unit_specifiers:
                        # convert to the canonical units for this key
                        field = data_as(field, unit_specifiers[mkey])
                    else:
                        field = field.value
                    if field.dtype.kind == 'f':
                        field = field.astype("float")
                    elif field.dtype.kind == 'i':
                        field = field.astype("int")
                    if len(field) == shape[0]:
                        # per-frame values: pick the one for this frame
                        metadata[mkey] = field[i]
                    else:
                        metadata[mkey] = field
                else:
                    # missing path: record None so downstream code can test it
                    metadata[mkey] = field
            metadata['entry'] = entryname
            dataset = SansData(data=areaDetector[i].copy(), metadata=metadata)
            datasets.append(dataset)
    return datasets
import gzip
from collections import OrderedDict
import datetime
import numpy as np
import json
import io
import sys
IS_PY3 = sys.version_info[0] >= 3
def _b(s):
if IS_PY3:
return s.encode('utf-8')
else:
return s
from dataflow.lib import octave
class RawData(object):
    """
    Raw DCS measurement: the histogram array plus everything else as metadata.

    The *data* dict is consumed: "histodata" is popped out and kept as the
    histogram, and the remaining dict becomes (and is mutated as) the
    metadata, gaining "name" and "entry" keys.
    """
    def __init__(self, name, data):
        self.histodata = data.pop("histodata")
        self.name = name
        data["name"] = name
        data["entry"] = "entry"
        self.metadata = data

    def todict(self):
        """Metadata converted to plain JSON-serializable python types."""
        return _toDictItem(self.metadata)

    def get_plottable(self):
        """Minimal "params" plottable description for the web UI."""
        return {"entry": "entry", "type": "params", "params": {"name": self.name}}

    def get_metadata(self):
        """Same conversion as todict; kept for the dataflow accessor protocol."""
        return _toDictItem(self.metadata)
class EfTwoThetaData(object):
    """
    2-d detector counts on a final-energy (Ef) vs detector-angle grid.

    *ef* and *twotheta* are the axis value arrays; *data* holds one value
    per (Ef, two-theta) pair.
    """
    def __init__(self, name, data, ef=None, twotheta=None, metadata=None):
        def axis(label, values):
            # axis descriptor in the form the web plotter expects
            return {
                "label": label,
                "values": values,
                "min": values.min(),
                "max": values.max(),
                "dim": values.shape[0],
            }

        self.metadata = metadata
        self.data = data
        self.ef = ef
        self.twotheta = twotheta
        self.name = name
        self.xaxis = axis(u"Ef [meV]", self.ef)
        self.yaxis = axis("Detector Angle [degrees]", self.twotheta)

    def get_plottable(self):
        """Return the 2-d plottable description used by the web UI."""
        dims = {
            "ymin": self.yaxis["min"],
            "ymax": self.yaxis["max"],
            "ydim": self.yaxis["dim"],
            "xmin": self.xaxis["min"],
            "xmax": self.xaxis["max"],
            "xdim": self.xaxis["dim"],
            "zmin": self.data.min(),
            "zmax": self.data.max(),
        }
        return {
            "title": self.name,
            "dims": dims,
            "type": "2d",
            "xlabel": self.xaxis["label"],
            "ylabel": self.yaxis["label"],
            "z": [self.data.flatten().tolist()],
        }

    def get_metadata(self):
        """Metadata converted to plain JSON-serializable python types."""
        return _toDictItem(self.metadata)
class EQData(object):
    """
    Data binned onto a (|Q|, energy-transfer) grid.

    *metadata* must provide "Q_min" and "Q_max" (x-axis range) and "Ei"
    (the y axis spans -Ei .. +Ei); *data* is a 2-d array with shape
    (number of Q bins, number of energy bins).
    """
    def __init__(self, name, data, metadata):
        self.metadata = metadata
        self.data = data
        self.name = name
        self.xaxis = {
            "label": u"|Q| [A⁻¹]",
            "values": np.linspace(metadata["Q_min"], metadata["Q_max"], data.shape[0]),
            "min": metadata["Q_min"],
            "max": metadata["Q_max"],
            "dim": data.shape[0]
        }
        self.yaxis = {
            "label": "Ei-Ef [meV]",
            "values": np.linspace(-metadata["Ei"], metadata["Ei"], data.shape[1]),
            "min": -metadata["Ei"],
            "max": metadata["Ei"],
            "dim": data.shape[1]
        }

    def get_plottable(self):
        """Return the 2-d plottable description used by the web UI."""
        # (removed unused locals Ei, Q_max and EQ_data that were assigned
        # from metadata but never read)
        output = {
            "title": self.name,
            "dims": {
                "ymin": self.yaxis["min"],
                "ymax": self.yaxis["max"],
                "ydim": self.yaxis["dim"],
                "xmin": self.xaxis["min"],
                "xmax": self.xaxis["max"],
                "xdim": self.xaxis["dim"],
                "zmin": self.data.min(),
                "zmax": self.data.max()
            },
            "type": "2d",
            "xlabel": self.xaxis["label"],
            "ylabel": self.yaxis["label"],
            "z": [self.data.flatten().tolist()]
        }
        return output

    def get_metadata(self):
        """Metadata converted to plain JSON-serializable python types."""
        return _toDictItem(self.metadata)
class DCS1dData(object):
    """
    1-d reduced DCS dataset: x vs v with uncertainty dv and resolution dx.
    """
    # attributes serialized by to_dict / get_metadata
    properties = ['x', 'v', 'dx', 'dv', 'xlabel', 'vlabel', 'xunits', 'vunits', 'metadata']

    def __init__(self, x, v, dx=0, dv=0, xlabel="", vlabel="", xunits="", vunits="", metadata=None):
        """
        *x*, *v*: data arrays; *dx*: x resolution; *dv*: 1-sigma uncertainty
        on v; labels/units annotate the columns for plotting and export.
        """
        self.x = x
        self.v = v
        self.dx = dx
        self.dv = dv
        self.xlabel = xlabel
        self.vlabel = vlabel
        self.xunits = xunits
        self.vunits = vunits
        self.metadata = metadata if metadata is not None else {}

    def to_dict(self):
        """All declared properties as plain JSON-serializable python types."""
        props = dict([(p, getattr(self, p, None)) for p in self.properties])
        return _toDictItem(props)

    def get_plottable(self):
        """Return the 1-d plottable description (with error bars) for the web UI."""
        label = "%s: %s" % (self.metadata['name'], self.metadata['entry'])
        xdata = self.x.tolist()
        ydata = self.v.tolist()
        yerr = self.dv.tolist()
        data = [[x, y, {"yupper": y+dy, "ylower": y-dy, "xupper": x, "xlower": x}] for x,y,dy in zip(xdata, ydata, yerr)]
        plottable = {
            "type": "1d",
            "title": self.metadata.get("name", "DCS 1d data"),
            "options": {
                "axes": {
                    "xaxis": {"label": self.xlabel},
                    "yaxis": {"label": self.vlabel}
                },
                "series": [{"label": label}]
            },
            "data": [data]
        }
        return plottable

    def get_metadata(self):
        return self.to_dict()

    def export(self):
        """Serialize to a column-text format with JSON-comment headers."""
        fid = io.BytesIO()
        #fid.write(_b("# %s\n" % json.dumps(_toDictItem(self.metadata)).strip("{}")))
        metadata = {"name": self.metadata["name"]}
        fid.write(_b("# %s\n" % json.dumps(metadata).strip("{}")))
        columns = {"columns": [self.xlabel, self.vlabel, "uncertainty", "resolution"]}
        units = {"units": [self.xunits, self.vunits, self.vunits, self.xunits]}
        fid.write(_b("# %s\n" % json.dumps(columns).strip("{}")))
        fid.write(_b("# %s\n" % json.dumps(units).strip("{}")))
        np.savetxt(fid, np.vstack([self.x, self.v, self.dv, self.dx]).T, fmt="%.10e")
        fid.seek(0)
        # metadata is a plain dict, so the previous getattr() calls could
        # never find these keys and always returned the defaults; use dict
        # lookup so the exported name/entry reflect the actual metadata.
        name = self.metadata.get("name", "default_name")
        entry = self.metadata.get("entry", "default_entry")
        return {"name": name, "entry": entry, "export_string": fid.read(), "file_suffix": ".dcs.dat"}
class Parameters(dict):
    # A plain dict that satisfies the dataflow output interface: both
    # accessors simply hand back the dict itself (it is already
    # JSON-serializable).
    def get_metadata(self):
        return self
    def get_plottable(self):
        return self
def readDCS(name, fid):
    """
    Read a gzip-compressed Octave-binary DCS file from the open handle *fid*
    and wrap the decoded contents in a RawData named *name*.
    """
    decompressed = gzip.GzipFile(fileobj=fid)
    contents = octave.read_octave_binary(decompressed)
    return RawData(name, contents)
def _toDictItem(obj):
if isinstance(obj, np.integer):
obj = int(obj)
elif isinstance(obj, np.floating):
obj = float(obj)
elif isinstance(obj, np.ndarray):
obj = obj.tolist()
elif isinstance(obj, datetime.datetime):
obj = [obj.year, obj.month, obj.day, obj.hour, obj.minute, obj.second]
elif isinstance(obj, list):
obj = [_toDictItem(a) for a in obj]
elif isinstance(obj, dict):
obj = dict([(k, _toDictItem(v)) for k, v in obj.items()])
return obj | /reductus-0.1b2-py3-none-any.whl/dcsred/dcsdata.py | 0.422028 | 0.301073 | dcsdata.py | pypi |
from __future__ import print_function
def processing_order(pairs, n=0):
    """
    Order the work in a workflow.

    Given a set of n items to evaluate numbered from zero through n-1,
    and dependency pairs

    :Parameters:

        *pairs* : [(int, int), ...]

            Pairwise dependencies amongst items.

        *n* : int

            Number of items, or 0 if we don't care about any item that is not
            mentioned in the list of pairs

    :Returns:

        *order* : [int, ...]

            Permutation which satisfies the partial order requirements.
    """
    ordered = _dependencies(pairs)
    if n:
        if any(item >= n for item in ordered):
            raise ValueError("Not all dependencies are in the set")
        remaining = set(range(n)) - set(ordered)
    else:
        remaining = set(k for p in pairs for k in p) - set(ordered)
    return ordered + list(remaining)


def _dependencies(pairs):
    """
    Topological sort of the items mentioned in *pairs*, where (a, b) means
    a depends on b.  Raises ValueError if the dependencies contain a cycle.
    """
    order = []
    if pairs != []:
        left, right = (set(side) for side in zip(*pairs))
    else:
        left, right = [], []
    while pairs != []:
        # items appearing only on the right depend on nothing unresolved
        independent = right - left
        if not independent:
            raise ValueError("Cyclic dependencies amongst %s"
                             % ", ".join(str(s) for s in left))
        # candidates: items that depend on something independent
        dependent = set(a for a, b in pairs if b in independent)
        pairs = [(a, b) for a, b in pairs if b not in independent]
        if not pairs:
            batch = dependent
        else:
            left, right = (set(side) for side in zip(*pairs))
            # only emit candidates with no remaining dependencies
            batch = dependent - left
        order += batch
    # dependencies were collected most-dependent first; callers want the
    # independent items first
    order.reverse()
    return order
# ========= Test code ========
def _check(msg, pairs, n):
    """
    Verify that processing_order(pairs, n) yields all n items arranged so
    that every (before, after) constraint in *pairs* is satisfied; raise
    RuntimeError on failure, otherwise print the order found.
    """
    order = processing_order(pairs, n=n)
    if len(set(order)) != n:
        raise RuntimeError("%s is missing items" % msg)
    for before, after in pairs:
        if order.index(before) >= order.index(after):
            raise RuntimeError("%s expect %s before %s in %s for %s"
                               % (msg, before, after, order, pairs))
    print("%s %s"%(msg, str(order)))
def test():
    """Self-test: exercise processing_order on empty, chained, cyclic and
    large/deep dependency sets (raises on any failure)."""
    import numpy as np
    # No dependencies
    _check("test empty", [], 9)
    # No chain dependencies
    _check("test2", [(4, 1), (3, 2), (7, 6)], 9)
    # Some chain dependencies
    pairs = [(4, 0), (0, 1), (1, 2), (7, 0), (3, 5)]
    _check("test1", pairs, 9)
    _check("test1 numpy", np.array(pairs), 9)
    # Cycle test
    pairs = [(1, 4), (4, 3), (4, 5), (5, 1)]
    try:
        _ = processing_order(pairs, n=9)
    except ValueError:
        pass
    else:
        raise Exception("test3 expect ValueError exception for %s" % (pairs,))
    # large test for gross speed check
    A = np.random.randint(4000, size=(1000, 2))
    A[:, 1] += 4000  # Avoid cycles
    _check("test-large", A, 8000)
    # depth tests: a single chain of 200 links in both orientations
    k = 200
    A = np.array([range(0, k), range(1, k + 1)]).T
    _check("depth-1", A, 201)
    A = np.array([range(1, k + 1), range(0, k)]).T
    _check("depth-2", A, 201)

if __name__ == "__main__":
    test()
from __future__ import print_function
import sys
import re
from contextlib import contextmanager
from docutils.core import publish_parts
from docutils.writers.html4css1 import HTMLTranslator
from docutils.nodes import SkipNode
def rst2html(rst, part="whole", math_output="html"):
    r"""
    Convert restructured text into simple html.

    Valid *math_output* formats for formulas include:

    - html
    - mathml
    - mathjax

    See the `docutils documentation <http://docutils.sourceforge.net/docs/user/config.html#math-output>`_
    for details.

    The following *part* choices are available:

    - whole: the entire html document
    - html_body: document division with title and contents and footer
    - body: contents only

    There are other parts, but they don't make sense alone:

        subtitle, version, encoding, html_prolog, header, meta,
        html_title, title, stylesheet, html_subtitle, html_body,
        body, head, body_suffix, fragment, docinfo, html_head,
        head_prefix, body_prefix, footer, body_pre_docinfo, whole
    """
    # Ick! mathjax doesn't work properly with math-output, and the
    # others don't work properly with math_output!
    if math_output == "mathjax":
        settings = {"math_output": math_output}
    else:
        settings = {"math-output": math_output}

    # math2html and mathml do not support \frac12
    rst = replace_compact_fraction(rst)
    # mathml, html do not support \tfrac
    if math_output in ("mathml", "html"):
        rst = rst.replace(r'\tfrac', r'\frac')

    # convert inline $...$ to :math:`...` before handing off to docutils
    rst = replace_dollar(rst)
    # keep docutils' <system_message> error nodes out of the rendered html
    with suppress_html_errors():
        parts = publish_parts(source=rst, writer_name='html',
                              settings_overrides=settings)
    return parts[part]
@contextmanager
def suppress_html_errors():
    r"""
    Context manager for keeping error reports out of the generated HTML.

    Within the context, system message nodes in the docutils parse tree
    will be ignored.  After the context, the usual behaviour will be restored.
    """
    visit_system_message = HTMLTranslator.visit_system_message
    HTMLTranslator.visit_system_message = _skip_node
    try:
        yield None
    finally:
        # restore even when the body raises, so the class-level monkeypatch
        # never leaks into subsequent docutils runs
        HTMLTranslator.visit_system_message = visit_system_message
def _skip_node(self, node):
    """Replacement visit_system_message: log the error location and skip the node."""
    try:
        # dict.keys() returns a view on python 3; indexing it raised
        # TypeError, which the except clause silently swallowed, so the
        # diagnostic was never printed.  next(iter(...)) works on both.
        doc = next(iter(node.document.nameids.keys()))
        print("error while processing line %s of %s rst" % (node.line+1, doc),
              file=sys.stderr)
    except Exception:
        pass
    raise SkipNode
_compact_fraction = re.compile(r"(\\[cdt]?frac)([0-9])([0-9])")
def replace_compact_fraction(content):
    r"""
    Expand the LaTeX shorthand \frac12 (also \cfrac, \dfrac, \tfrac) into
    the explicit \frac{1}{2} form for latex parsers that do not accept it.
    """
    def expand(match):
        macro, numerator, denominator = match.groups()
        return "%s{%s}{%s}" % (macro, numerator, denominator)
    return _compact_fraction.sub(expand, content)
# inline math delimited by unescaped $...$, bounded by start/whitespace/paren
# on the left and end/whitespace/punctuation on the right
_dollar = re.compile(r"(?:^|(?<=\s|[(]))[$]([^\n]*?)(?<![\\])[$](?:$|(?=\s|[.,;)\\]))")
# escaped \$ which should render as a literal dollar sign
_notdollar = re.compile(r"\\[$]")
def replace_dollar(content):
    r"""
    Convert dollar signs to inline math markup in rst.

    ``$expr$`` becomes ``:math:`expr```, while ``\$`` is unescaped back to a
    plain ``$``.  Dollar amounts like ``$37`` are left untouched.
    """
    marked = _dollar.sub(r":math:`\1`", content)
    return _notdollar.sub("$", marked)
def test_dollar():
    """Self-test for replace_dollar: inline math at various boundaries,
    escaped dollars, and literal dollar amounts that must be left alone."""
    assert replace_dollar(u"no dollar") == u"no dollar"
    assert replace_dollar(u"$only$") == u":math:`only`"
    assert replace_dollar(u"$first$ is good") == u":math:`first` is good"
    assert replace_dollar(u"so is $last$") == u"so is :math:`last`"
    assert replace_dollar(u"and $mid$ too") == u"and :math:`mid` too"
    assert replace_dollar(u"$first$, $mid$, $last$") == u":math:`first`, :math:`mid`, :math:`last`"
    assert replace_dollar(u"dollar\$ escape") == u"dollar$ escape"
    assert replace_dollar(u"dollar \$escape\$ too") == u"dollar $escape$ too"
    assert replace_dollar(u"spaces $in the$ math") == u"spaces :math:`in the` math"
    assert replace_dollar(u"emb\ $ed$\ ed") == u"emb\ :math:`ed`\ ed"
    # math must be delimited by whitespace/punctuation, not mid-word
    assert replace_dollar(u"$first$a") == u"$first$a"
    assert replace_dollar(u"a$last$") == u"a$last$"
    # currency amounts are not math
    assert replace_dollar(u"$37") == u"$37"
    assert replace_dollar(u"($37)") == u"($37)"
    assert replace_dollar(u"$37 - $43") == u"$37 - $43"
    assert replace_dollar(u"($37, $38)") == u"($37, $38)"
    assert replace_dollar(u"a $mid$dle a") == u"a $mid$dle a"
    assert replace_dollar(u"a ($in parens$) a") == u"a (:math:`in parens`) a"
    assert replace_dollar(u"a (again $in parens$) a") == u"a (again :math:`in parens`) a"

if __name__ == "__main__":
    test_dollar()
from __future__ import print_function
import sys
import posixpath
import zipfile
import json
import numpy
IS_PY3 = sys.version_info[0] >= 3

if IS_PY3:
    def bytes_to_str(s):
        # type: (AnyStr) -> str
        """Decode *s* from UTF-8 when it is a bytes object; str passes through."""
        if isinstance(s, bytes):
            return s.decode('utf-8')
        return s
else: # python 2.x
    def bytes_to_str(s):
        # type: (AnyStr) -> str
        """On python 2, str is already bytes: nothing to do."""
        return s

# keep a handle on the real open(), since File/Node define their own open
builtin_open = open

__version__ = "0.0.1"
class Node(object):
    """
    Base class for nodes in the read-only, HDF5-like zip hierarchy.

    A node knows its *root* (the File object) and its absolute posix-style
    *path* within the archive; all storage access (listdir/isdir/exists/
    open) is delegated to the root.
    """
    # per-directory JSON file holding that node's attribute dictionary
    _attrs_filename = ".attrs"
    def __init__(self, parent_node=None, path="/", nxclass=None, attrs=None):
        # NOTE(review): nxclass and attrs are accepted for signature
        # compatibility (presumably with a writable implementation) but are
        # not used here -- confirm before removing.
        self.root = self if parent_node is None else parent_node.root
        self.readonly = self.root.readonly
        if path.startswith("/"):
            # absolute path
            self.path = path
        else:
            # relative
            self.path = posixpath.join(parent_node.path, path)
    def makeAttrs(self):
        """Load and return this node's attributes from its .attrs JSON file."""
        attr_data = self.root.open(posixpath.join(self.path, self._attrs_filename), "r").read()
        return json.loads(bytes_to_str(attr_data))
    @property
    def parent(self):
        # the node one directory level up in the hierarchy
        return self.root[posixpath.dirname(self.name)]
    @property
    def groups(self):
        # mapping of child group name -> Group object
        return dict([(gn, Group(self, gn)) for gn in self.groupnames])
    @property
    def groupnames(self):
        # child names that are directories, i.e. subgroups
        return [x for x in self.root.listdir(self.path) if self.root.isdir(posixpath.join(self.path, x))]
    @property
    def name(self):
        # the full path doubles as the node name (h5py convention)
        return self.path
    def keys(self):
        # child items; names containing "." (.attrs, *.link, ...) are internal
        return [x for x in self.root.listdir(self.path) if not "." in x]
    def values(self):
        keys = self.keys()
        return [self[k] for k in keys]
    def items(self):
        keys = self.keys()
        return [(k, self[k]) for k in keys]
    def __contains__(self, key):
        return self.root.exists(posixpath.join(self.path, key))
    def __getitem__(self, path):
        """ get an item based only on its path.
        Can assume that next-to-last segment is a group (dataset is lowest level)
        """
        if path.startswith("/"):
            # absolute path
            full_path = path
        else:
            # relative
            full_path = posixpath.join(self.path, path)
        #os_path = posixpath.join(self.os_path, full_path.lstrip("/"))
        if self.root.exists(full_path):
            if self.root.isdir(full_path):
                # it's a group
                return Group(self, full_path)
            elif self.root.exists(full_path + ".link"):
                # it's a link
                return makeSoftLink(self, full_path)
            else:
                # it's a field
                return FieldFile(self, full_path)
        else:
            # the item doesn't exist
            raise KeyError(path)
    def get(self, path, default_value):
        """Like __getitem__, but return *default_value* instead of raising KeyError."""
        try:
            value = self.__getitem__(path)
            return value
        except KeyError:
            return default_value
    def add_field(self, path, **kw):
        # NOTE(review): constructs a FieldFile and discards it; on this
        # read-only store this has no lasting effect.
        FieldFile(self, path, **kw)
    def add_group(self, path, nxclass, attrs={}):
        # NOTE(review): mutable default for attrs -- harmless here since the
        # dict is never mutated, but worth confirming before reuse.
        Group(self, path, nxclass, attrs)
class File(Node):
    """
    Read-only root of the hierarchy, backed by a zip archive.

    Opens the archive, reads the top-level attributes, and provides the
    path-lookup primitives (isdir/listdir/exists/read/getsize/open) that
    every Node delegates to via self.root.
    """
    def __init__(self, filename, file_obj=None):
        # readonly must be set before Node.__init__, which copies it
        self.readonly = True
        Node.__init__(self, parent_node=None, path="/")
        if file_obj is None:
            file_obj = builtin_open(filename, mode='rb')
        self.zipfile = zipfile.ZipFile(file_obj)
        self.attrs = self.makeAttrs()
        self.filename = filename
        self.mode = "r"
    def flush(self):
        # might make this do writezip someday.
        pass
    def isdir(self, path):
        """ abstraction for looking up paths:
        should work for unpacked directories and packed zip archives """
        path = path.lstrip("/")
        if path == "":
            return True # root path
        else:
            # zip members that are directories end with "/" in the name list
            filenames = self.root.zipfile.namelist()
            return (path.rstrip("/") + "/") in filenames
    def listdir(self, path):
        """ abstraction for looking up paths:
        should work for unpacked directories and packed zip archives """
        path = path.strip("/")
        return [posixpath.basename(fn.rstrip("/"))
                for fn in self.zipfile.namelist()
                if posixpath.dirname(fn.rstrip("/")) == path]
    def exists(self, path):
        # True when path is present as a member file or as a directory
        path = path.strip("/")
        filenames = self.root.zipfile.namelist()
        return path in filenames or self.isdir(path)
    def read(self, path):
        """Return the raw contents of the member at *path*."""
        return self.open(path, "r").read()
    def getsize(self, path):
        # uncompressed size of the member, in bytes
        path = path.lstrip("/")
        return self.zipfile.getinfo(path).file_size
    def open(self, path, mode):
        # NOTE(review): *mode* is ignored -- members are always opened "r".
        path = path.lstrip("/")
        return self.zipfile.open(path, "r")
    def __repr__(self):
        return "<HDZIP file \"%s\" (mode %s)>" % (self.filename, self.mode)
    def close(self):
        # there seems to be only one read-only mode
        self.zipfile.close()
class Group(Node):
    """A directory node in the zip hierarchy, analogous to an HDF5 group."""
    def __init__(self, node, path, nxclass="NXCollection", attrs={}):
        """
        *node*: parent node; *path*: group path, absolute or relative to the
        parent.  *nxclass* and *attrs* are accepted for interface
        compatibility but are not used by this read-only implementation.
        """
        Node.__init__(self, parent_node=node, path=path)
        # Node.__init__ already resolved self.path from node.path and path;
        # a duplicate of that computation previously repeated here was
        # redundant and has been removed.
        self.attrs = self.makeAttrs()

    def __repr__(self):
        return "<HDZIP group \"" + self.path + "\">"
class FieldFile(object):
    """
    A dataset (leaf) node: the data lives in one archive member, with its
    attributes in a sibling "<name>.attrs" JSON member.  The array value is
    parsed lazily on first access and cached.
    """
    # text output format per numpy dtype kind (used by the writer counterpart)
    _formats = {
        'S': '%s',
        'f': '%.8g',
        'i': '%d',
        'u': '%d',
        'b': '%d'}
    # suffix of the JSON member holding this field's attributes
    _attrs_suffix = ".attrs"
    def __init__(self, node, path, **kw):
        self.root = node.root
        if not path.startswith("/"):
            # relative path:
            path = posixpath.join(node.path, path)
        self.path = path
        attr_data = self.root.open(self.path + self._attrs_suffix, "r").read()
        self.attrs = json.loads(bytes_to_str(attr_data))
        # cache for the lazily-loaded array (see the value property)
        self._value = None
    def __repr__(self):
        return ("<HDZIP field \"%s\" %s \"%s\">"
                % (self.name, str(self.attrs['shape']), self.attrs['dtype']))
    def __getitem__(self, slice_def):
        # slicing a field slices its (lazily loaded) array value
        return self.value.__getitem__(slice_def)
    def __setitem__(self, slice_def, newvalue):
        # this implementation is strictly read-only
        raise Exception("read only")
    # promote a few attrs items to python object attributes:
    @property
    def shape(self):
        return self.attrs.get('shape', None)
    @property
    def dtype(self):
        return self.attrs.get('dtype', None)
    @property
    def name(self):
        # full path doubles as the field name (h5py convention)
        return self.path
    @property
    def parent(self):
        return self.root[posixpath.dirname(self.name)]
    @property
    def value(self):
        """
        The field contents as a numpy array: decoded from binary or
        tab-separated text according to the stored attributes, reshaped to
        attrs['shape'] when possible, and cached after the first read.

        NOTE(review): if self.root.open itself raises, the finally clause
        will hit an unbound ``infile`` (NameError masking the original
        error) -- confirm whether that path can occur in practice.
        """
        if self._value is None:
            attrs = self.attrs
            target = self.path
            try:
                infile = self.root.open(target, 'rb')
                dtype_str = str(attrs['format'])
                # CRUFT: <l4, <d8 are not sensible dtypes
                if dtype_str == '<l4': dtype_str = '<i4'
                if dtype_str == '<l8': dtype_str = '<i8'
                if dtype_str == '<d8': dtype_str = '<f8'
                # on python 3, load byte strings as unicode arrays
                if IS_PY3: dtype_str = dtype_str.replace('S', 'U')
                dtype = numpy.dtype(dtype_str)
                if attrs.get('binary', False) == True:
                    s = infile.read()
                    d = numpy.frombuffer(s, dtype=dtype).copy()
                elif self.root.getsize(target) == 1:
                    # empty entry: only contains \n
                    # this is only possible with empty string being written.
                    d = numpy.array([''], dtype=dtype)
                elif dtype.kind == 'S':
                    # tab-separated byte strings, one row per line
                    data = [[v for v in line[:-1].split(b'\t')]
                            for line in infile]
                    d = numpy.squeeze(numpy.array(data))
                    d = _unescape_str(d)
                elif dtype.kind == 'U':
                    # tab-separated text, decoded to unicode per cell
                    data = [[v.decode('utf-8') for v in line[:-1].split(b'\t')]
                            for line in infile]
                    d = numpy.squeeze(numpy.array(data))
                    d = _unescape_str(d)
                else:
                    # numeric data as tab-separated text
                    d = numpy.loadtxt(infile, dtype=dtype, delimiter='\t')
            finally:
                infile.close()
            if 'shape' in attrs:
                try:
                    d = d.reshape(attrs['shape'])
                except Exception:
                    # literally do nothing. Should be logging this.
                    pass
            self._value = d
        return self._value
def _unescape_str(data):
    """Undo the backslash escaping applied when string arrays were written.

    ``\\t``, ``\\r`` and ``\\n`` become tab/CR/LF, while a doubly escaped
    backslash (``\\\\``) survives as a single literal backslash.  The
    literal backslash is first hidden behind a ``\\x01`` sentinel so it is
    not re-processed by the later replacements (this lets ``\\\\t`` come
    through as backslash-t rather than a tab).  Empty arrays are returned
    untouched because the replacements would change their dtype.
    """
    if not data.size:
        return data
    data = numpy.char.replace(data, '\\\\', '\1')
    data = numpy.char.replace(data, '\\t', '\t')
    data = numpy.char.replace(data, '\\r', '\r')
    data = numpy.char.replace(data, '\\n', '\n')
    return numpy.char.replace(data, '\1', '\\')
def makeSoftLink(parent, path):
    """Resolve a ``.link`` sidecar entry into its target node.

    The sidecar JSON at ``<path>.link`` contains a "target" path; the
    node at that target is looked up from the root and returned, with the
    original (link) path recorded on it as ``orig_path``.
    """
    sidecar = path.lstrip("/") + ".link"
    raw = parent.root.open(sidecar, 'r').read()
    linkinfo = json.loads(bytes_to_str(raw))
    target_obj = parent.root[linkinfo['target']]
    target_obj.orig_path = path
    return target_obj
#compatibility with h5nexus:
# Expose the zip-backed classes under the generic h5nexus-style names so
# code written against that API can use this module unchanged.  Note that
# ``open`` deliberately shadows the builtin within this module's namespace.
group = Group
field = FieldFile
open = File
from uncertainties import ufloat
from uncertainties.unumpy import \
uarray, umatrix, nominal_values as uval, std_devs as udev
import numpy as np
def interp_err(x, xp, fp, dfp, left=None, right=None):
    """
    Linear interpolation of x into points (xk,yk +/- dyk).

    xp is assumed to be in ascending order.

    left is the uncertainty value to return for points before the range of xp,
    or None for the initial value, fp[0].

    right is the uncertainty value to return for points after the range of xp,
    or None for the final value, fp[-1].
    """
    if np.isscalar(x):
        # NOTE(review): ufloat() expects scalar fp/dfp here; if fp is an
        # array this branch looks suspect (uarray would be expected) --
        # confirm intended usage before relying on the scalar path.
        fp = ufloat(fp, dfp)
        result = interp([x], xp, fp, left, right)[0]
        return result.n, result.s
    # Vector case: carry the uncertainties through as a uarray.
    fp = uarray(fp, dfp)
    result = interp(x, xp, fp, left, right)
    return uval(result), udev(result)
def format_err(x, dx):
    """Format the value *x* with uncertainty *dx* as a single string."""
    u = ufloat(x, dx)
    return u.format('gS')
def solve_err(A, dA, b, db):
    """Solve ``A x = b`` where both A and b carry standard errors.

    Returns the nominal solution vector and its propagated uncertainty.
    """
    uA = umatrix(A, dA)
    ub = umatrix(b, db).T
    ux = uA.I * ub
    return uval(ux), udev(ux)
def interp(x, xp, fp, left=None, right=None):
    """
    1-D interpolation of *x* into *(xp,fp)*.

    *xp* must be an increasing vector.  *x* can be scalar or vector.

    If *x* is beyond the range of *xp*, returns *left/right*, or the value of
    *fp* at the end points if *left/right* is not defined.

    Implemented in pure python so *fp* can be an extended numeric type such
    as complex or value+uncertainty.
    """
    scalar_input = np.isscalar(x)
    if len(xp) == 1:
        # Degenerate one-point table: every x maps to the single fp value.
        f = fp[np.zeros_like(x, dtype='i')]
    else:
        xp = np.asarray(xp)
        if np.any(np.diff(xp) < 0.):
            raise ValueError("interp needs a sorted list")
        if not scalar_input:
            x = np.asarray(x)
        # Search only the interior knots so idx+1 is always a valid index;
        # this also tolerates repeated values in xp at the end points,
        # which would otherwise give 0/0 in the weight below.
        idx = np.searchsorted(xp[1:-1], x)
        weight = (xp[idx + 1] - x) / (xp[idx + 1] - xp[idx])
        f = weight * fp[idx] + (1 - weight) * fp[idx + 1]
    if scalar_input:
        if x < xp[0]:
            return fp[0] if left is None else left
        if x > xp[-1]:
            return fp[-1] if right is None else right
        return f
    # Vector case: patch the out-of-range entries in place.
    f[x < xp[0]] = fp[0] if left is None else left
    f[x > xp[-1]] = fp[-1] if right is None else right
    return f
from __future__ import print_function
import sys
from collections import OrderedDict
import numpy as np
if sys.version_info[0] > 2:
    # Python 3: bytes read from the file must be decoded to str.
    def tostr(s):
        return s.decode('utf8')

    def decode(s, encoding='utf8'):
        return s.decode(encoding)

    # Default string encoding for loadoct(); utf8 yields str arrays on py3.
    STR_ENCODING = 'utf8'
else:
    # Python 2: file bytes already are str; decode to unicode on request.
    def tostr(s):
        return s

    def decode(s, encoding='utf8'):
        return unicode(s, encoding)

    # None leaves strings as bytes (py2 str).
    STR_ENCODING = None
# Octave binary type codes -> human-readable type names.  (Code 255 in the
# stream instead means an explicit type-name string follows.)
DATA_TYPES = {
    1: "scalar",
    2: "matrix",
    3: "complex scalar",
    4: "complex matrix",
    5: "old_string",
    6: "range",
    7: "string",
}

# Octave element-type codes -> numpy dtype strings (without byte order).
TYPE_CODES = {
    0: "u1",
    1: "u2",
    2: "u4",
    3: "i1",
    4: "i2",
    5: "i4",
    6: "f4",
    7: "f8",
    8: "u8",
    9: "i8",
}

# Precomputed numpy dtypes for each element-type code.
DTYPES = {k: np.dtype(v) for k, v in TYPE_CODES.items()}
def loadoct(fd, encoding=STR_ENCODING):
    """
    Read an octave binary file from the file handle fd, returning
    an ordered dict of named variables.  If encoding is not None then
    convert strings from bytes to unicode.  Default is STR_ENCODING, which
    is utf8 for python 3 and None for python 2, yielding arrays
    of type str in each dialect.
    """
    # Magic header identifies the format and the byte order:
    # "Octave-1-L" is little endian, "Octave-1-B" is big endian.
    magic = fd.read(10)
    assert(magic == b"Octave-1-L" or magic == b"Octave-1-B")
    endian = "<" if magic[-1:] == b"L" else ">"
    # Float type is 0: IEEE-LE, 1: IEEE-BE, 2: VAX-D, 3: VAX-G, 4: Cray
    # Not used since Octave assumes IEEE format floats.
    _float_format = fd.read(1)
    len_dtype = np.dtype(endian + "i4")
    def read_len():
        # Read a 4-byte length prefix; returns None at end of file.
        len_bytes = fd.read(4)
        if not len_bytes:
            return None
        return np.frombuffer(len_bytes, len_dtype)[0]
    table = OrderedDict()
    while True:
        # Each variable record: name, doc string, global flag, type code.
        name_length = read_len()
        if name_length is None:  # EOF
            break
        name = tostr(fd.read(name_length))
        doc_length = read_len()
        doc = tostr(fd.read(doc_length)) if doc_length else ''
        is_global = bool(ord(fd.read(1)))
        data_type = ord(fd.read(1))
        if data_type == 255:
            # Extended type: an explicit type-name string follows.
            type_str = tostr(fd.read(read_len()))
        else:
            type_str = DATA_TYPES[data_type]
        #print("reading", name, type_str)
        if type_str.endswith("scalar"):
            # Scalar value: a single element of the given dtype.
            if type_str == "scalar":
                dtype = DTYPES[ord(fd.read(1))]
            elif type_str == "complex scalar":
                _ = fd.read(1)
                dtype = np.dtype('complex128')
            elif type_str == "float complex scalar":
                _ = fd.read(1)
                dtype = np.dtype('complex64')
            else:
                # Extended names look like "<dtype> scalar"; strip suffix.
                dtype = np.dtype(type_str[:-7])
            dtype = dtype.newbyteorder(endian)
            data = np.frombuffer(fd.read(dtype.itemsize), dtype)
            table[name] = data[0]
        elif type_str.endswith("matrix"):
            # Matrix value: dimension list, then elements in Fortran order.
            ndims = read_len()
            if ndims < 0:
                # Negative count signals an explicit n-d dimension vector.
                ndims = -ndims
                dims = np.frombuffer(fd.read(4*ndims), len_dtype)
            else:
                # Otherwise the record stores (nrows, ncols) directly.
                dims = (ndims, read_len())
            count = np.prod(dims)
            if type_str == "matrix":
                dtype = DTYPES[ord(fd.read(1))]
            elif type_str == "complex matrix":
                _ = fd.read(1)
                dtype = np.dtype('complex128')
            elif type_str == "float complex matrix":
                _ = fd.read(1)
                dtype = np.dtype('complex64')
            else:
                # Extended names look like "<dtype> matrix"; strip suffix.
                dtype = np.dtype(type_str[:-7])
            dtype = dtype.newbyteorder(endian)
            data = np.frombuffer(fd.read(count*dtype.itemsize), dtype)
            # Note: Use data.copy() to make a modifiable array.
            table[name] = data.reshape(dims, order='F')
        elif type_str == "old_string":
            # Single length-prefixed string.
            data = fd.read(read_len())
            if encoding is not None:
                data = decode(data, encoding)
            table[name] = data
        elif type_str in ("string", "sq_string"):
            nrows = read_len()
            if nrows < 0:
                # Negative count: n-d char array stored in Fortran order;
                # reinterpret the last axis as fixed-width strings.
                ndims = -nrows
                dims = np.frombuffer(fd.read(4*ndims), len_dtype)
                count = np.prod(dims)
                fortran_order = np.frombuffer(fd.read(count), dtype='uint8')
                c_order = np.ascontiguousarray(fortran_order.reshape(dims, order='F'))
                data = c_order.view(dtype='|S'+str(dims[-1]))
                if encoding is not None:
                    data = np.array([decode(s, encoding) for s in data.flat])
                table[name] = data.reshape(dims[:-1])
            else:
                # Positive count: that many length-prefixed rows.
                data = [fd.read(read_len()) for _ in range(nrows)]
                if encoding is not None:
                    data = [decode(s, encoding) for s in data]
                table[name] = np.array(data)
        else:
            raise NotImplementedError("unknown octave type "+type_str)
        #print("read %s:%s"%(name, type_str), table[name])
    return table
read_octave_binary = loadoct  # CRUFT: deprecated name
def _dump(filename, encoding=STR_ENCODING):
    """Print every variable stored in an octave binary file.

    Transparently handles gzip-compressed files (".gz" suffix).
    """
    import gzip
    opener = gzip.open if filename.endswith('.gz') else open
    with opener(filename, 'rb') as fd:
        table = loadoct(fd, encoding)
    for name, value in table.items():
        print(name, value)
if __name__ == "__main__":
    # Dump the file named on the command line using the default encoding;
    # switch to the commented alternatives to force unicode or raw bytes.
    #_dump(sys.argv[1], encoding='utf8')  # unicode
    #_dump(sys.argv[1], encoding=None)  # bytes
    _dump(sys.argv[1])  # str, encoding=STR_ENCODING
import numpy as np
from uncertainties.unumpy import uarray, nominal_values as uval, std_devs as udev
from dataflow.lib import err1d
from .deadtime_fit import deadtime_from_counts, estimate_incident
class DeadTimeData(object):
    """Container for the results of a dead-time calibration fit.

    Holds the fitted non-paralyzing/paralyzing time constants, the
    attenuator estimates, the reconstructed incident beam rates, and the
    datasets/index used for the fit, along with accumulated log messages.
    """
    def __init__(self, datasets, tau_NP, tau_P,
                 attenuators, rates, index):
        self.messages = []
        self.warnings = []
        self.datasets = datasets
        self.tau_NP = tau_NP
        self.tau_P = tau_P
        self.attenuators = attenuators
        self.incident_beam = rates
        self.index = index

    def plot(self):
        raise NotImplementedError("see deadtime_fit for plots we want")

    def get_metadata(self):
        """Return the fitted constants and attenuators as a plain dict."""
        return {
            "tau_P": self.tau_P,
            "tau_NP": self.tau_NP,
            "attenuators": self.attenuators,
        }

    def get_plottable(self):
        """Return a plottable parameter structure for the web client."""
        return {"params": self.get_metadata(), "type": "params"}

    def log(self, msg):
        """Record a processing message for later display."""
        self.messages.append(msg)
def fit_dead_time(datasets, source='detector', mode='auto'):
    """Fit dead-time constants from a set of attenuated measurements.

    *source* selects whether the monitor or the detector counter is
    calibrated.  Points masked in the final dataset are excluded.
    """
    time = [data.monitor.count_time for data in datasets]
    if source == 'monitor':
        counts = [data.monitor.counts for data in datasets]
    elif source == 'detector':
        counts = [data.detector.counts for data in datasets]
    else:
        raise ValueError("Source should be detector or monitor")

    data = datasets[-1]
    index = slice(None, None) if data.mask is None else ~data.mask
    pairs = [(c[index], t[index]) for c, t in zip(counts, time)]
    tau_NP, tau_P, attenuators, rates = deadtime_from_counts(pairs, mode=mode)
    # deadtime_from_counts reports attenuation factors; invert them to
    # attenuator transmissions, carrying the uncertainties through.
    atten_u = 1.0 / uarray(*attenuators)
    attenuators = list(zip(uval(atten_u), udev(atten_u)))

    return DeadTimeData(datasets, tau_NP, tau_P, attenuators, rates, index)
def apply_monitor_dead_time(data, tau_NP=0.0, tau_P=0.0):
    """Correct the monitor counts in *data* for dead-time losses in place."""
    t = data.monitor.count_time
    m = data.monitor.counts
    var = data.monitor.counts_variance
    # Fall back to Poisson statistics when no variance was recorded.
    dm = np.sqrt(var if var is not None else m)
    # Convert to rates, estimate the true incident rate, convert back.
    I, dI = estimate_incident((m/t, dm/t),
                              tau_NP=[tau_NP, 0], tau_P=[tau_P, 0])
    data.monitor.counts = I*t
    data.monitor.counts_variance = (dI*t)**2
def apply_detector_dead_time(data, tau_NP=0.0, tau_P=0.0):
    """Correct the detector counts in *data* for dead-time losses in place."""
    t = data.monitor.count_time
    m = data.detector.counts
    var = data.detector.counts_variance
    # Fall back to Poisson statistics when no variance was recorded.
    dm = np.sqrt(var if var is not None else m)
    # Convert to rates, estimate the true incident rate, convert back.
    I, dI = estimate_incident((m/t, dm/t),
                              tau_NP=[tau_NP, 0], tau_P=[tau_P, 0])
    data.detector.counts = I*t
    data.detector.counts_variance = (dI*t)**2
def saturation_correction(counts, time, saturation):
    # type: (np.ndarray, np.ndarray, np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]
    """Correct *counts* for detector saturation given a rate calibration.

    *saturation* is None (no correction), a 2xN table of (rate,
    efficiency), or a 3xN table of (rate, efficiency, uncertainty).
    Returns the corrected counts, their variance, and a mask which is
    False wherever the measured rate exceeds the calibrated range.
    """
    rate = counts / time
    # TODO: assert that saturation is sorted by the first value
    if saturation is None:
        # No calibration: Poisson variance, all non-negative rates valid.
        return counts, counts, (rate >= 0.)
    if saturation.shape[0] == 3:
        # Calibration with uncertainty: propagate through the division.
        E, varE = err1d.interp(rate, saturation[0], saturation[1],
                               saturation[2]**2)
        C, varC = err1d.div(counts, counts, E, varE)
    else:
        # Calibration without uncertainty: simple efficiency scaling.
        E = np.interp(rate, saturation[0], saturation[1])
        C, varC = counts/E, counts/E**2
    return C, varC, (rate <= saturation[0, -1])
def apply_monitor_saturation(data):
    """Apply the monitor saturation calibration to *data* in place.

    Points whose rate exceeds the calibrated range are masked out.
    """
    I, varI, mask = saturation_correction(
        data.monitor.counts, data.monitor.count_time,
        data.monitor.saturation)
    data.monitor.counts = I
    data.monitor.counts_variance = varI
    # If any points exceed the saturation calibration, mask them.
    if not mask.all():
        data.mask = mask & (True if data.mask is None else data.mask)
def apply_detector_saturation(data):
    """Apply the detector saturation calibration to *data* in place.

    Points whose rate exceeds the calibrated range are masked out.
    """
    I, varI, mask = saturation_correction(
        data.detector.counts, data.monitor.count_time,
        data.detector.saturation)
    data.detector.counts = I
    data.detector.counts_variance = varI
    # If any points exceed the saturation calibration, mask them.
    if not mask.all():
        data.mask = mask & (True if data.mask is None else data.mask)
import numpy as np
from dataflow.lib import err1d
def apply_rescale(data, scale, dscale):
    """Multiply data by *scale* +/- *dscale* in place, propagating errors."""
    v, var = err1d.mul(data.v, data.dv**2, scale, dscale**2)
    data.v, data.dv = v, np.sqrt(var)
def apply_intensity_norm(data, base):
    """Divide *data* by the *base* intensity scan in place.

    The base scan is interpolated onto the angular resolution of *data*
    before division; uncertainties are propagated through both steps.
    """
    assert data.normbase == base.normbase, "can't mix time and monitor normalized data"
    S, varS = err1d.interp(data.angular_resolution,
                           base.angular_resolution, base.v, base.dv**2)
    v, var = err1d.div(data.v, data.dv**2, S, varS)
    data.v, data.dv = v, np.sqrt(var)
def calculate_number(data, base, time_uncertainty=1e-6):
    """ returns the measured base flux * count time for each point """
    assert base.normbase == 'time', "can't calculate time-integrated flux from monitor-normalized base"
    S, varS = err1d.interp(data.angular_resolution,
                           base.angular_resolution, base.v, base.dv**2)
    # Incident flux integrated over the counting time at each point.
    return err1d.mul(data.monitor.count_time, time_uncertainty**2, S, varS)
def estimate_attenuation(datasets):
    """Estimate attenuator values from overlapping datasets.

    Not yet implemented; always raises :class:`NotImplementedError`.
    """
    # The sketch that used to follow the raise (sorting datasets by their
    # initial angular resolution) was unreachable dead code and has been
    # removed:
    #   index = np.sort([d.angular_resolution[0] for d in datasets])
    raise NotImplementedError()
# Valid values for the *base* argument of apply_norm.
NORMALIZE_OPTIONS = 'auto|monitor|time|power|none'


def apply_norm(data, base='auto'):
    """Normalize detector counts by monitor, time, power or nothing.

    With ``base='auto'`` the first available normalization is chosen in
    the order monitor, time, power, none.  Updates ``data.v``/``data.dv``
    and the associated labels in place.
    """
    if base == 'auto':
        if (data.monitor.counts > 0).all():
            base = 'monitor'
        elif (data.monitor.count_time > 0).all():
            base = 'time'
        elif data.monitor.source_power is not None:
            base = 'power'
        else:
            base = 'none'

    C = data.detector.counts
    varC = data.detector.counts_variance
    if base == 'monitor':
        assert (data.monitor.counts > 0).all(), "monitor counts are zero; can't normalize by monitor"
        M = data.monitor.counts
        varM = data.monitor.counts_variance
        varM += (varM == 0)  # variance on zero counts is +/- 1
        units = 'monitor'
    elif base == 'time':
        assert (data.monitor.count_time > 0).all(), "count time is zero; can't normalize by time"
        M = data.monitor.count_time
        # Uniform distribution has variance of interval/12
        # NOTE(review): for a uniform distribution of width time_step the
        # variance would be time_step**2/12 -- confirm intended units.
        varM = data.monitor.time_step/12.
        units = 'second'
    elif base == 'power':
        assert data.monitor.source_power is not None, "source power is unknown; can't normalize by power"
        M = data.monitor.source_power
        varM = 0
        units = data.monitor.source_power_units
    elif base == 'none':
        M = 1
        varM = 0
        units = ''
    else:
        raise ValueError("Expected %r in %s" % (base, NORMALIZE_OPTIONS))

    # Guard against zero variance in the counts (treat as +/- 1).
    value, variance = err1d.div(C, varC+(varC == 0), M, varM)
    data.v = value
    data.dv = np.sqrt(variance)
    data.vunits = 'counts per '+units if units else 'counts'
    data.vlabel = 'Intensity'
    data.normbase = base
    # TODO: set data scale to a nice number
    #data.scale = nice_number(M[0])
<img src="docs/source/_static/redun.svg" width="200"/>
*yet another redundant workflow engine*
**redun** aims to be a more expressive and efficient workflow framework, built on top of the popular Python programming language. It takes the somewhat contrarian view that writing dataflows directly is unnecessarily restrictive, and by doing so we lose abstractions we have come to rely on in most modern high-level languages (control flow, composability, recursion, high order functions, etc). redun's key insight is that workflows can be expressed as [lazy expressions](#whats-the-trick), which are then evaluated by a scheduler that performs automatic parallelization, caching, and data provenance logging.
redun's key features are:
- Workflows are defined by lazy expressions that when evaluated emit dynamic directed acyclic graphs (DAGs), enabling complex data flows.
- Incremental computation that is reactive to both data changes as well as code changes.
- Workflow tasks can be executed on a variety of compute backend (threads, processes, AWS batch jobs, Spark jobs, etc).
- Data changes are detected for in-memory values as well as external data sources such as files and object stores using file hashing.
- Code changes are detected by hashing individual Python functions and comparing them against historical call graph recordings.
- Past intermediate results are cached centrally and reused across workflows.
- Past call graphs can be used as a data lineage record and can be queried for debugging and auditing.
To learn more, see our [Medium](https://insitro.medium.com/when-data-science-goes-with-the-flow-insitro-introduces-redun-8b06b707a14b) and [AWS HPC](https://aws.amazon.com/blogs/hpc/data-science-workflows-at-insitro-using-redun-on-aws-batch/) blog posts, as well as our [documentation](https://insitro.github.io/redun/design.html), [tutorial](examples/README.md), and [influences](https://insitro.github.io/redun/design.html#influences).
*About the name:* The name "redun" is self-deprecating (there are [A LOT](https://github.com/pditommaso/awesome-pipeline) of workflow engines), but it is also a reference to its original inspiration, the [redo](https://apenwarr.ca/log/20101214) build system.
## Install
```sh
pip install redun
```
See [developing](https://insitro.github.io/redun/developing.html) for more information on working with the code.
### Postgres backend
To use postgres as a recording backend, use
```sh
pip install redun[postgres]
```
The above assumes the following dependencies are installed:
* `pg_config` (in the `postgresql-devel` package; on ubuntu: `apt-get install libpq-dev`)
* `gcc` (on ubuntu or similar `sudo apt-get install gcc`)
### Optional Visualization
To generate graphviz images and dot files, use
```sh
pip install redun[viz]
```
The above assumes the following dependencies are installed:
* `graphviz` (on ubuntu: `apt-get install graphviz graphviz-dev`, via homebrew: `brew install graphviz`)
* `gcc` (on ubuntu or similar `sudo apt-get install gcc`)
## Use cases
redun's general approach to defining workflows makes it a good choice for implementing workflows for a wide variety of use cases:
- [Bioinformatics](examples/06_bioinfo_batch/)
- [Cheminformatics](examples/aws_glue/rdkit_workflow.py)
- [Web or API data extraction](examples/scraping/)
- [General data science](examples/word_count/)
- [And much more](examples/)
## Small taste
Here is a quick example of using redun for a familiar workflow, compiling a C program ([full example](examples/02_compile/README.md)). In general, any kind of data processing could be done within each task (e.g. reading and writing CSVs, DataFrames, databases, APIs).
```py
# make.py
import os
from typing import Dict, List
from redun import task, File
redun_namespace = "redun.examples.compile"
@task()
def compile(c_file: File) -> File:
"""
Compile one C file into an object file.
"""
os.system(f"gcc -c {c_file.path}")
return File(c_file.path.replace(".c", ".o"))
@task()
def link(prog_path: str, o_files: List[File]) -> File:
"""
Link several object files together into one program.
"""
o_files=" ".join(o_file.path for o_file in o_files)
os.system(f"gcc -o {prog_path} {o_files}")
return File(prog_path)
@task()
def make_prog(prog_path: str, c_files: List[File]) -> File:
"""
Compile one program from its source C files.
"""
o_files = [
compile(c_file)
for c_file in c_files
]
prog_file = link(prog_path, o_files)
return prog_file
# Definition of programs and their source C files.
files = {
"prog": [
File("prog.c"),
File("lib.c"),
],
"prog2": [
File("prog2.c"),
File("lib.c"),
],
}
@task()
def make(files : Dict[str, List[File]] = files) -> List[File]:
"""
Top-level task for compiling all the programs in the project.
"""
progs = [
make_prog(prog_path, c_files)
for prog_path, c_files in files.items()
]
return progs
```
Notice, that besides the `@task` decorator, the code follows typical Python conventions and is organized like a sequential program.
We can run the workflow using the `redun run` command:
```
redun run make.py make
[redun] redun :: version 0.4.15
[redun] config dir: /Users/rasmus/projects/redun/examples/compile/.redun
[redun] Upgrading db from version -1.0 to 2.0...
[redun] Start Execution 69c40fe5-c081-4ca6-b232-e56a0a679d42: redun run make.py make
[redun] Run Job 72bdb973: redun.examples.compile.make(files={'prog': [File(path=prog.c, hash=dfa3aba7), File(path=lib.c, hash=a2e6cbd9)], 'prog2': [File(path=prog2.c, hash=c748e4c7), File(path=lib.c, hash=a2e6cbd9)]}) on default
[redun] Run Job 096be12b: redun.examples.compile.make_prog(prog_path='prog', c_files=[File(path=prog.c, hash=dfa3aba7), File(path=lib.c, hash=a2e6cbd9)]) on default
[redun] Run Job 32ed5cf8: redun.examples.compile.make_prog(prog_path='prog2', c_files=[File(path=prog2.c, hash=c748e4c7), File(path=lib.c, hash=a2e6cbd9)]) on default
[redun] Run Job dfdd2ee2: redun.examples.compile.compile(c_file=File(path=prog.c, hash=dfa3aba7)) on default
[redun] Run Job 225f924d: redun.examples.compile.compile(c_file=File(path=lib.c, hash=a2e6cbd9)) on default
[redun] Run Job 3f9ea7ae: redun.examples.compile.compile(c_file=File(path=prog2.c, hash=c748e4c7)) on default
[redun] Run Job a8b21ec0: redun.examples.compile.link(prog_path='prog', o_files=[File(path=prog.o, hash=4934098e), File(path=lib.o, hash=7caa7f9c)]) on default
[redun] Run Job 5707a358: redun.examples.compile.link(prog_path='prog2', o_files=[File(path=prog2.o, hash=cd0b6b7e), File(path=lib.o, hash=7caa7f9c)]) on default
[redun]
[redun] | JOB STATUS 2021/06/18 10:34:29
[redun] | TASK PENDING RUNNING FAILED CACHED DONE TOTAL
[redun] |
[redun] | ALL 0 0 0 0 8 8
[redun] | redun.examples.compile.compile 0 0 0 0 3 3
[redun] | redun.examples.compile.link 0 0 0 0 2 2
[redun] | redun.examples.compile.make 0 0 0 0 1 1
[redun] | redun.examples.compile.make_prog 0 0 0 0 2 2
[redun]
[File(path=prog, hash=a8d14a5e), File(path=prog2, hash=04bfff2f)]
```
This should have taken three C source files (`lib.c`, `prog.c`, and `prog2.c`), compiled them to three object files (`lib.o`, `prog.o`, `prog2.o`), and then linked them into two binaries (`prog` and `prog2`). Specifically, redun automatically determined the following dataflow DAG and performed the compiling and linking steps in separate threads:
<p align="center">
<img width="400" src="examples/02_compile/images/compile-dag.svg">
</p>
Using the `redun log` command, we can see the full job tree of the most recent execution (denoted `-`):
```
redun log -
Exec 69c40fe5-c081-4ca6-b232-e56a0a679d42 [ DONE ] 2021-06-18 10:34:28: run make.py make
Duration: 0:00:01.47
Jobs: 8 (DONE: 8, CACHED: 0, FAILED: 0)
--------------------------------------------------------------------------------
Job 72bdb973 [ DONE ] 2021-06-18 10:34:28: redun.examples.compile.make(files={'prog': [File(path=prog.c, hash=dfa3aba7), File(path=lib.c, hash=a2e6cbd9)], 'prog2': [File(path=prog2.c, hash=c748e4c7), Fil
Job 096be12b [ DONE ] 2021-06-18 10:34:28: redun.examples.compile.make_prog('prog', [File(path=prog.c, hash=dfa3aba7), File(path=lib.c, hash=a2e6cbd9)])
Job dfdd2ee2 [ DONE ] 2021-06-18 10:34:28: redun.examples.compile.compile(File(path=prog.c, hash=dfa3aba7))
Job 225f924d [ DONE ] 2021-06-18 10:34:28: redun.examples.compile.compile(File(path=lib.c, hash=a2e6cbd9))
Job a8b21ec0 [ DONE ] 2021-06-18 10:34:28: redun.examples.compile.link('prog', [File(path=prog.o, hash=4934098e), File(path=lib.o, hash=7caa7f9c)])
Job 32ed5cf8 [ DONE ] 2021-06-18 10:34:28: redun.examples.compile.make_prog('prog2', [File(path=prog2.c, hash=c748e4c7), File(path=lib.c, hash=a2e6cbd9)])
Job 3f9ea7ae [ DONE ] 2021-06-18 10:34:28: redun.examples.compile.compile(File(path=prog2.c, hash=c748e4c7))
Job 5707a358 [ DONE ] 2021-06-18 10:34:29: redun.examples.compile.link('prog2', [File(path=prog2.o, hash=cd0b6b7e), File(path=lib.o, hash=7caa7f9c)])
```
Notice, redun automatically detected that `lib.c` only needed to be compiled once and that its result can be reused (a form of [common subexpression elimination](https://en.wikipedia.org/wiki/Common_subexpression_elimination)).
Using the `--file` option, we can see all files (or URLs) that were read, `r`, or written, `w`, by the workflow:
```
redun log --file
File 2b6a7ce0 2021-06-18 11:41:42 r lib.c
File d90885ad 2021-06-18 11:41:42 rw lib.o
File 2f43c23c 2021-06-18 11:41:42 w prog
File dfa3aba7 2021-06-18 10:34:28 r prog.c
File 4934098e 2021-06-18 10:34:28 rw prog.o
File b4537ad7 2021-06-18 11:41:42 w prog2
File c748e4c7 2021-06-18 10:34:28 r prog2.c
File cd0b6b7e 2021-06-18 10:34:28 rw prog2.o
```
We can also look at the provenance of a single file, such as the binary `prog`:
```
redun log prog
File 2f43c23c 2021-06-18 11:41:42 w prog
Produced by Job a8b21ec0
Job a8b21ec0-e60b-4486-bcf4-4422be265608 [ DONE ] 2021-06-18 11:41:42: redun.examples.compile.link('prog', [File(path=prog.o, hash=4934098e), File(path=lib.o, hash=d90885ad)])
Traceback: Exec 4a2b624d > (1 Job) > Job 2f8b4b5f make_prog > Job a8b21ec0 link
Duration: 0:00:00.24
CallNode 6c56c8d472dc1d07cfd2634893043130b401dc84 redun.examples.compile.link
Args: 'prog', [File(path=prog.o, hash=4934098e), File(path=lib.o, hash=d90885ad)]
Result: File(path=prog, hash=2f43c23c)
Task a20ef6dc2ab4ed89869514707f94fe18c15f8f66 redun.examples.compile.link
def link(prog_path: str, o_files: List[File]) -> File:
"""
Link several object files together into one program.
"""
o_files=" ".join(o_file.path for o_file in o_files)
os.system(f"gcc -o {prog_path} {o_files}")
return File(prog_path)
Upstream dataflow:
result = File(path=prog, hash=2f43c23c)
result <-- <6c56c8d4> link(prog_path, o_files)
prog_path = <ee510692> 'prog'
o_files = <f1eaf150> [File(path=prog.o, hash=4934098e), File(path=lib.o, hash=d90885ad)]
prog_path <-- argument of <a4ac4959> make_prog(prog_path, c_files)
<-- origin
o_files <-- derives from
compile_result = <d90885ad> File(path=lib.o, hash=d90885ad)
compile_result_2 = <4934098e> File(path=prog.o, hash=4934098e)
compile_result <-- <45054a8f> compile(c_file)
c_file = <2b6a7ce0> File(path=lib.c, hash=2b6a7ce0)
c_file <-- argument of <a4ac4959> make_prog(prog_path, c_files)
<-- argument of <a9a6af53> make(files)
<-- origin
compile_result_2 <-- <8d85cebc> compile(c_file_2)
c_file_2 = <dfa3aba7> File(path=prog.c, hash=dfa3aba7)
c_file_2 <-- argument of <74cceb4e> make_prog(prog_path, c_files)
<-- argument of <45400ab5> make(files)
<-- origin
```
This output shows the original `link` task source code responsible for creating the program `prog`, as well as the full derivation, denoted "upstream dataflow". See the full example for a [deeper explanation](examples/02_compile#data-provenance-for-files) of this output. To understand more about the data structure that powers these kinds of queries, see [call graphs](https://insitro.github.io/redun/design.html#call-graphs).
We can change one of the input files, such as `lib.c`, and rerun the workflow. Due to redun's automatic incremental compute, only the minimal tasks are rerun:
```
redun run make.py make
[redun] redun :: version 0.4.15
[redun] config dir: /Users/rasmus/projects/redun/examples/compile/.redun
[redun] Start Execution 4a2b624d-b6c7-41cb-acca-ec440c2434db: redun run make.py make
[redun] Run Job 84d14769: redun.examples.compile.make(files={'prog': [File(path=prog.c, hash=dfa3aba7), File(path=lib.c, hash=2b6a7ce0)], 'prog2': [File(path=prog2.c, hash=c748e4c7), File(path=lib.c, hash=2b6a7ce0)]}) on default
[redun] Run Job 2f8b4b5f: redun.examples.compile.make_prog(prog_path='prog', c_files=[File(path=prog.c, hash=dfa3aba7), File(path=lib.c, hash=2b6a7ce0)]) on default
[redun] Run Job 4ae4eaf6: redun.examples.compile.make_prog(prog_path='prog2', c_files=[File(path=prog2.c, hash=c748e4c7), File(path=lib.c, hash=2b6a7ce0)]) on default
[redun] Cached Job 049a0006: redun.examples.compile.compile(c_file=File(path=prog.c, hash=dfa3aba7)) (eval_hash=434cbbfe)
[redun] Run Job 0f8df953: redun.examples.compile.compile(c_file=File(path=lib.c, hash=2b6a7ce0)) on default
[redun] Cached Job 98d24081: redun.examples.compile.compile(c_file=File(path=prog2.c, hash=c748e4c7)) (eval_hash=96ab0a2b)
[redun] Run Job 8c95f048: redun.examples.compile.link(prog_path='prog', o_files=[File(path=prog.o, hash=4934098e), File(path=lib.o, hash=d90885ad)]) on default
[redun] Run Job 9006bd19: redun.examples.compile.link(prog_path='prog2', o_files=[File(path=prog2.o, hash=cd0b6b7e), File(path=lib.o, hash=d90885ad)]) on default
[redun]
[redun] | JOB STATUS 2021/06/18 11:41:43
[redun] | TASK PENDING RUNNING FAILED CACHED DONE TOTAL
[redun] |
[redun] | ALL 0 0 0 2 6 8
[redun] | redun.examples.compile.compile 0 0 0 2 1 3
[redun] | redun.examples.compile.link 0 0 0 0 2 2
[redun] | redun.examples.compile.make 0 0 0 0 1 1
[redun] | redun.examples.compile.make_prog 0 0 0 0 2 2
[redun]
[File(path=prog, hash=2f43c23c), File(path=prog2, hash=b4537ad7)]
```
Notice, two of the compile jobs are cached (`prog.c` and `prog2.c`), but compiling the library `lib.c` and the downstream link steps are correctly rerun.
Check out the [examples](examples/) for more example workflows and features of redun. Also, see the [design notes](https://insitro.github.io/redun/design.html) for more information on redun's design.
## Data provenance exploration
All workflow executions are recorded into a database that can be explored using the [Console (TUI)](https://insitro.github.io/redun/design.html#call-graphs). The Console is convenient for debugging large complex workflows, as well as understanding how to reproduce and extend past work.
<a href="docs/source/_static/console-execution.svg"><img width="45%" src="docs/source/_static/console-execution.svg"> <a href="docs/source/_static/console-job.svg"><img width="45%" src="docs/source/_static/console-job.svg">
## Mixed compute backends
In the above example, each task ran in its own thread. However, more generally each task can run in its own process, Docker container, [AWS Batch job](examples/05_aws_batch), or [Spark job](examples/aws_glue). With [minimal configuration](examples/05_aws_batch/.redun/redun.ini), users can lightly annotate where they would like each task to run. redun will automatically handle the data and code movement as well as backend scheduling:
```py
@task(executor="process")
def a_process_task(a):
# This task runs in its own process.
b = a_batch_task(a)
c = a_spark_task(b)
return c
@task(executor="batch", memory=4, vcpus=5)
def a_batch_task(a):
# This task runs in its own AWS Batch job.
# ...
@task(executor="spark")
def a_spark_task(b):
# This task runs in its own Spark job.
sc = get_spark_context()
# ...
```
See the [executor documentation](https://insitro.github.io/redun/executors.html) for more.
## What's the trick?
How did redun automatically perform parallel compute, caching, and data provenance in the example above? The trick is that redun builds up an [expression graph](https://en.wikipedia.org/wiki/Abstract_semantic_graph) representing the workflow and evaluates the expressions using [graph reduction](https://en.wikipedia.org/wiki/Graph_reduction). For example, the workflow above went through the following evaluation process:
<img width="800" src="examples/02_compile/images/expression-graph.svg">
For a more in-depth walk-through, see the [scheduler tutorial](examples/03_scheduler).
## Why not another workflow engine?
redun focuses on making multi-domain scientific pipelines easy to develop and deploy. The automatic parallelism, caching, code, and data reactivity, as well as data provenance features, make it a great fit for such work. However, redun does not attempt to solve all possible workflow problems, so it's perfectly reasonable to supplement it with other tools. For example, while redun provides a very expressive way to define [task parallelism](https://en.wikipedia.org/wiki/Task_parallelism), it does not attempt to perform the kind of fine-grain [data parallelism](https://en.wikipedia.org/wiki/Data_parallelism) more commonly provided by Spark or Dask. Fortunately, redun does not perform any "dirty tricks" (e.g. complex static analysis or call stack manipulation), and so we have found it possible to safely combine redun with other frameworks (e.g. pyspark, pytorch, Dask, etc) to achieve the benefits of each tool.
Lastly, redun does not provide its own compute cluster but instead builds upon other systems that do, such as cloud provider services for batch Docker jobs or Spark jobs.
For more details on how redun compares to other related ideas, see the [influences](https://insitro.github.io/redun/design.html#influences) section.
| /redun-0.16.2.tar.gz/redun-0.16.2/README.md | 0.60288 | 0.939248 | README.md | pypi |
from functools import partial
from inspect import getfullargspec
from logging import getLogger
from typing import Iterable, cast
from rx import Observable, merge
from rx.core.typing import Mapper
from .types import Epic
logger = getLogger(__name__)
def _wrapped_epic(epic: Mapper[Observable, Observable],
                  action_: Observable, _: Observable) -> Observable:
    # Adapter for one-argument epics: forward only the action stream and
    # discard the state stream supplied by the store.
    produced = epic(action_)
    return produced
def normalize_epic(epic: Epic) -> Epic:
    """Normalizes an epic to the two-argument ``(action_, state_)`` form.

    Epics may be written with either one positional parameter (actions only)
    or two (actions and state). One-argument epics are wrapped so the store
    can always invoke them with both observables.

    Args:
        epic: the epic, accepting one or two positional arguments

    Returns:
        the normalized two-argument epic
    """
    # Use the named ``.args`` accessor instead of indexing the FullArgSpec
    # tuple with a magic index.
    count = len(getfullargspec(epic).args)
    assert count in (1, 2)
    return epic if count == 2 else cast(Epic, partial(_wrapped_epic, epic))
def _run_epic(action_: Observable, state_: Observable, epic: Epic) -> Observable:
    # Invoke an (already normalized) epic with both streams.
    result = epic(action_, state_)
    return result
def run_epic(action_: Observable, state_: Observable) -> Mapper[Epic, Observable]:
    """Builds a callable that evaluates a single epic against the given streams.

    Args:
        action_: the action observable
        state_: the state observable

    Returns:
        a function that maps an epic to the observable it produces
    """
    assert isinstance(state_, Observable)
    assert isinstance(action_, Observable)
    return partial(_run_epic, action_, state_)
def _combine_epics(
        norm_epics: Iterable[Epic],
        action_: Observable, state_: Observable
) -> Observable:
    """Merges the output of several normalized epics into a single observable.

    Args:
        norm_epics: the (already normalized) epics to evaluate
        action_: the action observable
        state_: the state observable

    Returns:
        the merged observable of actions produced by all epics
    """
    evaluate = run_epic(action_, state_)
    return merge(*(evaluate(single) for single in norm_epics))
def combine_epics(*epics: Epic) -> Epic:
    """Combines a sequence of epics into one single epic by merging them.

    Each epic is first normalized to the two-argument form, so one- and
    two-argument epics may be mixed freely.

    Args:
        epics: the epics to merge

    Returns:
        The merged epic
    """
    # Annotation fix: with ``*epics`` each element is itself an ``Epic``;
    # the previous ``Iterable[Epic]`` described the whole tuple, not its items.
    return partial(_combine_epics, tuple(map(normalize_epic, epics)))
from logging import getLogger
from typing import Iterable, Mapping, MutableMapping, Optional, cast
import rx.operators as op
from rx import Observable, merge
from rx.subject import BehaviorSubject, Subject
from .action import create_action
from .constants import INIT_ACTION
from .epic import normalize_epic, run_epic
from .reducer import combine_reducers
from .types import (Action, Epic, Reducer, ReduxFeatureModule, ReduxRootState,
ReduxRootStore, StateType)
logger = getLogger(__name__)
def select_id(module: ReduxFeatureModule) -> str:
    """Returns the unique identifier of a feature module.

    Args:
        module: the feature module descriptor

    Returns:
        the module identifier string
    """
    return module.id
def select_dependencies(
        module: ReduxFeatureModule) -> Iterable[ReduxFeatureModule]:
    """Returns the feature modules this module depends on.

    Args:
        module: the feature module descriptor

    Returns:
        the iterable of dependency modules
    """
    return module.dependencies
def select_reducer(module: ReduxFeatureModule) -> Optional[Reducer]:
    """Returns the module's reducer, or ``None`` if it does not define one.

    Args:
        module: the feature module descriptor

    Returns:
        the reducer or ``None``
    """
    return module.reducer
def select_epic(module: ReduxFeatureModule) -> Optional[Epic]:
    """Returns the module's epic, or ``None`` if it does not define one.

    Args:
        module: the feature module descriptor

    Returns:
        the epic or ``None``
    """
    return module.epic
def has_reducer(module: ReduxFeatureModule) -> bool:
    """Checks whether a feature module contributes a reducer.

    Args:
        module: the feature module descriptor

    Returns:
        True if the module defines a reducer, else False
    """
    reducer = select_reducer(module)
    return bool(reducer)
def identity_reducer(state: StateType, _: Action) -> StateType:
    """Reducer that ignores the action and returns the state unchanged.

    Used as the initial reducer before any feature module is registered.
    """
    return state
def reduce_reducers(
        dst: Mapping[str, Reducer], module: ReduxFeatureModule
) -> Mapping[str, Reducer]:
    """Folds the reducer of one module into a mapping of all known reducers.

    Args:
        dst: the accumulated mapping of module id to reducer
        module: the module whose reducer should be added

    Returns:
        a new mapping including the module's reducer, or ``dst`` unchanged
        when the module has no reducer
    """
    reducer: Optional[Reducer] = select_reducer(module)
    if not reducer:
        return dst
    merged = {**dst, select_id(module): reducer}
    return cast(Mapping[str, Reducer], merged)
def create_store(initial_state: Optional[ReduxRootState] = None) -> ReduxRootStore: # pylint: disable=too-many-locals
    """ Constructs a new store that can handle feature modules.

    The store wires three reactive pipelines over the stream of registered
    feature modules: reducer composition, epic execution, and per-feature
    initialization actions. State is held in a BehaviorSubject so late
    subscribers immediately receive the current state.

    Args:
        initial_state: optional initial state of the store, will typically be the empty dict

    Returns:
        An implementation of the store
    """
    # current reducer; swapped out whenever a module adds a new reducer
    reducer: Reducer = identity_reducer
    def replace_reducer(new_reducer: Reducer) -> None:
        """ Callback that replaces the current reducer

        Args:
            new_reducer: the new reducer
        """
        nonlocal reducer
        reducer = new_reducer
    # subject used to dispatch actions
    actions = Subject()
    # the shared action observable (multicast so several pipelines can tap it)
    actions_ = actions.pipe(op.share())
    _dispatch = actions.on_next
    # our current state; BehaviorSubject replays the latest value to subscribers
    state = BehaviorSubject(initial_state if initial_state else {})
    # shutdown trigger
    done_ = Subject()
    # The set of known modules, to avoid cycles and duplicate registration
    modules: MutableMapping[str, ReduxFeatureModule] = {}
    # Sequence of added modules
    module_subject = Subject()
    # Subscribe to the resolved modules; de-duplicated by module id
    module_ = module_subject.pipe(op.distinct(select_id), op.share())
    # Build the reducers: fold each module's reducer into one combined reducer
    # and install it via replace_reducer
    reducer_ = module_.pipe(
        op.filter(has_reducer),
        op.scan(reduce_reducers, {}),
        op.map(combine_reducers),
        op.map(replace_reducer),
    )
    # Build the epic: stream of normalized epics from newly added modules
    epic_ = module_.pipe(
        op.map(select_epic),
        op.filter(bool),
        op.map(normalize_epic)
    )
    # Root epic that combines all of the incoming epics
    def root_epic(
            action_: Observable, state_: Observable
    ) -> Observable:
        """ Implementation of the root epic. It listens for new epics
        to come in and automatically subscribes, dispatching every
        action an epic emits back into the store.

        Args:
            action_: the action observable
            state_: the state observable

        Returns:
            The observable of resulting actions
        """
        return epic_.pipe(
            op.flat_map(run_epic(action_, state_)),
            op.map(_dispatch)
        )
    # notifications about new feature states: dispatch an INIT_ACTION
    # carrying the module id whenever a module is registered
    new_module_ = module_.pipe(
        op.map(select_id),
        op.map(create_action(INIT_ACTION)),
        op.map(_dispatch),
    )
    def _add_feature_module(module: ReduxFeatureModule):
        """ Registers a new feature module, recursively registering its
        dependencies first (the `modules` dict guards against cycles).

        Args:
            module: the new feature module
        """
        module_id = select_id(module)
        if not module_id in modules:
            modules[module_id] = module
            for dep in select_dependencies(module):
                _add_feature_module(dep)
            module_subject.on_next(module)
    # all side-effect pipelines merged; ignore_elements keeps only their effects
    internal_ = merge(root_epic(actions_, state), reducer_, new_module_).pipe(
        op.ignore_elements()
    )
    def _as_observable() -> Observable:
        """ Returns the state as an observable

        Returns:
            the observable
        """
        return state
    def _on_completed() -> None:
        """ Triggers the done event, shutting down the main loop """
        done_.on_next(None)
    # Main loop: every dispatched action is reduced against the latest state
    # and the result pushed back into the state subject, until done_ fires.
    merge(actions_, internal_).pipe(
        op.map(lambda action: reducer(state.value, action)),
        op.take_until(done_),
    ).subscribe(state, logger.error)
    # _dispatch serves as both `dispatch` and the `on_next` alias
    return ReduxRootStore(
        _as_observable, _dispatch, _add_feature_module, _dispatch, _on_completed
    )
from typing import (Any, Callable, Iterable, Mapping, NamedTuple, Optional,
TypeVar)
from rx import Observable
from rx.core.typing import OnCompleted
# Type variable for the state handled by a single reducer/feature.
StateType = TypeVar("StateType")
# Type variable for the payload carried by an action.
PayloadType = TypeVar("PayloadType")
# The root state maps each feature identifier to that feature's state.
ReduxRootState = Mapping[str, StateType]
class Action(NamedTuple):
    """ Action implementation that takes a payload """
    type: str
    """Identifier for the action, must be globally unique."""
    payload: Any
    """The action payload."""
# An epic maps the (action, state) observables to an observable of new actions.
Epic = Callable[[Observable, Observable], Observable]
# A reducer maps the current state and an action to the next state.
Reducer = Callable[[StateType, Action], StateType]
class ReduxFeatureModule(NamedTuple):
    """ Defines the feature module. The ID identifies the section in the state and
    is also used to globally discriminate features.

    After instantiating a feature store the store will fire an initialization action
    for that feature. Use :py:meth:`~redux.of_init_feature` to
    register for these initialization actions.
    """
    id: str
    """Identifier of the module, will also be used as a namespace into the state."""
    reducer: Optional[Reducer]
    """Reducer that handles module-specific actions."""
    epic: Optional[Epic]
    """Epic that handles module-specific asynchronous operations."""
    dependencies: Iterable['ReduxFeatureModule']
    """Dependencies on other feature modules, registered before this one."""
class ReduxRootStore(NamedTuple):
    """ Implementation of a store that manages sub-state as features. Features are added
    to the store automatically, when required by the select method.
    """
    as_observable: Callable[[], Observable]
    """Converts the store to an observable of state emissions."""
    dispatch: Callable[[Action], None]
    """Dispatches a single action to the store."""
    add_feature_module: Callable[[ReduxFeatureModule], None]
    """Adds a new feature module (and, recursively, its dependencies)."""
    on_next: Callable[[Action], None]
    """Alias for :py:meth:`~redux.ReduxRootStore.dispatch`."""
    on_completed: OnCompleted
    """Shuts down the store."""
from functools import partial
from typing import Any
import rx.operators as op
from rx import Observable
from rx.core.typing import Mapper, Predicate
from .types import Action, PayloadType
def create_action(type_name: str) -> Mapper[PayloadType, Action]:
    """Builds a factory producing actions of a fixed type.

    Args:
        type_name: type identifier of the actions to create

    Returns:
        a function taking the payload and returning the complete action
    """
    return partial(Action, type_name)
def select_action_type(action: Action) -> str:
    """Extracts the type identifier from an action.

    Args:
        action: the action object

    Returns:
        the action's type string
    """
    return action.type
def select_action_payload(action: Action) -> PayloadType:
    """Extracts the payload from an action.

    Args:
        action: the action object

    Returns:
        the action's payload
    """
    return action.payload
def _is_by_selector(value: Any, selector: Mapper[Action, Any], action: Action) -> bool:
    """Checks whether the selected part of an action is identical to a value.

    Args:
        value: the reference value, compared by identity (``is``)
        selector: extracts the part of the action to compare
        action: the action object

    Returns:
        True if the selected value is the reference value, else False
    """
    selected = selector(action)
    return selected is value
def is_by_selector(
        value: Any, selector: Mapper[Action, Any]
) -> Predicate[Action]:
    """Builds a predicate testing whether a selector on an action yields a value.

    Args:
        value: the value to compare against (identity comparison)
        selector: the selector extracting the compared part of the action

    Returns:
        a predicate evaluating an action
    """
    return partial(_is_by_selector, value, selector)
def is_type(type_name: str) -> Predicate[Action]:
    """ Returns a function that checks if the action is of a particular type

    Args:
        type_name: type of the action to check for

    Returns:
        Function to execute the check against an action
    """
    return is_by_selector(type_name, select_action_type)
def of_type(
        type_name: str) -> Mapper[Observable, Observable]:
    """Creates a reactive operator keeping only actions of the given type.

    Args:
        type_name: type of the action to filter for

    Returns:
        the filter operator function
    """
    predicate = is_type(type_name)
    return op.filter(predicate)
from functools import partial
from typing import Iterable, Mapping, MutableMapping, Optional, Tuple
from .action import select_action_type
from .types import Action, Reducer, StateType
def _default_reducer(
        initial_state: Optional[StateType],
        state: StateType,
        _: Action) -> Optional[StateType]:
    #: default handling: fall back to the initial state when no state is set
    # NOTE(review): any falsy state ({}, 0, "") falls back to initial_state,
    # not only None — confirm this truthiness test is intended.
    return state if state else initial_state
def default_reducer(initial_state: Optional[StateType]) -> Reducer:
    """Creates a reducer returning the current state, or a fallback default.

    Args:
        initial_state: optional initial state used as a fallback

    Returns:
        a reducer producing the current state, or the initial state when the
        current state is unset
    """
    return partial(_default_reducer, initial_state)
def _handle_actions_reducer(
        action_map: Mapping[str, Reducer],
        def_reducer: Reducer,
        state: StateType,
        action: Action) -> Optional[StateType]:
    # Dispatch by action type, falling back to the default reducer.
    key = select_action_type(action)
    handler = action_map.get(key, def_reducer)
    return handler(state, action)
def handle_actions(
    action_map: Mapping[str, Reducer], initial_state: Optional[StateType] = None,
) -> Reducer:
    """Creates a reducer from a mapping of action type to per-action reducer.

    Args:
        action_map: mapping from action type to the reducer for that action
        initial_state: optional initial state used if no reducer matches

    Returns:
        a reducer dispatching each action to the matching entry in the map
    """
    fallback = default_reducer(initial_state)
    return partial(_handle_actions_reducer, dict(action_map), fallback)
def _combine_reducers(items: Iterable[Tuple[str, Reducer]],
                      state: Mapping[str, StateType],
                      action: Action) -> Mapping[str, StateType]:
    """Dispatches an action to each keyed reducer and assembles the new state.

    The input mapping is never mutated: a shallow copy is made lazily, on the
    first sub-state that actually changes. If no reducer produces a new
    sub-state, the original mapping is returned unchanged, which lets callers
    detect "no change" by identity.

    Args:
        items: (key, reducer) pairs, one per state partition
        state: the current combined state (may be falsy on the first run)
        action: the action to dispatch

    Returns:
        the (possibly new) combined state mapping
    """
    result = state if state else {}
    mutable: Optional[MutableMapping[str, StateType]] = None
    for key, value in items:
        current = result.get(key)
        updated = value(current, action)
        # Identity check: reducers signal a change by returning a new object.
        if current is not updated:
            if mutable is None:
                # Copy-on-write: only pay for the copy when something changed.
                mutable = dict(result)
                result = mutable
            mutable[key] = updated
    return result
def combine_reducers(
        reducers: Mapping[str, Reducer]) -> Reducer[Mapping[str, StateType]]:
    """Creates a single reducer from a mapping of per-partition reducers.

    Args:
        reducers: mapping from state partition name to its reducer

    Returns:
        a reducer dispatching every action to each of the mapped reducers
    """
    pairs = tuple(reducers.items())
    return partial(_combine_reducers, pairs)
from functools import partial
from logging import getLogger
from typing import Any, Callable, Iterable, Optional
import rx.operators as op
from rx import Observable, pipe
from rx.core.typing import Mapper, Predicate
from .action import is_by_selector, is_type, select_action_payload
from .constants import INIT_ACTION
from .types import (Action, Epic, Reducer, ReduxFeatureModule, ReduxRootState,
StateType)
logger = getLogger(__name__)
def has_payload(payload: Any) -> Predicate[Action]:
    """Builds a predicate checking whether an action carries a given payload.

    The comparison is by identity, matching ``is_by_selector``.

    Args:
        payload: payload to test against

    Returns:
        a predicate evaluating an action
    """
    return is_by_selector(payload, select_action_payload)
def of_init_feature(
        identifier: str) -> Mapper[Observable, Observable]:
    """ Operator to test for the initialization action of a feature

    Args:
        identifier: the identifier of the feature

    Returns:
        Operator function that accepts init actions for the feature, once
    """
    return pipe(
        # keep only INIT actions ...
        op.filter(is_type(INIT_ACTION)),
        # ... carrying this feature's identifier as payload ...
        op.filter(has_payload(identifier)),
        # ... and emit only the first match, mapped to the identifier
        op.take(1),
        op.map(lambda x: identifier),
        op.do_action(logger.debug)
    )
def create_feature_module(
        identifier: str,
        reducer: Optional[Reducer] = None,
        epic: Optional[Epic] = None,
        dependencies: Iterable[ReduxFeatureModule] = (),
) -> ReduxFeatureModule:
    """Builds the descriptor for a feature module.

    Args:
        identifier: the identifier of the feature, also its state namespace
        reducer: optional reducer handling feature-specific actions
        epic: optional epic handling feature-specific async operations
        dependencies: optional other feature modules this one depends on

    Returns:
        The feature module descriptor
    """
    return ReduxFeatureModule(identifier, reducer, epic, dependencies)
def _select_feature_by_id(
        identifier: str,
        initial_state: Optional[StateType],
        state: ReduxRootState) -> Optional[StateType]:
    # Look up the feature's sub-state, falling back to the provided default.
    feature_state = state.get(identifier, initial_state)
    return feature_state
def select_feature(
    identifier: str, initial_state: Optional[StateType] = None
) -> Callable[[ReduxRootState], Optional[StateType]]:
    """Builds a selector extracting a feature's state from the root state.

    Args:
        identifier: identifier of the feature
        initial_state: fallback used when the feature state is not yet defined

    Returns:
        The selector function
    """
    return partial(_select_feature_by_id, identifier, initial_state)
from typing import *
import traceback
from collections import defaultdict
import asyncio
class Listener:
    """Receives state-change notifications for a single store cell.

    A listener registers itself with the store on construction and may be
    used as an async context manager to guarantee unsubscription.
    """
    def __init__(self, key, store: 'Store'):
        assert key
        assert store
        self.key = key
        self.store = store
        self.is_binding = False
        store.subscribe(key, self)

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.store:
            # Fix: Store.unsubscribe(key, listener) takes the cell key as its
            # first argument; previously only the listener was passed, which
            # raised TypeError.
            self.store.unsubscribe(self.key, self)
            self.store = None

    def unsubscribe(self):
        """Detaches this listener from the store (idempotent)."""
        if self.store is not None:
            # Fix: pass the cell key expected by Store.unsubscribe.
            self.store.unsubscribe(self.key, self)
            self.store = None

    async def on_changed(self, changed_key: List[str], state: Dict[str, Any]):
        """Called with the list of changed keys and the full cell state."""
        raise NotImplementedError
class ListenerState:
    """Wraps a listener and tracks whether it has seen the full state yet.

    Until the first notification is delivered, a "not synced" wrapper forwards
    the complete state instead of only the changed keys, so new subscribers
    start from a consistent snapshot.
    """
    def __init__(self, listener: Listener, initialize_full_state=True):
        self.listener = listener
        self.is_synced = not initialize_full_state

    async def call_state_changed(self, changed_state, state):
        if not self.is_synced:
            # First delivery: push the whole state and mark as synced.
            self.is_synced = True
            await self.listener.on_changed(list(state.keys()), state)
        else:
            await self.listener.on_changed(list(changed_state.keys()), state)
class Action:
    """A dispatchable action: a string type plus arbitrary keyword arguments."""
    def __init__(self, action_type: str, **kwargs):
        self.type = action_type
        self.arguments = kwargs or {}
        assert isinstance(self.type, str)
        assert isinstance(self.arguments, dict)

    def to_data(self, dumps):
        """Serializes the action with the given ``dumps`` callback."""
        serializable = dict(type=self.type, **self.arguments)
        return dumps(serializable)

    @staticmethod
    def from_data(data, loads) -> 'Action':
        """Reconstructs an action from serialized data via ``loads``."""
        arguments = loads(data)
        action_type = arguments.get("type", None)
        assert isinstance(action_type, str)
        del arguments["type"]
        return Action(action_type, **arguments)

    def __repr__(self):
        return "<Action: {}>".format(self.type)
class Reducer:
    """Applies a mapping of per-key async sub-reducers to incoming actions."""
    def __init__(self, mapping_dict: dict):
        self._mapping_dict = mapping_dict if mapping_dict else {}
        self._state = {}

    async def initialize_state(self, cell_key):
        """Resets the state for a cell; returns True once ready."""
        self._state = {}
        return True

    async def reduce(self, action: Action) -> Dict[str, Any]:
        """Runs every sub-reducer and returns only the changed sub-states."""
        changed_state = {}
        state = self._state
        for key, callback in self._mapping_dict.items():
            previous = state.get(key, None)
            updated = await callback(state=previous, action=action)
            # Identity comparison: a sub-reducer signals a change by
            # returning a new object.
            if previous is not updated:
                changed_state[key] = updated
            state[key] = updated
        self._state = state
        return changed_state

    def get_state(self):
        return self._state
class Store:
    """Manages per-key reducer cells and fans state changes out to listeners.

    Each distinct key gets its own lazily created reducer instance; listeners
    subscribe per key and are notified asynchronously on every change.
    """
    def __init__(self, reducer: Type, init_full_state=True):
        assert reducer
        # reducer class (not instance); instantiated once per cell key
        self._reducer = reducer
        # key -> Reducer instance
        self._reducer_set = dict()
        # key -> {Listener: ListenerState}
        self._observer_list = defaultdict(dict)
        # whether new subscribers receive the full state on first delivery
        self._initialize_full_state = init_full_state
    def __getitem__(self, item) -> Optional[Dict[str, Any]]:
        """Returns the state for a cell key, or None if no cell exists."""
        if type(item) is not str:
            raise TypeError
        reducer_cell = self._reducer_set.get(item, None)
        if reducer_cell is None:
            return None
        return reducer_cell.get_state()
    async def dispatch(self, key: str, action: Action) -> bool:
        """Dispatches an action to the cell for *key*, creating it on demand.

        Returns True on success; any exception is printed and reported as
        False rather than propagated.
        """
        try:
            if key is None:
                return False
            if key not in self._reducer_set:
                # Lazily create and initialize the cell's reducer.
                reducer = self._reducer()
                if not await reducer.initialize_state(key):
                    return False
                self._reducer_set[key] = reducer
            else:
                reducer = self._reducer_set[key]
            await self._dispatch_work(reducer, key, action)
        except Exception as e:
            traceback.print_exc()
            return False
        return True
    async def _dispatch_work(self, reducer: Reducer, key: str, action: Action):
        # Reduce, then notify listeners only when something changed.
        changed_state = await reducer.reduce(action)
        if changed_state:
            await self._call_listeners(key, changed_state, self[key])
    async def _call_listeners(self, key: str, changed_state: Dict[str, Any], state: Dict[str, Any]):
        # Iterate over a copy: failing listeners are unsubscribed in-flight.
        listeners = self._observer_list.get(key, dict()).values()
        for listener in list(listeners):
            try:
                await listener.call_state_changed(changed_state, state)
            except Exception:
                # A listener that raises is dropped from this cell.
                self.unsubscribe(key, listener.listener)
    def subscribe(self, key: str, listener: Listener):
        """Registers a listener for a cell; delivers current state if any."""
        listener_wrapper = ListenerState(listener, self._initialize_full_state)
        # setdefault keeps the existing wrapper on duplicate subscription
        self._observer_list[key].setdefault(listener, listener_wrapper)
        listener.is_binding = True
        state = self[key]
        if state:
            # Deliver the current snapshot asynchronously.
            asyncio.ensure_future(listener_wrapper.call_state_changed(state, state))
    def unsubscribe(self, key: str, listener: Listener):
        """Removes a listener; drops the cell when no listeners remain."""
        if listener not in self._observer_list[key]:
            return
        del self._observer_list[key][listener]
        listener.is_binding = False
        if not len(self._observer_list[key]):
            del self._observer_list[key]
            # The cell's reducer (and its state) is discarded with the last
            # listener.
            if key in self._reducer_set:
                del self._reducer_set[key]
from typing import *
import re
import json
import asyncio
from urllib.parse import urljoin
import websockets
from .framework import Store, Listener, Action
class StateCellFilter:
    """Maps store cell keys to websocket URL paths and back."""
    def create_cell_url(self, key: str) -> str:
        """Returns the URL path serving the given cell key."""
        return "/redux/{}".format(key)

    def decode_cell_url(self, url: str) -> Optional[str]:
        """Extracts the cell key from a URL path, or None when it does not match."""
        matched = re.match(r"/redux/(.+)", url)
        return matched.group(1) if matched and matched.lastindex == 1 else None
class VirtualStore:
    """Client-side mirror of a remote store cell over a websocket.

    Incoming messages update a local state cache and are fanned out to the
    registered listeners; dispatched actions are serialized and sent to the
    server.
    """
    def __init__(self, uri: str, socket: websockets.WebSocketClientProtocol,
                 loads=json.loads, dumps=json.dumps):
        self._get_state_queue = []
        self._uri = uri
        self._connection = socket
        # local mirror of the remote cell state
        self._state = {}
        self._observer_list = set([])
        self.loads = loads
        self.dumps = dumps
        # start consuming server messages in the background
        asyncio.ensure_future(self._reader(), loop=asyncio.get_event_loop())
    @staticmethod
    async def create(host: str, key_uri):
        """Connects to the server and returns a store bound to the cell URL."""
        url = urljoin(host, key_uri)
        socket = await websockets.connect(url)
        return VirtualStore(url, socket)
    async def close(self):
        """Closes the connection and drops all listeners."""
        if self._connection:
            await self._connection.close()
            self._connection = None
        self._observer_list.clear()
    async def _reader(self):
        # Consume state updates until the connection closes.
        async for message in self._connection:
            state = self.loads(message)
            if not state:
                continue
            self._state.update(state)
            # Iterate over a copy so listeners may unsubscribe in-flight.
            for listener in list(self._observer_list):
                await listener.on_changed(list(state.keys()), state)
    async def dispatch(self, action: Union[Dict[str, Any], Action]):
        """Serializes an action (or raw dict) and sends it to the server."""
        connection = self._connection
        assert connection
        action_data = action
        if isinstance(action, Action):
            action_data = action.to_data(self.dumps)
        else:
            action_data = self.dumps(action_data)
        assert isinstance(action_data, str) or isinstance(action_data, bytes)
        await connection.send(action_data)
    def get_state(self):
        """Returns the locally mirrored state."""
        return self._state
    def subscribe(self, key, listener: Listener):
        # `key` is unused: a VirtualStore mirrors exactly one cell.
        self._observer_list.add(listener)
    def unsubscribe(self, listener: Listener):
        if listener in self._observer_list:
            self._observer_list.remove(listener)
class _WebSocketListener(Listener):
    """Listener that forwards state changes to a connected websocket client."""
    def __init__(self, key, store: 'WebSocketStoreServer', ws: websockets.WebSocketServerProtocol):
        self.ws = ws
        super(_WebSocketListener, self).__init__(key, store)

    async def on_changed(self, changed_key: List[str], state: Dict[str, Any]):
        # Send only the sub-states that actually changed.
        response = {key: state[key] for key in changed_key if key in state}
        await self.ws.send(self.store.dumps(response))
class WebSocketStoreServer(Store):
    """Store exposed over websockets: one URL path per state cell.

    Clients connect to a cell URL, receive state updates as JSON and send
    serialized actions that are dispatched into the corresponding cell.
    """
    # NOTE(review): the mutable default ``StateCellFilter()`` instance is
    # shared across all server instances that rely on the default — confirm
    # this is intended (the class is stateless, so it is likely harmless).
    def __init__(self, host: str, port: int, reducer: Type,
                 loads=json.loads, dumps=json.dumps,
                 cell_filter: StateCellFilter=StateCellFilter()):
        super(WebSocketStoreServer, self).__init__(reducer)
        assert reducer
        assert cell_filter
        assert loads
        assert dumps
        self._filter = cell_filter
        self.server = None
        self._host = host
        self._port = port
        self.loads = loads
        self.dumps = dumps
    async def start(self):
        """Starts serving websocket connections on the configured host/port."""
        host = self._host
        port = self._port
        server = await websockets.serve(self._pre_dispatch, host, port)
        self.server = server
    async def _pre_dispatch(self, ws: websockets.WebSocketServerProtocol, path):
        # Per-connection handler: decode the cell key from the URL path,
        # subscribe the socket as a listener, and dispatch incoming actions.
        key = self._filter.decode_cell_url(path)
        try:
            if not key:
                return
            listener = _WebSocketListener(key, self, ws)
            async for message in ws:
                action = Action.from_data(message, self.loads)
                result = await self.dispatch(key, action)
                if not result:
                    # Dispatch failure terminates the connection.
                    raise IndexError
                if not listener.is_binding:
                    # The listener was dropped (e.g. after a send failure).
                    raise ConnectionError
        finally:
            await ws.close()
# redvid
[](http://hits.dwyl.io/elmoiv/redvid)
[](https://travis-ci.org/elmoiv/redvid)
[](https://pypi.org/project/redvid/)
[](https://github.com/elmoiv/redvid/releases)
### Smart downloader for Reddit *hosted* videos
## Features
* Download local hosted videos with audio.
* Requires only `requests` and `FFmpeg`.
* Ability to decide quality.
* Bypass bot detection.
## Installation
`redvid` requires Python 3.
Use `pip` to install the package from PyPI:
```bash
pip install redvid
```
Or, install the latest version of the package from GitHub:
```bash
pip install git+https://github.com/elmoiv/redvid.git
```
## Usage
Using *redvid* to download a video:
- via terminal (using [`credvid.py`](https://github.com/elmoiv/redvid/blob/master/credvid.py)):
```console
> python credvid.py --help
usage: credvid.py [-h] -u URL [-p PATH] [-o] [-mxq] [-mnq] [-mxd MAXDURATION]
[-mxs MAXSIZE] [-am] [-px PROXIES]
Argument parser for redvid module
required arguments:
-u URL, --url URL Post URL for Reddit hosted video
optional arguments:
-p PATH, --path PATH Custom path for downloaded videos
-o, --overwrite Overwrite existing videos and ignore exception
-mxq, --maxquality Auto select maximum quality
-mnq, --minquality Auto select minimum quality
-mxd MAXDURATION, --maxduration MAXDURATION
Ignore videos that exceed this duration (in seconds)
-mxs MAXSIZE, --maxsize MAXSIZE
Ignore videos that exceed this size (in bytes)
-am, --automax Automatically download video with maximum size (Helps
for old reddit videos with unknown qualities)
-px PROXIES, --proxies PROXIES
Download videos through proxies for blocked regions
```
- via scripts:
```python
from redvid import Downloader
reddit = Downloader(max_q=True)
reddit.url = 'https://v.redd.it/c8oic7ppc2751'
reddit.download()
```
*or*
```python
__import__('redvid').Downloader(url='https://v.redd.it/c8oic7ppc2751', max_q=True).download()
```
## Installing FFmpeg
### Windows:
https://m.wikihow.com/Install-FFmpeg-on-Windows
(*You may need to restart your pc after applying these steps*)
### Linux:
`sudo apt install ffmpeg`
### Mac OS:
* install [Homebrew](https://brew.sh/):
`/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"`
* Then:
`$ brew install ffmpeg`
## Tests
Here are a few sample tests:
* [Video only](https://github.com/elmoiv/redvid/tree/master/tests/test1.py)
* [Video with audio](https://github.com/elmoiv/redvid/tree/master/tests/test2.py)
* [Get best quality according to given size (Automated)](https://github.com/elmoiv/redvid/tree/master/tests/test12.py)
* [Choose PATH](https://github.com/elmoiv/redvid/tree/master/tests/test3.py)
* [Auto-detect maximum quality](https://github.com/elmoiv/redvid/tree/master/tests/test4.py)
* [Auto-detect minimum quality](https://github.com/elmoiv/redvid/tree/master/tests/test5.py)
* [Skip file check and overwrite](https://github.com/elmoiv/redvid/tree/master/tests/test6.py)
* [Silent download (No logging)](https://github.com/elmoiv/redvid/tree/master/tests/test7.py)
* [Set maximum video size](https://github.com/elmoiv/redvid/tree/master/tests/test8.py)
* [Set maximum video size (different technique)](https://github.com/elmoiv/redvid/tree/master/tests/test9.py)
* [Set maximum video duration](https://github.com/elmoiv/redvid/tree/master/tests/test10.py)
* [Set maximum video duration (different technique)](https://github.com/elmoiv/redvid/tree/master/tests/test11.py)
## Changelog
### v1.1.3:
* [#20](https://github.com/elmoiv/redvid/issues/20) Added a feature to create a folder in case it does not exist.
### v1.1.2:
* [#18](https://github.com/elmoiv/redvid/issues/18) Fixed bug when handling path that caused recursive directories.
### v1.1.1:
* [#15](https://github.com/elmoiv/redvid/issues/15) Fixed bug when fetching reddit videos with expiry date.
### v1.1.0:
* [#11](https://github.com/elmoiv/redvid/issues/11) redvid can now decide best quality according to given size.
* Added support for old reddit videos.
* Fixed bug where video qualities list can't be parsed.
### v1.0.9:
* [#8](https://github.com/elmoiv/redvid/issues/8) Added the ability to disable logging.
* Maximum video size can be set.
* [#9](https://github.com/elmoiv/redvid/issues/9) Maximum video duration can be set.
### v1.0.8:
* [#7](https://github.com/elmoiv/redvid/issues/7) Fixed a bug with quality fetching.
### v1.0.7:
* `download()` will return file path again.
### v1.0.6:
* [#5](https://github.com/elmoiv/redvid/issues/5) Can now download URLs in the **v.redd.it** format.
* [#4](https://github.com/elmoiv/redvid/issues/4) A custom PATH can be chosen instead of the current directory.
* [#3](https://github.com/elmoiv/redvid/issues/3) Max/Min quality can be automatically set to skip quality query.
* Added FFmpeg encoding for videos with no sound so they can be uploaded on some platforms.
* Adjusted printed text and progress bars.
## Stargazers over time
[](https://starchart.cc/elmoiv/redvid)
## Contributing
Please contribute! If you want to fix a bug, suggest improvements, or add new features to the project, just [open an issue](https://github.com/elmoiv/redvid/issues) or send me a pull request. | /redvid-2.0.0.tar.gz/redvid-2.0.0/README.md | 0.572125 | 0.830697 | README.md | pypi |
import numpy as np
from typing import Tuple
# This is comparable to RC filter, smoothing factor is an approximation to alpha
# See redpd_iterator
def get_smoothing_factor(sensor_sample_rate_hz: float, low_pass_sample_rate_hz: float = 1) -> float:
    """
    Computes the exponential-smoothing factor (alpha) for the low-pass stage.
    This approximates the alpha of an RC low-pass filter; see redpd_iterator.
    Assumes low_pass_sample_rate_hz << sensor_sample_rate_hz so alpha <= 1.
    :param sensor_sample_rate_hz: sample rate of sensor in Hz
    :param low_pass_sample_rate_hz: sample rate of low pass filter in Hz
    :return: smoothing factor alpha
    """
    alpha = low_pass_sample_rate_hz / sensor_sample_rate_hz
    return alpha
def get_gravity(accelerometer: np.ndarray, smoothing_factor: float) -> np.ndarray:
    """
    Extracts the gravity (low-frequency) component via exponential smoothing.
    based on the slack thread: https://tinyurl.com/f6t3h2fp
    :param accelerometer: accelerometer signal waveform
    :param smoothing_factor: alpha from get_smoothing_factor function
    :return: numpy array with gravity values
    """
    # Seed the recursive filter at the signal mean to reduce the startup
    # transient. The previous ``np.zeros(n) * np.mean(...)`` was identically
    # zero, so the filter always started from 0 despite the apparent intent
    # of multiplying by the mean.
    gravity = np.full(len(accelerometer), np.mean(accelerometer))
    # single-pole IIR low-pass: g[i+1] = (1 - a) * g[i] + a * x[i+1]
    for i in range(len(gravity) - 1):
        gravity[i + 1] = (1 - smoothing_factor) * gravity[i] + smoothing_factor * accelerometer[i + 1]
    return gravity
def get_gravity_and_linear_acceleration(accelerometer: np.ndarray,
                                        sensor_sample_rate_hz: float,
                                        low_pass_sample_rate_hz: float = 1) -> Tuple[np.ndarray, np.ndarray]:
    """
    Splits a smartphone accelerometer waveform into gravity and linear acceleration.
    :param accelerometer: accelerometer signal waveform
    :param sensor_sample_rate_hz: sample rate of accelerometer in Hz
    :param low_pass_sample_rate_hz: sample rate of low pass filter in Hz
    :return: numpy array with gravity and numpy array with linear acceleration
    """
    alpha = get_smoothing_factor(sensor_sample_rate_hz=sensor_sample_rate_hz,
                                 low_pass_sample_rate_hz=low_pass_sample_rate_hz)
    # the low-frequency component is gravity; the remainder is linear motion
    gravity = get_gravity(accelerometer=accelerometer, smoothing_factor=alpha)
    return gravity, accelerometer - gravity
"""
Generalized to any DC offset, for comparison
"""
def get_sensor_lowpass(sensor_wf: np.ndarray,
                       sensor_sample_rate_hz: float,
                       lowpass_frequency_hz: float = 1) -> np.ndarray:
    """
    Single-pole exponential low-pass of a sensor waveform (zero-seeded).
    based on the slack thread: https://tinyurl.com/f6t3h2fp
    :param sensor_wf: signal waveform
    :param sensor_sample_rate_hz: sample rate of sensor in Hz
    :param lowpass_frequency_hz: sample rate of low pass filter in Hz
    :return: sensor low pass
    """
    alpha = lowpass_frequency_hz / sensor_sample_rate_hz
    lowpass = np.zeros(len(sensor_wf))
    # recursive update: lp[i+1] = (1 - alpha) * lp[i] + alpha * x[i+1]
    for index in range(len(lowpass) - 1):
        lowpass[index + 1] = (1 - alpha) * lowpass[index] + alpha * sensor_wf[index + 1]
    return lowpass
def get_lowpass_and_highpass(sensor_wf: np.ndarray,
                             sensor_sample_rate_hz: float,
                             lowpass_frequency_hz: float = 1) -> Tuple[np.ndarray, np.ndarray]:
    """
    Splits a waveform into its low-pass and high-pass components.
    :param sensor_wf: signal waveform
    :param sensor_sample_rate_hz: sample rate of sensor in Hz
    :param lowpass_frequency_hz: sample rate of low pass filter in Hz
    :return: sensor low pass and high pass
    """
    lowpass = get_sensor_lowpass(sensor_wf, sensor_sample_rate_hz, lowpass_frequency_hz)
    highpass = sensor_wf - lowpass
    return lowpass, highpass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.