repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
wwj718/edx-platform | lms/djangoapps/instructor/tests/test_offline_gradecalc.py | 74 | 4754 | """
Tests for offline_gradecalc.py
"""
import json
from mock import patch
from courseware.models import OfflineComputedGrade
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xmodule.graders import Score
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..offline_gradecalc import offline_grade_calculation, student_grades
def mock_grade(_student, _request, course, **_kwargs):
    """ Return some fake grade data to mock grades.grade() """
    # NOTE: the literal values below are mirrored exactly by the assertions in
    # TestOfflineGradeCalc.test_output (after a JSON round-trip); keep in sync.
    return {
        'grade': u'Pass',
        'totaled_scores': {
            u'Homework': [
                Score(earned=10.0, possible=10.0, graded=True, section=u'Subsection 1', module_id=None),
            ]
        },
        'percent': 0.85,
        'raw_scores': [
            Score(
                earned=5.0, possible=5.0, graded=True, section=u'Numerical Input',
                module_id=course.id.make_usage_key('problem', 'problem1'),
            ),
            Score(
                earned=5.0, possible=5.0, graded=True, section=u'Multiple Choice',
                module_id=course.id.make_usage_key('problem', 'problem2'),
            ),
        ],
        'section_breakdown': [
            {'category': u'Homework', 'percent': 1.0, 'detail': u'Homework 1 - Test - 100% (10/10)', 'label': u'HW 01'},
            {'category': u'Final Exam', 'prominent': True, 'percent': 0, 'detail': u'Final = 0%', 'label': u'Final'}
        ],
        'grade_breakdown': [
            {'category': u'Homework', 'percent': 0.85, 'detail': u'Homework = 85.00% of a possible 85.00%'},
            {'category': u'Final Exam', 'percent': 0.0, 'detail': u'Final Exam = 0.00% of a possible 15.00%'}
        ]
    }
class TestOfflineGradeCalc(ModuleStoreTestCase):
    """ Test Offline Grade Calculation with some mocked grades """

    def setUp(self):
        super(TestOfflineGradeCalc, self).setUp()
        with modulestore().default_store(ModuleStoreEnum.Type.split):  # Test with split b/c old mongo keys are messy
            self.course = CourseFactory.create()
        self.user = UserFactory.create()
        CourseEnrollment.enroll(self.user, self.course.id)

        # Patch grades.grade for the lifetime of each test; addCleanup makes
        # sure the patch is removed even when a test fails mid-way.
        patcher = patch('courseware.grades.grade', new=mock_grade)
        patcher.start()
        self.addCleanup(patcher.stop)

    def test_output(self):
        """ Compute the offline grade and check the JSON-serialized result. """
        offline_grades = OfflineComputedGrade.objects
        # No cached grade exists before the computation runs.
        self.assertEqual(offline_grades.filter(user=self.user, course_id=self.course.id).count(), 0)
        offline_grade_calculation(self.course.id)
        result = offline_grades.get(user=self.user, course_id=self.course.id)
        # The gradeset is stored as JSON; every value below matches mock_grade.
        decoded = json.loads(result.gradeset)
        self.assertEqual(decoded['grade'], "Pass")
        self.assertEqual(decoded['percent'], 0.85)
        self.assertEqual(decoded['totaled_scores'], {
            "Homework": [
                {"earned": 10.0, "possible": 10.0, "graded": True, "section": "Subsection 1", "module_id": None}
            ]
        })
        self.assertEqual(decoded['raw_scores'], [
            {
                "earned": 5.0,
                "possible": 5.0,
                "graded": True,
                "section": "Numerical Input",
                # usage keys are serialized to their unicode string form
                "module_id": unicode(self.course.id.make_usage_key('problem', 'problem1')),
            },
            {
                "earned": 5.0,
                "possible": 5.0,
                "graded": True,
                "section": "Multiple Choice",
                "module_id": unicode(self.course.id.make_usage_key('problem', 'problem2')),
            }
        ])
        self.assertEqual(decoded['section_breakdown'], [
            {"category": "Homework", "percent": 1.0, "detail": "Homework 1 - Test - 100% (10/10)", "label": "HW 01"},
            {"category": "Final Exam", "label": "Final", "percent": 0, "detail": "Final = 0%", "prominent": True}
        ])
        self.assertEqual(decoded['grade_breakdown'], [
            {"category": "Homework", "percent": 0.85, "detail": "Homework = 85.00% of a possible 85.00%"},
            {"category": "Final Exam", "percent": 0.0, "detail": "Final Exam = 0.00% of a possible 15.00%"}
        ])

    def test_student_grades(self):
        """ Test that the data returned by student_grades() and grades.grade() match """
        offline_grade_calculation(self.course.id)
        # If student_grades() hits the live grader instead of the offline
        # cache, the patched side_effect raises and fails the test.
        with patch('courseware.grades.grade', side_effect=AssertionError('Should not re-grade')):
            result = student_grades(self.user, None, self.course, use_offline=True)
        self.assertEqual(result, mock_grade(self.user, None, self.course))
| agpl-3.0 |
titasakgm/brc-stock | openerp/addons/base_report_designer/base_report_designer.py | 50 | 3813 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import openerp.modules.registry
from openerp.osv import osv
from openerp_sxw2rml import sxw2rml
from StringIO import StringIO
from openerp import pooler
from openerp import addons
class report_xml(osv.osv):
    """Extension of ir.actions.report.xml used by the OpenOffice report
    designer: converts uploaded sxw/odt report sources to RML and stores
    both representations on the report record."""
    _inherit = 'ir.actions.report.xml'

    # Map an uploaded file type to the XSL stylesheet that normalizes it.
    _XSL_BY_TYPE = {
        'sxw': 'normalized_oo2rml.xsl',
        'odt': 'normalized_odt2rml.xsl',
    }

    def _read_xsl(self, file_type):
        """Return the XSL stylesheet content for ``file_type``.

        Raises ValueError for unsupported types (previously an unbound ``fp``
        caused a confusing NameError), and always closes the file handle
        (previously leaked).
        """
        try:
            xsl_name = self._XSL_BY_TYPE[file_type]
        except KeyError:
            raise ValueError('unsupported report source type: %r' % (file_type,))
        path = addons.get_module_resource('base_report_designer',
                                          'openerp_sxw2rml', xsl_name)
        fp = open(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def sxwtorml(self, cr, uid, file_sxw, file_type):
        '''
        The use of this function is to get rml file from sxw file.
        '''
        # file_sxw is base64-encoded binary content of the sxw/odt document
        sxwval = StringIO(base64.decodestring(file_sxw))
        return {'report_rml_content':
                str(sxw2rml(sxwval, xsl=self._read_xsl(file_type)))}

    def upload_report(self, cr, uid, report_id, file_sxw, file_type, context=None):
        '''
        Store the uploaded sxw/odt source and its RML conversion on the
        report identified by ``report_id``.
        '''
        pool = pooler.get_pool(cr.dbname)
        sxwval = StringIO(base64.decodestring(file_sxw))
        pool.get('ir.actions.report.xml').write(cr, uid, [report_id], {
            'report_sxw_content': base64.decodestring(file_sxw),
            'report_rml_content': str(sxw2rml(sxwval, xsl=self._read_xsl(file_type))),
        })
        # FIXME: this should be moved to an override of the ir.actions.report_xml.create() method
        cr.commit()
        pool.get('ir.actions.report.xml').register_all(cr)
        openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
        return True

    def report_get(self, cr, uid, report_id, context=None):
        """Return the raw sxw and rml sources of a report, base64-encoded."""
        if context is None:
            context = {}
        # skip osv.fields.sanitize_binary_value() because we want the raw bytes in all cases
        context.update(bin_raw=True)
        report = self.browse(cr, uid, report_id, context=context)
        sxw_data = report.report_sxw_content
        rml_data = report.report_rml_content
        # browse() may hand back unicode; the payload must be bytes before
        # base64 encoding (Python 2 codebase)
        if isinstance(sxw_data, unicode):
            sxw_data = sxw_data.encode("iso-8859-1", "replace")
        if isinstance(rml_data, unicode):
            rml_data = rml_data.encode("iso-8859-1", "replace")
        return {
            'file_type': report.report_type,
            'report_sxw_content': sxw_data and base64.encodestring(sxw_data) or False,
            'report_rml_content': rml_data and base64.encodestring(rml_data) or False
        }


report_xml()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
loveyoupeng/rt | modules/web/src/main/native/Tools/Scripts/webkitpy/common/config/committervalidator.py | 7 | 5266 | # Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.config import committers, urls
class CommitterValidator(object):
    """Validates that the review/commit-queue flags on a patch were set by
    accounts known to contributors.json, rejecting the patch otherwise."""

    def __init__(self, host):
        self.host = host

    def _contributors_json_path(self):
        # contributors.json resides in the same directory as committers.py
        fs = self.host.filesystem
        committers_dir = fs.dirname(fs.path_to_module(committers.__name__))
        json_path = fs.join(committers_dir, 'contributors.json')
        return fs.relpath(json_path, self.host.scm().checkout_root)

    def _flag_permission_rejection_message(self, setter_email, flag_name):
        # This could be queried from the tool.
        queue_name = "commit-queue"
        committers_list = self._contributors_json_path()
        return "".join([
            "%s does not have %s permissions according to %s." % (
                setter_email, flag_name,
                urls.view_source_url(committers_list)),
            "\n\n- If you do not have %s rights please read %s for "
            "instructions on how to use bugzilla flags." % (
                flag_name, urls.contribution_guidelines),
            "\n\n- If you have %s rights please correct the error in %s by "
            "adding yourself to the file (no review needed). " % (
                flag_name, committers_list),
            "The %s restarts itself every 2 hours. After restart the %s will "
            "correctly respect your %s rights." % (
                queue_name, queue_name, flag_name),
        ])

    def _validate_setter_email(self, patch, result_key, rejection_function):
        # result_key is e.g. "reviewer": patch.reviewer() resolves the account
        # and "reviewer_email" is the raw bugzilla field.
        committer = getattr(patch, result_key)()
        setter_email = patch._attachment_dictionary.get("%s_email" % result_key)
        # If the flag is set, and we don't recognize the setter, reject the flag!
        if setter_email and not committer:
            rejection_function(
                patch.id(),
                self._flag_permission_rejection_message(setter_email,
                                                        result_key))
            return False
        return True

    def _reject_patch_if_flags_are_invalid(self, patch):
        reviewer_valid = self._validate_setter_email(
            patch, "reviewer", self.reject_patch_from_review_queue)
        # Short-circuit: once rejected, skip the committer check.
        return reviewer_valid and self._validate_setter_email(
            patch, "committer", self.reject_patch_from_commit_queue)

    def patches_after_rejecting_invalid_commiters_and_reviewers(self, patches):
        # (sic: "commiters" is the established public name of this method)
        return [candidate for candidate in patches
                if self._reject_patch_if_flags_are_invalid(candidate)]

    def _reject_attachment(self, attachment_id, queue_label, flag_name,
                           additional_comment_text):
        # Shared body of the two public reject_* helpers below.
        comment_text = "Rejecting attachment %s from %s." % (attachment_id,
                                                             queue_label)
        if additional_comment_text:
            comment_text += "\n\n%s" % additional_comment_text
        self.host.bugs.set_flag_on_attachment(attachment_id, flag_name, "-",
                                              comment_text)

    def reject_patch_from_commit_queue(self,
                                       attachment_id,
                                       additional_comment_text=None):
        self._reject_attachment(attachment_id, "commit-queue", "commit-queue",
                                additional_comment_text)

    def reject_patch_from_review_queue(self,
                                       attachment_id,
                                       additional_comment_text=None):
        self._reject_attachment(attachment_id, "review queue", "review",
                                additional_comment_text)
| gpl-2.0 |
Absimpl/Abstream | kivytest_version2_10_ubuntu/kivy/core/text/markup.py | 16 | 31300 | '''
Text Markup
===========
.. versionadded:: 1.1.0
We provide a simple text markup for inline text styling. The syntax looks the
same as the `BBCode <http://en.wikipedia.org/wiki/BBCode>`_.
A tag is defined as ``[tag]``, and may have an associated closing tag:
``[/tag]``. Example of a markup text::
[b]Hello [color=ff0000]world[/b][/color]
The following tags are available:
``[b][/b]``
Activate bold text
``[i][/i]``
Activate italic text
``[font=<str>][/font]``
Change the font
``[size=<integer>][/size]``
Change the font size
``[color=#<color>][/color]``
Change the text color
``[ref=<str>][/ref]``
Add an interactive zone. The reference + all the word boxes inside the
reference will be available in :attr:`MarkupLabel.refs`
``[anchor=<str>]``
Put an anchor in the text. You can get the position of your anchor within
the text with :attr:`MarkupLabel.anchors`
``[sub][/sub]``
Display the text at a subscript position relative to the text before it.
``[sup][/sup]``
Display the text at a superscript position relative to the text before it.
If you need to escape the markup from the current text, use
:func:`kivy.utils.escape_markup`.
'''
__all__ = ('MarkupLabel', )
import re
from kivy.properties import dpi2px
from kivy.parser import parse_color
from kivy.logger import Logger
from kivy.core.text import Label, LabelBase
from kivy.core.text.text_layout import layout_text, LayoutWord, LayoutLine
from copy import copy
from math import ceil
from functools import partial
# We need to do this trick when documentation is generated
MarkupLabelBase = Label
if Label is None:
MarkupLabelBase = LabelBase
class MarkupLabel(MarkupLabelBase):
'''Markup text label.
See module documentation for more information.
'''
    def __init__(self, *largs, **kwargs):
        # Per-option stacks used by the [tag]/[/tag] push/pop pairs, plus the
        # ref/anchor result dicts. These must exist before the base __init__
        # runs, since it may trigger an initial render that reads them.
        self._style_stack = {}
        self._refs = {}
        self._anchors = {}
        super(MarkupLabel, self).__init__(*largs, **kwargs)
        # (width, height) of the laid-out text itself, before padding/clamping
        self._internal_size = 0, 0
        # LayoutLine list produced by _pre_render(), consumed by _real_render()
        self._cached_lines = []
    @property
    def refs(self):
        '''Get the bounding box of all the ``[ref=...]``::

            { 'refA': ((x1, y1, x2, y2), (x1, y1, x2, y2)), ... }

        Filled in by :meth:`_real_render`; empty before the first render.
        '''
        return self._refs
    @property
    def anchors(self):
        '''Get the position of all the ``[anchor=...]``::

            { 'anchorA': (x, y), 'anchorB': (x, y), ... }

        Filled in by :meth:`_real_render`; empty before the first render.
        '''
        return self._anchors
@property
def markup(self):
'''Return the text with all the markup splitted::
>>> MarkupLabel('[b]Hello world[/b]').markup
>>> ('[b]', 'Hello world', '[/b]')
'''
s = re.split('(\[.*?\])', self.label)
s = [x for x in s if x != '']
return s
def _push_style(self, k):
if not k in self._style_stack:
self._style_stack[k] = []
self._style_stack[k].append(self.options[k])
def _pop_style(self, k):
if k not in self._style_stack or len(self._style_stack[k]) == 0:
Logger.warning('Label: pop style stack without push')
return
v = self._style_stack[k].pop()
self.options[k] = v
def render(self, real=False):
options = copy(self.options)
if not real:
ret = self._pre_render()
else:
ret = self._real_render()
self.options = options
return ret
    def _pre_render(self):
        '''First pass: walk the markup tags, lay the text out into
        ``self._cached_lines`` and return the ``(width, height)`` the texture
        needs. Alignment is not applied here; :meth:`_real_render` draws.
        '''
        # split markup, words, and lines
        # result: list of word with position and width/height
        # during the first pass, we don't care about h/valign
        self._cached_lines = lines = []
        self._refs = {}
        self._anchors = {}
        clipped = False
        w = h = 0
        uw, uh = self.text_size
        spush = self._push_style
        spop = self._pop_style
        opts = options = self.options
        options['_ref'] = None
        options['_anchor'] = None
        options['script'] = 'normal'
        shorten = options['shorten']
        # if shorten, then don't split lines to fit uw, because it will be
        # flattened later when shortening and broken up lines if broken
        # mid-word will have space mid-word when lines are joined
        uw_temp = None if shorten else uw
        xpad = options['padding_x']
        uhh = (None if uh is not None and options['valign'][-1] != 'p' or
               options['shorten'] else uh)
        options['strip'] = options['strip'] or options['halign'][-1] == 'y'
        for item in self.markup:
            if item == '[b]':
                spush('bold')
                options['bold'] = True
                self.resolve_font_name()
            elif item == '[/b]':
                spop('bold')
                self.resolve_font_name()
            elif item == '[i]':
                spush('italic')
                options['italic'] = True
                self.resolve_font_name()
            elif item == '[/i]':
                spop('italic')
                self.resolve_font_name()
            elif item[:6] == '[size=':
                item = item[6:-1]
                try:
                    if item[-2:] in ('px', 'pt', 'in', 'cm', 'mm', 'dp', 'sp'):
                        size = dpi2px(item[:-2], item[-2:])
                    else:
                        size = int(item)
                except ValueError:
                    raise
                    # NOTE(review): unreachable — the bare raise above
                    # re-raises before this fallback can run; confirm whether
                    # the fallback or the re-raise is the intended behavior.
                    size = options['font_size']
                spush('font_size')
                options['font_size'] = size
            elif item == '[/size]':
                spop('font_size')
            elif item[:7] == '[color=':
                color = parse_color(item[7:-1])
                spush('color')
                options['color'] = color
            elif item == '[/color]':
                spop('color')
            elif item[:6] == '[font=':
                fontname = item[6:-1]
                spush('font_name')
                options['font_name'] = fontname
                self.resolve_font_name()
            elif item == '[/font]':
                spop('font_name')
                self.resolve_font_name()
            elif item[:5] == '[sub]':
                spush('font_size')
                spush('script')
                # sub/superscript text is drawn at half the current font size
                options['font_size'] = options['font_size'] * .5
                options['script'] = 'subscript'
            elif item == '[/sub]':
                spop('font_size')
                spop('script')
            elif item[:5] == '[sup]':
                spush('font_size')
                spush('script')
                options['font_size'] = options['font_size'] * .5
                options['script'] = 'superscript'
            elif item == '[/sup]':
                spop('font_size')
                spop('script')
            elif item[:5] == '[ref=':
                ref = item[5:-1]
                spush('_ref')
                options['_ref'] = ref
            elif item == '[/ref]':
                spop('_ref')
            elif not clipped and item[:8] == '[anchor=':
                options['_anchor'] = item[8:-1]
            elif not clipped:
                # plain text: unescape the bracket/ampersand entities, then
                # lay it out with a snapshot of the current style options
                item = item.replace('&bl;', '[').replace(
                    '&br;', ']').replace('&amp;', '&')
                opts = copy(options)
                extents = self.get_cached_extents()
                opts['space_width'] = extents(' ')[0]
                w, h, clipped = layout_text(item, lines, (w, h),
                    (uw_temp, uhh), opts, extents, True, False)
        if len(lines):  # remove any trailing spaces from the last line
            old_opts = self.options
            self.options = copy(opts)
            w, h, clipped = layout_text('', lines, (w, h), (uw_temp, uhh),
                self.options, self.get_cached_extents(), True, True)
            self.options = old_opts

        if shorten:
            options['_ref'] = None  # no refs for you!
            options['_anchor'] = None
            w, h, lines = self.shorten_post(lines, w, h)
            self._cached_lines = lines
        # when valign is not top, for markup we layout everything (text_size[1]
        # is temporarily set to None) and after layout cut to size if too tall
        elif uh != uhh and h > uh and len(lines) > 1:
            if options['valign'][-1] == 'm':  # bottom
                # drop whole lines from the top until the text fits
                i = 0
                while i < len(lines) - 1 and h > uh:
                    h -= lines[i].h
                    i += 1
                del lines[:i]
            else:  # middle
                i = 0
                top = int(h / 2. + uh / 2.)  # remove extra top portion
                while i < len(lines) - 1 and h > top:
                    h -= lines[i].h
                    i += 1
                del lines[:i]
                i = len(lines) - 1  # remove remaining bottom portion
                while i and h > uh:
                    h -= lines[i].h
                    i -= 1
                del lines[i + 1:]

        # now justify the text
        if options['halign'][-1] == 'y' and uw is not None:
            # XXX: update refs to justified pos
            # when justify, each line shouldv'e been stripped already
            split = partial(re.split, re.compile('( +)'))
            uww = uw - 2 * xpad
            # rebinds the name chr to the label's string type (str/unicode
            # on py2) — deliberately shadows the builtin in this scope
            chr = type(self.text)
            space = chr(' ')
            empty = chr('')
            for i in range(len(lines)):
                line = lines[i]
                words = line.words
                # if there's nothing to justify, we're done
                if (not line.w or int(uww - line.w) <= 0 or not len(words) or
                        line.is_last_line):
                    continue
                done = False
                parts = [None, ] * len(words)  # contains words split by space
                idxs = [None, ] * len(words)  # indices of the space in parts
                # break each word into spaces and add spaces until it's full
                # do first round of split in case we don't need to split all
                for w in range(len(words)):
                    word = words[w]
                    sw = word.options['space_width']
                    p = parts[w] = split(word.text)
                    idxs[w] = [v for v in range(len(p)) if
                               p[v].startswith(' ')]
                    # now we have the indices of the spaces in split list
                    for k in idxs[w]:
                        # try to add single space at each space
                        if line.w + sw > uww:
                            done = True
                            break
                        line.w += sw
                        word.lw += sw
                        p[k] += space
                    if done:
                        break
                # there's not a single space in the line?
                if not any(idxs):
                    continue
                # now keep adding spaces to already split words until done
                while not done:
                    for w in range(len(words)):
                        if not idxs[w]:
                            continue
                        word = words[w]
                        sw = word.options['space_width']
                        p = parts[w]
                        for k in idxs[w]:
                            # try to add single space at each space
                            if line.w + sw > uww:
                                done = True
                                break
                            line.w += sw
                            word.lw += sw
                            p[k] += space
                        if done:
                            break
                # if not completely full, push last words to right edge
                diff = int(uww - line.w)
                if diff > 0:
                    # find the last word that had a space
                    for w in range(len(words) - 1, -1, -1):
                        if not idxs[w]:
                            continue
                        break
                    old_opts = self.options
                    self.options = word.options
                    word = words[w]
                    # split that word into left/right and push right till uww
                    l_text = empty.join(parts[w][:idxs[w][-1]])
                    r_text = empty.join(parts[w][idxs[w][-1]:])
                    left = LayoutWord(word.options,
                        self.get_extents(l_text)[0], word.lh, l_text)
                    right = LayoutWord(word.options,
                        self.get_extents(r_text)[0], word.lh, r_text)
                    left.lw = max(left.lw, word.lw + diff - right.lw)
                    self.options = old_opts
                    # now put words back together with right/left inserted
                    for k in range(len(words)):
                        if idxs[k]:
                            words[k].text = empty.join(parts[k])
                    words[w] = right
                    words.insert(w, left)
                else:
                    for k in range(len(words)):
                        if idxs[k]:
                            words[k].text = empty.join(parts[k])
                line.w = uww
                # NOTE(review): inside this justify branch the name ``w`` was
                # last bound as a loop index, not the running text width from
                # the layout pass above — confirm this max() is intended.
                w = max(w, uww)

        self._internal_size = w, h
        if uw:
            w = uw
        if uh:
            h = uh
        # clamp to a minimal non-degenerate texture size
        if h > 1 and w < 2:
            w = 2
        if w < 1:
            w = 1
        if h < 1:
            h = 1
        return int(w), int(h)
    def _real_render(self):
        '''Second pass: draw the lines computed by :meth:`_pre_render`
        (``self._cached_lines``) into the texture, applying h/valign and
        recording the ``refs`` and ``anchors`` positions along the way.
        '''
        lines = self._cached_lines
        options = None
        for line in lines:
            if len(line.words):  # get opts from first line, first word
                options = line.words[0].options
                break
        if not options:  # there was no text to render
            self._render_begin()
            data = self._render_end()
            assert(data)
            # the assert above already guarantees data is truthy; the None
            # check below is redundant but harmless
            if data is not None and data.width > 1:
                self.texture.blit_data(data)
            return

        old_opts = self.options
        render_text = self._render_text
        xpad, ypad = options['padding_x'], options['padding_y']
        x, y = xpad, ypad  # pos in the texture
        iw, ih = self._internal_size  # the real size of text, not texture
        w, h = self.size
        halign = options['halign']
        valign = options['valign']
        refs = self._refs
        anchors = self._anchors
        self._render_begin()

        # vertical alignment: shift the starting y for bottom/middle
        if valign == 'bottom':
            y = h - ih + ypad
        elif valign == 'middle':
            y = int((h - ih) / 2 + ypad)

        for layout_line in lines:  # for plain label each line has only one str
            lw, lh = layout_line.w, layout_line.h
            x = xpad
            if halign[0] == 'c':  # center
                x = int((w - lw) / 2.)
            elif halign[0] == 'r':  # right
                x = max(0, int(w - lw - xpad))
            layout_line.x = x
            layout_line.y = y
            # psp/pph track the previous word's script offset/height so
            # consecutive sub/superscript words stay on the same baseline
            psp = pph = 0
            for word in layout_line.words:
                options = self.options = word.options
                # the word height is not scaled by line_height, only lh was
                wh = options['line_height'] * word.lh
                # calculate sub/super script pos
                if options['script'] == 'superscript':
                    script_pos = max(0, psp if psp else self.get_descent())
                    psp = script_pos
                    pph = wh
                elif options['script'] == 'subscript':
                    script_pos = min(lh - wh, ((psp + pph) - wh)
                                     if pph else (lh - wh))
                    pph = wh
                    psp = script_pos
                else:
                    script_pos = (lh - wh) / 1.25
                    psp = pph = 0
                if len(word.text):
                    render_text(word.text, x, y + script_pos)

                # should we record refs ?
                ref = options['_ref']
                if ref is not None:
                    if not ref in refs:
                        refs[ref] = []
                    refs[ref].append((x, y, x + word.lw, y + wh))

                # Should we record anchors?
                anchor = options['_anchor']
                if anchor is not None:
                    if not anchor in anchors:
                        anchors[anchor] = (x, y)
                x += word.lw
            y += lh
        self.options = old_opts

        # get data from provider
        data = self._render_end()
        assert(data)

        # If the text is 1px width, usually, the data is black.
        # Don't blit that kind of data, otherwise, you have a little black bar.
        if data is not None and data.width > 1:
            self.texture.blit_data(data)
    def shorten_post(self, lines, w, h, margin=2):
        ''' Shortens the text to a single line according to the label options.

        This function operates on a text that has already been laid out because
        for markup, parts of text can have different size and options.

        If :attr:`text_size` [0] is None, the lines are returned unchanged.
        Otherwise, the lines are converted to a single line fitting within the
        constrained width, :attr:`text_size` [0].

        :params:

            `lines`: list of `LayoutLine` instances describing the text.
            `w`: int, the width of the text in lines, including padding.
            `h`: int, the height of the text in lines, including padding.
            `margin` int, the additional space left on the sides. This is in
            addition to :attr:`padding_x`.

        :returns:
            3-tuple of (xw, h, lines), where w, and h is similar to the input
            and contains the resulting width / height of the text, including
            padding. lines, is a list containing a single `LayoutLine`, which
            contains the words for the line.
        '''

        def n(line, c):
            ''' A function similar to text.find, except it's an iterator that
            returns successive occurrences of string c in list line. line is
            not a string, but a list of LayoutWord instances that we walk
            from left to right returning the indices of c in the words as we
            encounter them. Note that the options can be different among the
            words.

            :returns:
                3-tuple: the index of the word in line, the index of the
                occurrence in word, and the extents (width) of the combined
                words until this occurrence, not including the occurrence char.
                If no more are found it returns (-1, -1, total_w) where total_w
                is the full width of all the words.
            '''
            total_w = 0
            for w in range(len(line)):
                word = line[w]
                if not word.lw:
                    continue
                f = partial(word.text.find, c)
                i = f()
                while i != -1:
                    self.options = word.options
                    yield w, i, total_w + self.get_extents(word.text[:i])[0]
                    i = f(i + 1)
                self.options = word.options
                total_w += self.get_extents(word.text)[0]
            yield -1, -1, total_w  # this should never be reached, really

        def p(line, c):
            ''' Similar to the `n` function, except it returns occurrences of c
            from right to left in the list, line, similar to rfind.
            '''
            total_w = 0
            offset = 0 if len(c) else 1
            for w in range(len(line) - 1, -1, -1):
                word = line[w]
                if not word.lw:
                    continue
                f = partial(word.text.rfind, c)
                i = f()
                while i != -1:
                    self.options = word.options
                    yield (w, i, total_w +
                           self.get_extents(word.text[i + 1:])[0])
                    if i:
                        i = f(0, i - offset)
                    else:
                        if not c:
                            self.options = word.options
                            yield (w, -1, total_w +
                                   self.get_extents(word.text)[0])
                        break
                self.options = word.options
                total_w += self.get_extents(word.text)[0]
            yield -1, -1, total_w  # this should never be reached, really

        def n_restricted(line, uw, c):
            ''' Similar to the function `n`, except it only returns the first
            occurrence and it's not an iterator. Furthermore, if the first
            occurrence doesn't fit within width uw, it returns the index of
            whatever amount of text will still fit in uw.

            :returns:
                similar to the function `n`, except it's a 4-tuple, with the
                last element a boolean, indicating if we had to clip the text
                to fit in uw (True) or if the whole text until the first
                occurrence fitted in uw (False).
            '''
            total_w = 0
            if not len(line):
                return 0, 0, 0
            for w in range(len(line)):
                word = line[w]
                f = partial(word.text.find, c)
                self.options = word.options
                extents = self.get_cached_extents()
                i = f()
                if i != -1:
                    ww = extents(word.text[:i])[0]
                if i != -1 and total_w + ww <= uw:  # found and it fits
                    return w, i, total_w + ww, False
                elif i == -1:
                    ww = extents(word.text)[0]
                    if total_w + ww <= uw:  # wasn't found and all fits
                        total_w += ww
                        continue
                    i = len(word.text)

                # now just find whatever amount of the word does fit
                e = 0
                while e != i and total_w + extents(word.text[:e])[0] <= uw:
                    e += 1
                e = max(0, e - 1)
                return w, e, total_w + extents(word.text[:e])[0], True

            return -1, -1, total_w, False

        def p_restricted(line, uw, c):
            ''' Similar to `n_restricted`, except it returns the first
            occurrence starting from the right, like `p`.
            '''
            total_w = 0
            if not len(line):
                return 0, 0, 0
            for w in range(len(line) - 1, -1, -1):
                word = line[w]
                f = partial(word.text.rfind, c)
                self.options = word.options
                extents = self.get_cached_extents()
                i = f()
                if i != -1:
                    ww = extents(word.text[i + 1:])[0]
                if i != -1 and total_w + ww <= uw:  # found and it fits
                    return w, i, total_w + ww, False
                elif i == -1:
                    ww = extents(word.text)[0]
                    if total_w + ww <= uw:  # wasn't found and all fits
                        total_w += ww
                        continue

                # now just find whatever amount of the word does fit
                s = len(word.text) - 1
                while s >= 0 and total_w + extents(word.text[s:])[0] <= uw:
                    s -= 1
                return w, s, total_w + extents(word.text[s + 1:])[0], True

            return -1, -1, total_w, False

        textwidth = self.get_cached_extents()
        uw = self.text_size[0]
        if uw is None:
            return w, h, lines
        old_opts = copy(self.options)
        uw = max(0, int(uw - old_opts['padding_x'] * 2 - margin))
        # rebinds chr to the label's string type — shadows the builtin here
        chr = type(self.text)
        ssize = textwidth(' ')
        c = old_opts['split_str']
        line_height = old_opts['line_height']
        xpad, ypad = old_opts['padding_x'], old_opts['padding_y']
        dir = old_opts['shorten_from'][0]

        # flatten lines into single line
        line = []
        last_w = 0
        for l in range(len(lines)):
            # concatenate (non-empty) inside lines with a space
            this_line = lines[l]
            if last_w and this_line.w and not this_line.line_wrap:
                line.append(LayoutWord(old_opts, ssize[0], ssize[1], chr(' ')))
            last_w = this_line.w or last_w
            for word in this_line.words:
                if word.lw:
                    line.append(word)

        # if that fits, just return the flattened line
        lw = sum([word.lw for word in line])
        if lw <= uw:
            lh = max([word.lh for word in line] + [0]) * line_height
            return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
                lw, lh, 1, 0, line)]

        # find the size of ellipsis that'll fit
        elps_s = textwidth('...')
        if elps_s[0] > uw:  # even ellipsis didn't fit...
            s = textwidth('..')
            if s[0] <= uw:
                return (s[0] + 2 * xpad, s[1] * line_height + 2 * ypad,
                    [LayoutLine(0, 0, s[0], s[1], 1, 0, [LayoutWord(old_opts,
                    s[0], s[1], '..')])])
            else:
                s = textwidth('.')
                return (s[0] + 2 * xpad, s[1] * line_height + 2 * ypad,
                    [LayoutLine(0, 0, s[0], s[1], 1, 0, [LayoutWord(old_opts,
                    s[0], s[1], '.')])])
        elps = LayoutWord(old_opts, elps_s[0], elps_s[1], '...')
        uw -= elps_s[0]

        # now find the first left and right words that fit
        w1, e1, l1, clipped1 = n_restricted(line, uw, c)
        w2, s2, l2, clipped2 = p_restricted(line, uw, c)

        if dir != 'l':  # center or right
            line1 = None
            if clipped1 or clipped2 or l1 + l2 > uw:
                # if either was clipped or both don't fit, just take first
                if len(c):
                    # retry without a split_str so we can cut mid-word
                    self.options = old_opts
                    old_opts['split_str'] = ''
                    res = self.shorten_post(lines, w, h, margin)
                    self.options['split_str'] = c
                    return res
                line1 = line[:w1]
                last_word = line[w1]
                last_text = last_word.text[:e1]
                self.options = last_word.options
                s = self.get_extents(last_text)
                line1.append(LayoutWord(last_word.options, s[0], s[1],
                    last_text))
            elif (w1, e1) == (-1, -1):  # this shouldn't occur
                line1 = line
            if line1:
                line1.append(elps)
                lw = sum([word.lw for word in line1])
                lh = max([word.lh for word in line1]) * line_height
                self.options = old_opts
                return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
                    lw, lh, 1, 0, line1)]

            # now we know that both the first and last word fit, and that
            # there's at least one instances of the split_str in the line
            if (w1, e1) != (w2, s2):  # more than one split_str
                if dir == 'r':
                    f = n(line, c)  # iterator
                    assert next(f)[:-1] == (w1, e1)  # first word should match
                    ww1, ee1, l1 = next(f)
                    while l2 + l1 <= uw:
                        w1, e1 = ww1, ee1
                        ww1, ee1, l1 = next(f)
                        if (w1, e1) == (w2, s2):
                            break
                else:  # center
                    # grow from both ends, keeping left/right roughly balanced
                    f = n(line, c)  # iterator
                    f_inv = p(line, c)  # iterator
                    assert next(f)[:-1] == (w1, e1)
                    assert next(f_inv)[:-1] == (w2, s2)
                    while True:
                        if l1 <= l2:
                            ww1, ee1, l1 = next(f)  # hypothesize that next fit
                            if l2 + l1 > uw:
                                break
                            w1, e1 = ww1, ee1
                            if (w1, e1) == (w2, s2):
                                break
                        else:
                            ww2, ss2, l2 = next(f_inv)
                            if l2 + l1 > uw:
                                break
                            w2, s2 = ww2, ss2
                            if (w1, e1) == (w2, s2):
                                break
        else:  # left
            line1 = [elps]
            if clipped1 or clipped2 or l1 + l2 > uw:
                # if either was clipped or both don't fit, just take last
                if len(c):
                    self.options = old_opts
                    old_opts['split_str'] = ''
                    res = self.shorten_post(lines, w, h, margin)
                    self.options['split_str'] = c
                    return res
                first_word = line[w2]
                first_text = first_word.text[s2 + 1:]
                self.options = first_word.options
                s = self.get_extents(first_text)
                line1.append(LayoutWord(first_word.options, s[0], s[1],
                    first_text))
                line1.extend(line[w2 + 1:])
            elif (w1, e1) == (-1, -1):  # this shouldn't occur
                line1 = line
            if len(line1) != 1:
                lw = sum([word.lw for word in line1])
                lh = max([word.lh for word in line1]) * line_height
                self.options = old_opts
                return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
                    lw, lh, 1, 0, line1)]

            # now we know that both the first and last word fit, and that
            # there's at least one instances of the split_str in the line
            if (w1, e1) != (w2, s2):  # more than one split_str
                f_inv = p(line, c)  # iterator
                assert next(f_inv)[:-1] == (w2, s2)  # last word should match
                ww2, ss2, l2 = next(f_inv)
                while l2 + l1 <= uw:
                    w2, s2 = ww2, ss2
                    ww2, ss2, l2 = next(f_inv)
                    if (w1, e1) == (w2, s2):
                        break

        # now add back the left half
        line1 = line[:w1]
        last_word = line[w1]
        last_text = last_word.text[:e1]
        self.options = last_word.options
        s = self.get_extents(last_text)
        if len(last_text):
            line1.append(LayoutWord(last_word.options, s[0], s[1], last_text))
        line1.append(elps)
        # now add back the right half
        first_word = line[w2]
        first_text = first_word.text[s2 + 1:]
        self.options = first_word.options
        s = self.get_extents(first_text)
        if len(first_text):
            line1.append(LayoutWord(first_word.options, s[0], s[1],
                first_text))
        line1.extend(line[w2 + 1:])
        lw = sum([word.lw for word in line1])
        lh = max([word.lh for word in line1]) * line_height
        self.options = old_opts
        return lw + 2 * xpad, lh + 2 * ypad, [LayoutLine(0, 0,
            lw, lh, 1, 0, line1)]
| mit |
salsita/flask-serverinfo | flask_serverinfo.py | 1 | 3627 | """Flask server info view for inspecting server app and user requests."""
__all__ = 'setup view dumps logging_info logger_info server_info JSONEncoder'.split()
__version__ = '0.1.2'
import flask
from flask import json, Flask, Request, Response
from logging import Logger, getLogger, root
from werkzeug.local import LocalProxy
from werkzeug.routing import Map
from werkzeug.datastructures import MultiDict, Headers
class JSONEncoder(json.JSONEncoder):
    """JSON encoder able to serialize Flask/Werkzeug internals for inspection.

    Objects in ``inspect_types`` are dumped as a dict of their non-callable,
    non-dunder attributes; Werkzeug datastructures and loggers get dedicated
    renderings.  Circular references are replaced with ``<$ref: path>``
    markers before encoding.  Written for Python 2 (``basestring``,
    ``iteritems``).
    """

    # Values the base encoder serializes natively; recursion stops here.
    base_types = (basestring, int, float, bool, type(None))
    # Containers walked recursively while breaking circular references.
    iter_types = (dict, tuple, list, set)
    # Objects introspected attribute-by-attribute via dir().
    inspect_types = (LocalProxy, Flask, Map, Request, Response)

    def default(self, o):
        """Convert objects the base encoder cannot handle into serializable data."""
        if isinstance(o, self.inspect_types):
            # Snapshot every readable, non-callable, non-dunder attribute.
            return dict((k, getattr(o, k)) for k in dir(o)
                if isinstance(k, basestring) and not k.startswith('__') and not callable(getattr(o, k)))
        elif isinstance(o, MultiDict):
            return o.lists()
        elif isinstance(o, Headers):
            return o.items()
        elif isinstance(o, Logger):
            return logger_info(o)
        try:
            return super(JSONEncoder, self).default(o)
        except TypeError:
            # Last resort: emit a human-readable tag instead of failing.
            return '{} {!r}'.format(type(o), o)

    def iterencode(self, o, _one_shot=False):
        """Encode *o* chunk by chunk, degrading gracefully on encoding errors."""
        o = self.replace_circular_refs(o)
        try:
            for chunk in super(JSONEncoder, self).iterencode(o, _one_shot):
                yield chunk
        except ValueError as error:
            # Emit the failure inline as a JSON string rather than aborting.
            yield '"{}: {} {!r}"'.format(error, type(o), o)

    def replace_circular_refs(self, o, path='', cache=None):
        """Recursively replace already-seen objects with '<$ref: path>' markers.

        *cache* maps id(obj) -> dotted path of the first occurrence.
        """
        if cache is None:
            cache = {}
        if not isinstance(o, self.base_types):
            if isinstance(o, dict):
                # Drop non-string keys; JSON objects require string keys.
                o = dict((k, o[k]) for k in o if isinstance(k, basestring))
            elif isinstance(o, self.iter_types):
                o = list(o)
            else:
                # Unknown object: expand via default() first, then recurse.
                return self.replace_circular_refs(self.default(o), path, cache)
            for key, value in (o.iteritems() if isinstance(o, dict) else enumerate(o)):
                if not isinstance(value, self.base_types):
                    if id(value) in cache:
                        o[key] = '<$ref: {}>'.format(cache[id(value)])
                    else:
                        cache[id(value)] = '{}{}'.format(path, key)
                        o[key] = self.replace_circular_refs(value, '{}{}.'.format(path, key), cache)
        return o
# Defaults passed to json.dumps() by dumps(); individual calls may override.
DUMP_OPTIONS = dict(
    indent = 2,
    sort_keys = True,
    cls = JSONEncoder,
)
def dumps(data, **options):
    """Serialize *data* to JSON using DUMP_OPTIONS defaults, overridable per call."""
    merged = DUMP_OPTIONS.copy()
    merged.update(options)
    return json.dumps(data, **merged)
def logging_info(*additional_logger_names):
    """Summarize root, the explicitly named loggers, and every registered logger."""
    names = ['root']
    names.extend(additional_logger_names)
    names.extend(sorted(root.manager.loggerDict.keys()))
    return [logger_info(getLogger(name)) for name in names]
def logger_info(l):
    """Render a one-line summary of logger *l*.

    Format: '<Logger> [level/effective] handlercount(+ if propagating) name',
    where the name is prefixed with 'parent :: ' when it is not hierarchical.
    """
    parent = l.parent or l
    propagate_mark = '+' if l.propagate else ''
    if parent is l or l.name.startswith(parent.name + '.'):
        shown_name = l.name
    else:
        shown_name = parent.name + ' :: ' + l.name
    return '<Logger> [%02d/%02d] %01d%1s %s' % (
        l.level, l.getEffectiveLevel(), len(l.handlers),
        propagate_mark, shown_name,
    )
def server_info(app=None, *additional_logger_names):
    """Collect inspectable state: the app (default: current app) plus logging info."""
    target_app = app if app else flask.current_app
    return dict(
        app = target_app,
        logging = logging_info(*additional_logger_names),
    )
def view():
    """Serve a JSON dump of the current request, a template response, and server state."""
    payload = dict(
        request = flask.request,
        response = Response(mimetype = 'application/json'),
        server = server_info(),
    )
    return Response(dumps(payload), mimetype = 'application/json')
def setup(app, uri, endpoint='serverinfo_view', **options):
    """Register the server-info view on *app* at *uri*."""
    register = app.route(uri, endpoint=endpoint, **options)
    register(view)
| mit |
ArcherCraftStore/ArcherVMPeridot | Python/Lib/pprint.py | 74 | 14861 | # Author: Fred L. Drake, Jr.
# fdrake@acm.org
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import re
import sys as _sys
from collections import OrderedDict as _OrderedDict
from io import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
def pprint(object, stream=None, indent=1, width=80, depth=None, *,
           compact=False):
    """Pretty-print a Python object to a stream [default is sys.stdout]."""
    PrettyPrinter(stream=stream, indent=indent, width=width,
                  depth=depth, compact=compact).pprint(object)
def pformat(object, indent=1, width=80, depth=None, *, compact=False):
    """Format a Python object into a pretty-printed representation."""
    printer = PrettyPrinter(indent=indent, width=width, depth=depth,
                            compact=compact)
    return printer.pformat(object)
def saferepr(object):
    """Version of repr() which can handle recursive data structures."""
    repr_string, _, _ = _safe_repr(object, {}, None, 0)
    return repr_string
def isreadable(object):
    """Determine if saferepr(object) is readable by eval()."""
    _, readable, _ = _safe_repr(object, {}, None, 0)
    return readable
def isrecursive(object):
    """Determine if object requires a recursive representation."""
    _, _, recursive = _safe_repr(object, {}, None, 0)
    return recursive
class _safe_key:
    """Sort-key wrapper that makes unorderable objects comparable.

    When the wrapped objects do not support ``<`` between themselves, fall
    back to a Py2.x-style ordering: compare by type name first, then object
    id.  Not recursive, so dict.items() must have _safe_key applied to both
    the key and the value.
    """

    __slots__ = ['obj']

    def __init__(self, obj):
        self.obj = obj

    def __lt__(self, other):
        try:
            result = self.obj.__lt__(other.obj)
        except TypeError:
            result = NotImplemented
        if result is not NotImplemented:
            return result
        # Deterministic fallback for mixed/unorderable types.
        return ((str(type(self.obj)), id(self.obj)) <
                (str(type(other.obj)), id(other.obj)))
def _safe_tuple(t):
    """Wrap both members of a 2-tuple so the pair sorts safely."""
    key, value = t
    return _safe_key(key), _safe_key(value)
class PrettyPrinter:
    """Pretty-print Python data structures with configurable layout."""

    def __init__(self, indent=1, width=80, depth=None, stream=None, *,
                 compact=False):
        """Handle pretty printing operations onto a stream using a set of
        configured parameters.

        indent
            Number of spaces to indent for each level of nesting.

        width
            Attempted maximum number of columns in the output.

        depth
            The maximum depth to print out nested structures.

        stream
            The desired output stream.  If omitted (or false), the standard
            output stream available at construction will be used.

        compact
            If true, several items will be combined in one line.

        """
        indent = int(indent)
        width = int(width)
        assert indent >= 0, "indent must be >= 0"
        assert depth is None or depth > 0, "depth must be > 0"
        assert width, "width must be != 0"
        self._depth = depth
        self._indent_per_level = indent
        self._width = width
        if stream is not None:
            self._stream = stream
        else:
            self._stream = _sys.stdout
        self._compact = bool(compact)

    def pprint(self, object):
        # Write the formatted object followed by a newline.
        self._format(object, self._stream, 0, 0, {}, 0)
        self._stream.write("\n")

    def pformat(self, object):
        # Format into an in-memory buffer and return the string.
        sio = _StringIO()
        self._format(object, sio, 0, 0, {}, 0)
        return sio.getvalue()

    def isrecursive(self, object):
        # Third element of the format() triple is the recursion flag.
        return self.format(object, {}, 0, 0)[2]

    def isreadable(self, object):
        # Readable means eval()-able; a recursive repr never is.
        s, readable, recursive = self.format(object, {}, 0, 0)
        return readable and not recursive

    def _format(self, object, stream, indent, allowance, context, level):
        """Core recursive formatter.

        *indent* is the current left margin, *allowance* the space that must
        be left free at the end of the line, *context* the set of object ids
        currently being printed (cycle detection), *level* the nesting depth.
        """
        level = level + 1
        objid = id(object)
        if objid in context:
            # Already printing this object further up the stack: cycle.
            stream.write(_recursion(object))
            self._recursive = True
            self._readable = False
            return
        rep = self._repr(object, context, level - 1)
        typ = type(object)
        max_width = self._width - 1 - indent - allowance
        # Only break onto multiple lines when the one-line repr is too wide.
        sepLines = len(rep) > max_width
        write = stream.write

        if sepLines:
            r = getattr(typ, "__repr__", None)
            if issubclass(typ, dict):
                write('{')
                if self._indent_per_level > 1:
                    write((self._indent_per_level - 1) * ' ')
                length = len(object)
                if length:
                    context[objid] = 1
                    indent = indent + self._indent_per_level
                    if issubclass(typ, _OrderedDict):
                        # Preserve insertion order for OrderedDict.
                        items = list(object.items())
                    else:
                        items = sorted(object.items(), key=_safe_tuple)
                    key, ent = items[0]
                    rep = self._repr(key, context, level)
                    write(rep)
                    write(': ')
                    self._format(ent, stream, indent + len(rep) + 2,
                                 allowance + 1, context, level)
                    if length > 1:
                        for key, ent in items[1:]:
                            rep = self._repr(key, context, level)
                            write(',\n%s%s: ' % (' '*indent, rep))
                            self._format(ent, stream, indent + len(rep) + 2,
                                         allowance + 1, context, level)
                    indent = indent - self._indent_per_level
                    del context[objid]
                write('}')
                return

            # Only use the multi-line layout for the stock container reprs;
            # subclasses with custom __repr__ fall through to write(rep).
            if ((issubclass(typ, list) and r is list.__repr__) or
                (issubclass(typ, tuple) and r is tuple.__repr__) or
                (issubclass(typ, set) and r is set.__repr__) or
                (issubclass(typ, frozenset) and r is frozenset.__repr__)
               ):
                length = len(object)
                if issubclass(typ, list):
                    write('[')
                    endchar = ']'
                elif issubclass(typ, tuple):
                    write('(')
                    endchar = ')'
                else:
                    if not length:
                        write(rep)
                        return
                    if typ is set:
                        write('{')
                        endchar = '}'
                    else:
                        # frozenset / set subclass: ClassName({...}) form.
                        write(typ.__name__)
                        write('({')
                        endchar = '})'
                        indent += len(typ.__name__) + 1
                    # Sets are unordered; sort for deterministic output.
                    object = sorted(object, key=_safe_key)
                if self._indent_per_level > 1:
                    write((self._indent_per_level - 1) * ' ')
                if length:
                    context[objid] = 1
                    self._format_items(object, stream,
                                       indent + self._indent_per_level,
                                       allowance + 1, context, level)
                    del context[objid]
                if issubclass(typ, tuple) and length == 1:
                    # Single-element tuple needs the trailing comma.
                    write(',')
                write(endchar)
                return

            if issubclass(typ, str) and len(object) > 0 and r is str.__repr__:
                def _str_parts(s):
                    """
                    Return a list of string literals comprising the repr()
                    of the given string using literal concatenation.
                    """
                    lines = s.splitlines(True)
                    for i, line in enumerate(lines):
                        rep = repr(line)
                        if len(rep) <= max_width:
                            yield rep
                        else:
                            # A list of alternating (non-space, space) strings
                            parts = re.split(r'(\s+)', line) + ['']
                            current = ''
                            for i in range(0, len(parts), 2):
                                part = parts[i] + parts[i+1]
                                candidate = current + part
                                if len(repr(candidate)) > max_width:
                                    if current:
                                        yield repr(current)
                                    current = part
                                else:
                                    current = candidate
                            if current:
                                yield repr(current)
                for i, rep in enumerate(_str_parts(object)):
                    if i > 0:
                        write('\n' + ' '*indent)
                    write(rep)
                return
        write(rep)

    def _format_items(self, items, stream, indent, allowance, context, level):
        """Write a sequence of items, one per line, or packed when compact."""
        write = stream.write
        delimnl = ',\n' + ' ' * indent
        delim = ''
        # Remaining budget on the current line (compact mode only).
        width = max_width = self._width - indent - allowance + 2
        for ent in items:
            if self._compact:
                rep = self._repr(ent, context, level)
                w = len(rep) + 2
                if width < w:
                    # Item will not fit: start a fresh line.
                    width = max_width
                    if delim:
                        delim = delimnl
                if width >= w:
                    width -= w
                    write(delim)
                    delim = ', '
                    write(rep)
                    continue
            write(delim)
            delim = delimnl
            self._format(ent, stream, indent, allowance, context, level)

    def _repr(self, object, context, level):
        """Return the one-line repr of *object*, updating readability flags."""
        repr, readable, recursive = self.format(object, context.copy(),
                                                self._depth, level)
        if not readable:
            self._readable = False
        if recursive:
            self._recursive = True
        return repr

    def format(self, object, context, maxlevels, level):
        """Format object for a specific context, returning a string
        and flags indicating whether the representation is 'readable'
        and whether the object represents a recursive construct.
        """
        return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).

def _safe_repr(object, context, maxlevels, level):
    """Recursion-safe repr() used by saferepr()/PrettyPrinter.format().

    *context* holds ids of containers currently being formatted (cycle
    detection); *maxlevels* caps the nesting depth (None = unlimited).
    """
    typ = type(object)
    if typ is str:
        if 'locale' not in _sys.modules:
            # No locale loaded: plain repr() is fine and fast.
            return repr(object), True, False
        # Locale-aware path: build the quoted form character by character so
        # isalpha() (possibly locale-dependent) chars pass through unescaped.
        if "'" in object and '"' not in object:
            closure = '"'
            quotes = {'"': '\\"'}
        else:
            closure = "'"
            quotes = {"'": "\\'"}
        qget = quotes.get
        sio = _StringIO()
        write = sio.write
        for char in object:
            if char.isalpha():
                write(char)
            else:
                # repr(char)[1:-1] strips the surrounding quotes.
                write(qget(char, repr(char)[1:-1]))
        return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False

    r = getattr(typ, "__repr__", None)
    if issubclass(typ, dict) and r is dict.__repr__:
        if not object:
            return "{}", True, False
        objid = id(object)
        if maxlevels and level >= maxlevels:
            return "{...}", False, objid in context
        if objid in context:
            return _recursion(object), False, True
        context[objid] = 1
        readable = True
        recursive = False
        components = []
        append = components.append
        level += 1
        saferepr = _safe_repr
        items = sorted(object.items(), key=_safe_tuple)
        for k, v in items:
            krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
            vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
            append("%s: %s" % (krepr, vrepr))
            readable = readable and kreadable and vreadable
            if krecur or vrecur:
                recursive = True
        del context[objid]
        return "{%s}" % ", ".join(components), readable, recursive

    if (issubclass(typ, list) and r is list.__repr__) or \
       (issubclass(typ, tuple) and r is tuple.__repr__):
        if issubclass(typ, list):
            if not object:
                return "[]", True, False
            format = "[%s]"
        elif len(object) == 1:
            # Single-element tuple needs a trailing comma to round-trip.
            format = "(%s,)"
        else:
            if not object:
                return "()", True, False
            format = "(%s)"
        objid = id(object)
        if maxlevels and level >= maxlevels:
            return format % "...", False, objid in context
        if objid in context:
            return _recursion(object), False, True
        context[objid] = 1
        readable = True
        recursive = False
        components = []
        append = components.append
        level += 1
        for o in object:
            orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
            append(orepr)
            if not oreadable:
                readable = False
            if orecur:
                recursive = True
        del context[objid]
        return format % ", ".join(components), readable, recursive

    rep = repr(object)
    # Reprs like '<Foo object at 0x...>' are not eval()-able.
    return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
    """Return the placeholder emitted for an object already being printed."""
    return "<Recursion on %s with id=%s>" % (type(object).__name__, id(object))
def _perfcheck(object=None):
    """Crude benchmark comparing _safe_repr() against a full pformat()."""
    import time
    if object is None:
        # Default workload: 100k small mixed-container records.
        object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
    printer = PrettyPrinter()
    start = time.time()
    _safe_repr(object, {}, None, 0)
    mid = time.time()
    printer.pformat(object)
    end = time.time()
    print("_safe_repr:", mid - start)
    print("pformat:", end - mid)
# Run the timing comparison when executed directly as a script.
if __name__ == "__main__":
    _perfcheck()
| apache-2.0 |
malmiron/incubator-airflow | tests/operators/test_s3_to_redshift_operator.py | 8 | 2504 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import mock
import unittest
from boto3.session import Session
from airflow.operators.s3_to_redshift_operator import S3ToRedshiftTransfer
from airflow.utils.tests import assertEqualIgnoreMultipleSpaces
class TestS3ToRedshiftTransfer(unittest.TestCase):
    """Verify S3ToRedshiftTransfer.execute() issues the expected COPY SQL."""

    # NOTE: patch decorators apply bottom-up, so the PostgresHook.run patch
    # supplies the first mock argument (mock_run) and the boto3 Session patch
    # the second (mock_Session).
    @mock.patch("boto3.session.Session")
    @mock.patch("airflow.hooks.postgres_hook.PostgresHook.run")
    def test_execute(self, mock_run, mock_Session):
        access_key = "aws_access_key_id"
        secret_key = "aws_secret_access_key"
        # Make the operator's AWS session hand back known credentials.
        mock_Session.return_value = Session(access_key, secret_key)

        schema = "schema"
        table = "table"
        s3_bucket = "bucket"
        s3_key = "key"
        copy_options = ""

        t = S3ToRedshiftTransfer(
            schema=schema,
            table=table,
            s3_bucket=s3_bucket,
            s3_key=s3_key,
            copy_options=copy_options,
            redshift_conn_id="redshift_conn_id",
            aws_conn_id="aws_conn_id",
            task_id="task_id",
            dag=None)
        t.execute(None)

        # Expected SQL; compared below ignoring whitespace differences.
        copy_query = """
            COPY {schema}.{table}
            FROM 's3://{s3_bucket}/{s3_key}/{table}'
            with credentials
            'aws_access_key_id={access_key};aws_secret_access_key={secret_key}'
            {copy_options};
        """.format(schema=schema,
                   table=table,
                   s3_bucket=s3_bucket,
                   s3_key=s3_key,
                   access_key=access_key,
                   secret_key=secret_key,
                   copy_options=copy_options)

        mock_run.assert_called_once()
        assertEqualIgnoreMultipleSpaces(self, mock_run.call_args[0][0], copy_query)
| apache-2.0 |
alienigenahk/openkore | src/scons-local-2.0.1/SCons/Tool/dvips.py | 61 | 3454 | """SCons.Tool.dvips
Tool-specific initialization for dvips.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/dvips.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Action
import SCons.Builder
import SCons.Tool.dvipdf
import SCons.Util
def DviPsFunction(target = None, source= None, env=None):
    """Build *target* from *source* by delegating to the shared DVI->PS/PDF driver."""
    return SCons.Tool.dvipdf.DviPdfPsFunction(PSAction, target, source, env)
def DviPsStrFunction(target = None, source= None, env=None):
    """A strfunction for dvipdf that returns the appropriate
    command string for the no_exec options."""
    if not env.GetOption("no_exec"):
        # Normal builds print nothing extra; the Action handles its own output.
        return ''
    return env.subst('$PSCOM', 0, target, source)
# Module-level Action/Builder singletons; populated lazily by generate() so
# repeated tool initialization shares the same objects across Environments.
PSAction = None
DVIPSAction = None
PSBuilder = None
def generate(env):
    """Add Builders and construction variables for dvips to an Environment."""
    # Create the shared Action/Builder singletons on first use only.
    global PSAction
    if PSAction is None:
        PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR')

    global DVIPSAction
    if DVIPSAction is None:
        DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction)

    global PSBuilder
    if PSBuilder is None:
        PSBuilder = SCons.Builder.Builder(action = PSAction,
                                          prefix = '$PSPREFIX',
                                          suffix = '$PSSUFFIX',
                                          src_suffix = '.dvi',
                                          src_builder = 'DVI',
                                          single_source=True)

    env['BUILDERS']['PostScript'] = PSBuilder

    env['DVIPS'] = 'dvips'
    env['DVIPSFLAGS'] = SCons.Util.CLVar('')
    # I'm not quite sure I got the directories and filenames right for variant_dir
    # We need to be in the correct directory for the sake of latex \includegraphics eps included files.
    env['PSCOM'] = 'cd ${TARGET.dir} && $DVIPS $DVIPSFLAGS -o ${TARGET.file} ${SOURCE.file}'
    env['PSPREFIX'] = ''
    env['PSSUFFIX'] = '.ps'
def exists(env):
    """Return a true value when the dvips executable can be located."""
    detected = env.Detect('dvips')
    return detected
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
yask123/django | django/core/serializers/xml_serializer.py | 184 | 15662 | """
XML serializer.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.encoding import smart_text
from django.utils.xmlutils import (
SimplerXMLGenerator, UnserializableContentError,
)
class Serializer(base.Serializer):
    """
    Serializes a QuerySet to XML.
    """

    def indent(self, level):
        # Only emit whitespace when the 'indent' option was supplied.
        if self.options.get('indent') is not None:
            self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level)

    def start_serialization(self):
        """
        Start serialization -- open the XML document and the root element.
        """
        self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
        self.xml.startDocument()
        self.xml.startElement("django-objects", {"version": "1.0"})

    def end_serialization(self):
        """
        End serialization -- end the document.
        """
        self.indent(0)
        self.xml.endElement("django-objects")
        self.xml.endDocument()

    def start_object(self, obj):
        """
        Called as each object is handled.
        """
        if not hasattr(obj, "_meta"):
            raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))

        self.indent(1)
        # Deferred instances report the concrete model they proxy for.
        model = obj._meta.proxy_for_model if obj._deferred else obj.__class__
        attrs = OrderedDict([("model", smart_text(model._meta))])
        # The pk attribute is omitted when serializing by natural key.
        if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
            obj_pk = obj._get_pk_val()
            if obj_pk is not None:
                attrs['pk'] = smart_text(obj_pk)

        self.xml.startElement("object", attrs)

    def end_object(self, obj):
        """
        Called after handling all fields for an object.
        """
        self.indent(1)
        self.xml.endElement("object")

    def handle_field(self, obj, field):
        """
        Called to handle each field on an object (except for ForeignKeys and
        ManyToManyFields)
        """
        self.indent(2)
        self.xml.startElement("field", OrderedDict([
            ("name", field.name),
            ("type", field.get_internal_type()),
        ]))

        # Get a "string version" of the object's data.
        if getattr(obj, field.name) is not None:
            try:
                self.xml.characters(field.value_to_string(obj))
            except UnserializableContentError:
                raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
                    obj.__class__.__name__, field.name, obj._get_pk_val()))
        else:
            self.xml.addQuickElement("None")

        self.xml.endElement("field")

    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey (we need to treat them slightly
        differently from regular fields).
        """
        self._start_relational_field(field)
        related_att = getattr(obj, field.get_attname())
        if related_att is not None:
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                related = getattr(obj, field.name)
                # If related object has a natural key, use it
                related = related.natural_key()
                # Iterable natural keys are rolled out as subelements
                for key_value in related:
                    self.xml.startElement("natural", {})
                    self.xml.characters(smart_text(key_value))
                    self.xml.endElement("natural")
            else:
                self.xml.characters(smart_text(related_att))
        else:
            self.xml.addQuickElement("None")
        self.xml.endElement("field")

    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField. Related objects are only
        serialized as references to the object's PK (i.e. the related *data*
        is not dumped, just the relation).
        """
        # Skip m2m relations routed through an explicit (non-auto) through model.
        if field.remote_field.through._meta.auto_created:
            self._start_relational_field(field)
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                # If the objects in the m2m have a natural key, use it
                def handle_m2m(value):
                    natural = value.natural_key()
                    # Iterable natural keys are rolled out as subelements
                    self.xml.startElement("object", {})
                    for key_value in natural:
                        self.xml.startElement("natural", {})
                        self.xml.characters(smart_text(key_value))
                        self.xml.endElement("natural")
                    self.xml.endElement("object")
            else:
                def handle_m2m(value):
                    self.xml.addQuickElement("object", attrs={
                        'pk': smart_text(value._get_pk_val())
                    })
            for relobj in getattr(obj, field.name).iterator():
                handle_m2m(relobj)

            self.xml.endElement("field")

    def _start_relational_field(self, field):
        """
        Helper to output the <field> element for relational fields
        """
        self.indent(2)
        self.xml.startElement("field", OrderedDict([
            ("name", field.name),
            ("rel", field.remote_field.__class__.__name__),
            ("to", smart_text(field.remote_field.model._meta)),
        ]))
class Deserializer(base.Deserializer):
    """
    Deserialize XML.
    """

    def __init__(self, stream_or_string, **options):
        super(Deserializer, self).__init__(stream_or_string, **options)
        # Pull-based DOM stream so only one <object> is expanded at a time.
        self.event_stream = pulldom.parse(self.stream, self._make_parser())
        self.db = options.pop('using', DEFAULT_DB_ALIAS)
        self.ignore = options.pop('ignorenonexistent', False)

    def _make_parser(self):
        """Create a hardened XML parser (no custom/external entities)."""
        return DefusedExpatParser()

    def __next__(self):
        # Scan forward to the next <object> element and expand its subtree.
        for event, node in self.event_stream:
            if event == "START_ELEMENT" and node.nodeName == "object":
                self.event_stream.expandNode(node)
                return self._handle_object(node)
        raise StopIteration

    def _handle_object(self, node):
        """
        Convert an <object> node to a DeserializedObject.
        """
        # Look up the model using the model loading mechanism. If this fails,
        # bail.
        Model = self._get_model_from_node(node, "model")

        # Start building a data dictionary from the object.
        data = {}
        if node.hasAttribute('pk'):
            data[Model._meta.pk.attname] = Model._meta.pk.to_python(
                node.getAttribute('pk'))

        # Also start building a dict of m2m data (this is saved as
        # {m2m_accessor_attribute : [list_of_related_objects]})
        m2m_data = {}

        field_names = {f.name for f in Model._meta.get_fields()}
        # Deserialize each field.
        for field_node in node.getElementsByTagName("field"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError("<field> node is missing the 'name' attribute")

            # Get the field from the Model. This will raise a
            # FieldDoesNotExist if, well, the field doesn't exist, which will
            # be propagated correctly unless ignorenonexistent=True is used.
            if self.ignore and field_name not in field_names:
                continue
            field = Model._meta.get_field(field_name)

            # As is usually the case, relation fields get the special treatment.
            if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
                m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
            elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
                data[field.attname] = self._handle_fk_field_node(field_node, field)
            else:
                if field_node.getElementsByTagName('None'):
                    value = None
                else:
                    value = field.to_python(getInnerText(field_node).strip())
                data[field.name] = value

        obj = base.build_instance(Model, data, self.db)

        # Return a DeserializedObject so that the m2m data has a place to live.
        return base.DeserializedObject(obj, m2m_data)

    def _handle_fk_field_node(self, node, field):
        """
        Handle a <field> node for a ForeignKey
        """
        # Check if there is a child node named 'None', returning None if so.
        if node.getElementsByTagName('None'):
            return None
        else:
            model = field.remote_field.model
            if hasattr(model._default_manager, 'get_by_natural_key'):
                keys = node.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
                    obj_pk = getattr(obj, field.remote_field.field_name)
                    # If this is a natural foreign key to an object that
                    # has a FK/O2O as the foreign key, use the FK value
                    if field.remote_field.model._meta.pk.remote_field:
                        obj_pk = obj_pk.pk
                else:
                    # Otherwise, treat like a normal PK
                    field_value = getInnerText(node).strip()
                    obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value)
                return obj_pk
            else:
                field_value = getInnerText(node).strip()
                return model._meta.get_field(field.remote_field.field_name).to_python(field_value)

    def _handle_m2m_field_node(self, node, field):
        """
        Handle a <field> node for a ManyToManyField.
        """
        model = field.remote_field.model
        default_manager = model._default_manager
        if hasattr(default_manager, 'get_by_natural_key'):
            def m2m_convert(n):
                keys = n.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
                else:
                    # Otherwise, treat like a normal PK value.
                    obj_pk = model._meta.pk.to_python(n.getAttribute('pk'))
                return obj_pk
        else:
            m2m_convert = lambda n: model._meta.pk.to_python(n.getAttribute('pk'))
        return [m2m_convert(c) for c in node.getElementsByTagName("object")]

    def _get_model_from_node(self, node, attr):
        """
        Helper to look up a model from a <object model=...> or a <field
        rel=... to=...> node.
        """
        model_identifier = node.getAttribute(attr)
        if not model_identifier:
            raise base.DeserializationError(
                "<%s> node is missing the required '%s' attribute"
                % (node.nodeName, attr))
        try:
            return apps.get_model(model_identifier)
        except (LookupError, TypeError):
            raise base.DeserializationError(
                "<%s> node has invalid model identifier: '%s'"
                % (node.nodeName, model_identifier))
def getInnerText(node):
    """
    Get all the inner text of a DOM node (recursively).
    """
    # Collect character data from text/CDATA children; descend into elements.
    pieces = []
    for child in node.childNodes:
        if child.nodeType in (child.TEXT_NODE, child.CDATA_SECTION_NODE):
            pieces.append(child.data)
        elif child.nodeType == child.ELEMENT_NODE:
            pieces.extend(getInnerText(child))
    return "".join(pieces)
# Below code based on Christian Heimes' defusedxml
class DefusedExpatParser(_ExpatParser):
    """
    An expat parser hardened against XML bomb attacks.

    Forbids DTDs, external entity references
    """

    def __init__(self, *args, **kwargs):
        _ExpatParser.__init__(self, *args, **kwargs)
        # Refuse to load external general and parameter entities.
        self.setFeature(handler.feature_external_ges, False)
        self.setFeature(handler.feature_external_pes, False)

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        raise DTDForbidden(name, sysid, pubid)

    def entity_decl(self, name, is_parameter_entity, value, base,
                    sysid, pubid, notation_name):
        raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)

    def external_entity_ref_handler(self, context, base, sysid, pubid):
        raise ExternalReferenceForbidden(context, base, sysid, pubid)

    def reset(self):
        # Re-install the forbidding handlers every time the parser is reset.
        _ExpatParser.reset(self)
        parser = self._parser
        parser.StartDoctypeDeclHandler = self.start_doctype_decl
        parser.EntityDeclHandler = self.entity_decl
        parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler
class DefusedXmlException(ValueError):
    """Base exception for all defused-XML refusals."""

    def __repr__(self):
        # Delegate to __str__ so subclass reprs stay readable in logs.
        return str(self)
class DTDForbidden(DefusedXmlException):
    """Document type definition is forbidden."""

    def __init__(self, name, sysid, pubid):
        super(DTDForbidden, self).__init__()
        self.name = name
        self.sysid = sysid
        self.pubid = pubid

    def __str__(self):
        return "DTDForbidden(name='{}', system_id={!r}, public_id={!r})".format(
            self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
    """Entity definition is forbidden."""

    def __init__(self, name, value, base, sysid, pubid, notation_name):
        super(EntitiesForbidden, self).__init__()
        self.name = name
        self.value = value
        self.base = base
        self.sysid = sysid
        self.pubid = pubid
        self.notation_name = notation_name

    def __str__(self):
        return "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})".format(
            self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
    """Resolving an external reference is forbidden."""

    def __init__(self, context, base, sysid, pubid):
        super(ExternalReferenceForbidden, self).__init__()
        self.context = context
        self.base = base
        self.sysid = sysid
        self.pubid = pubid

    def __str__(self):
        return "ExternalReferenceForbidden(system_id='{}', public_id={})".format(
            self.sysid, self.pubid)
| bsd-3-clause |
luceatnobis/youtube-dl | youtube_dl/extractor/seeker.py | 72 | 2677 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class SeekerIE(InfoExtractor):
    # Extractor for seeker.com article pages. The URL tail carries a display
    # slug and a numeric article id; the videos themselves are hosted on
    # Revision3 and are delegated to the Revision3Embed extractor.
    _VALID_URL = r'https?://(?:www\.)?seeker\.com/(?P<display_id>.*)-(?P<article_id>\d+)\.html'
    _TESTS = [{
        # player.loadRevision3Item
        'url': 'http://www.seeker.com/should-trump-be-required-to-release-his-tax-returns-1833805621.html',
        'md5': '30c1dc4030cc715cf05b423d0947ac18',
        'info_dict': {
            'id': '76243',
            'ext': 'webm',
            'title': 'Should Trump Be Required To Release His Tax Returns?',
            'description': 'Donald Trump has been secretive about his "big," "beautiful" tax returns. So what can we learn if he decides to release them?',
            'uploader': 'Seeker Daily',
            'uploader_id': 'seekerdaily',
        }
    }, {
        'url': 'http://www.seeker.com/changes-expected-at-zoos-following-recent-gorilla-lion-shootings-1834116536.html',
        'playlist': [
            {
                'md5': '83bcd157cab89ad7318dd7b8c9cf1306',
                'info_dict': {
                    'id': '67558',
                    'ext': 'mp4',
                    'title': 'The Pros & Cons Of Zoos',
                    'description': 'Zoos are often depicted as a terrible place for animals to live, but is there any truth to this?',
                    'uploader': 'DNews',
                    'uploader_id': 'dnews',
                },
            }
        ],
        'info_dict': {
            'id': '1834116536',
            'title': 'After Gorilla Killing, Changes Ahead for Zoos',
            'description': 'The largest association of zoos and others are hoping to learn from recent incidents that led to the shooting deaths of a gorilla and two lions.',
        },
    }]

    def _real_extract(self, url):
        """Extract a single Revision3-hosted video, or a playlist of embeds."""
        display_id, article_id = re.match(self._VALID_URL, url).groups()
        webpage = self._download_webpage(url, display_id)
        # Single-video pages invoke player.loadRevision3Item(type, id) inline.
        mobj = re.search(r"player\.loadRevision3Item\('([^']+)'\s*,\s*(\d+)\);", webpage)
        if mobj:
            playlist_type, playlist_id = mobj.groups()
            return self.url_result(
                'revision3:%s:%s' % (playlist_type, playlist_id), 'Revision3Embed', playlist_id)
        else:
            # Otherwise gather every seekernetwork player iframe on the page
            # and return them as a playlist keyed by the article id.
            entries = [self.url_result('revision3:video_id:%s' % video_id, 'Revision3Embed', video_id) for video_id in re.findall(
                r'<iframe[^>]+src=[\'"](?:https?:)?//api\.seekernetwork\.com/player/embed\?videoId=(\d+)', webpage)]
            return self.playlist_result(
                entries, article_id, self._og_search_title(webpage), self._og_search_description(webpage))
| unlicense |
davehunt/kuma | vendor/packages/translate/search/test_match.py | 33 | 5664 | from translate.search import match
from translate.storage import csvl10n
class TestMatch:
    """Test the matching class"""

    def candidatestrings(self, units):
        """returns only the candidate strings out of the list with (score, string) tuples"""
        return [unit.source for unit in units]

    def buildcsv(self, sources, targets=None):
        """Build a csvfile store with the given source and target strings"""
        if targets is None:
            # identical source/target pairs are good enough for these tests
            targets = sources
        else:
            assert len(sources) == len(targets)
        csvfile = csvl10n.csvfile()
        for source, target in zip(sources, targets):
            unit = csvfile.addsourceunit(source)
            unit.target = target
        return csvfile

    def test_matching(self):
        """Test basic matching"""
        csvfile = self.buildcsv(["hand", "asdf", "fdas", "haas", "pond"])
        matcher = match.matcher(csvfile)
        candidates = self.candidatestrings(matcher.matches("hond"))
        candidates.sort()
        assert candidates == ["hand", "pond"]
        message = "Ek skop die bal"
        csvfile = self.buildcsv(
            ["Hy skop die bal",
             message,
             "Jannie skop die bal",
             "Ek skop die balle",
             "Niemand skop die bal nie"])
        matcher = match.matcher(csvfile)
        candidates = self.candidatestrings(matcher.matches(message))
        assert len(candidates) == 3
        # test that the 100% match is indeed first:
        assert candidates[0] == message
        candidates.sort()
        assert candidates[1:] == ["Ek skop die balle", "Hy skop die bal"]

    def test_multiple_store(self):
        """Test using multiple datastores"""
        csvfile1 = self.buildcsv(["hand", "asdf", "fdas"])
        csvfile2 = self.buildcsv(["haas", "pond"])
        matcher = match.matcher([csvfile1, csvfile2])
        candidates = self.candidatestrings(matcher.matches("hond"))
        candidates.sort()
        assert candidates == ["hand", "pond"]
        message = "Ek skop die bal"
        csvfile1 = self.buildcsv(
            ["Hy skop die bal",
             message,
             "Jannie skop die bal"])
        csvfile2 = self.buildcsv(
            ["Ek skop die balle",
             "Niemand skop die bal nie"])
        matcher = match.matcher([csvfile1, csvfile2])
        candidates = self.candidatestrings(matcher.matches(message))
        assert len(candidates) == 3
        # test that the 100% match is indeed first:
        assert candidates[0] == message
        candidates.sort()
        assert candidates[1:] == ["Ek skop die balle", "Hy skop die bal"]

    def test_extendtm(self):
        """Test that we can extend the TM after creation."""
        message = "Open file..."
        csvfile1 = self.buildcsv(["Close application", "Do something"])
        matcher = match.matcher([csvfile1])
        candidates = self.candidatestrings(matcher.matches(message))
        assert len(candidates) == 0
        csvfile2 = self.buildcsv(["Open file"])
        matcher.extendtm(csvfile2.units, store=csvfile2)
        candidates = self.candidatestrings(matcher.matches(message))
        assert len(candidates) == 1
        assert candidates[0] == "Open file"

    def test_terminology(self):
        """Test basic terminology extraction from a full sentence."""
        csvfile = self.buildcsv(["file", "computer", "directory"])
        matcher = match.terminologymatcher(csvfile)
        candidates = self.candidatestrings(matcher.matches("Copy the files from your computer"))
        candidates.sort()
        assert candidates == ["computer", "file"]

    def test_brackets(self):
        """Tests that brackets at the end of a term are ignored"""
        csvfile = self.buildcsv(["file (noun)", "ISP (Internet Service Provider)"])
        matcher = match.terminologymatcher(csvfile)
        candidates = self.candidatestrings(matcher.matches("Open File"))
        assert candidates == ["file"]
        candidates = self.candidatestrings(matcher.matches("Contact your ISP"))
        # we lowercase everything - that is why we get it back differently.
        # we don't change the target text, though
        assert candidates == ["isp"]

    def test_past_tences(self):
        """Tests matching of some past tenses"""
        csvfile = self.buildcsv(["submit", "certify"])
        matcher = match.terminologymatcher(csvfile)
        candidates = self.candidatestrings(matcher.matches("The bug was submitted"))
        assert candidates == ["submit"]
        candidates = self.candidatestrings(matcher.matches("The site is certified"))
        # Fixed: this result was computed but never checked, so a regression
        # in matching "certified" -> "certify" would have gone unnoticed.
        # Assert it, mirroring the "submitted" case above.
        assert candidates == ["certify"]

    def test_space_mismatch(self):
        """Tests that we can match with some spacing mismatch"""
        csvfile = self.buildcsv(["down time"])
        matcher = match.terminologymatcher(csvfile)
        candidates = self.candidatestrings(matcher.matches("%d minutes downtime"))
        assert candidates == ["downtime"]

    def test_hyphen_mismatch(self):
        """Tests that we can match with some hyphenation mismatch"""
        csvfile = self.buildcsv(["pre-order"])
        matcher = match.terminologymatcher(csvfile)
        candidates = self.candidatestrings(matcher.matches("You can preorder"))
        assert candidates == ["preorder"]
        candidates = self.candidatestrings(matcher.matches("You can pre order"))
        assert candidates == ["pre order"]
        csvfile = self.buildcsv(["pre order"])
        matcher = match.terminologymatcher(csvfile)
        candidates = self.candidatestrings(matcher.matches("You can preorder"))
        assert candidates == ["preorder"]
        candidates = self.candidatestrings(matcher.matches("You can pre order"))
        assert candidates == ["pre order"]
| mpl-2.0 |
AA33/dsa_exp | l33t_code/word_break.py | 1 | 1193 | import sys
__author__ = 'abhishekanurag'
class Solution:
    # @param s, a string
    # @param dict, a set of string
    # @return a boolean
    def wordBreak(self, s, dict):
        """Return True if s can be split into a sequence of words from dict.

        Interval DP: memo[i][j] is True when the substring s[i..j]
        (both ends inclusive) can be segmented into dictionary words.
        Runs in O(n^3) time and O(n^2) space.
        """
        n = len(s)
        # An empty string is trivially segmentable (the original code
        # raised IndexError on memo[0][-1] for this input).
        if n == 0:
            return True
        memo = [[False] * n for _ in range(n)]
        # Fill bottom-up from the last start index, so memo[x + 1][j] is
        # always computed before it is read.
        for i in range(n - 1, -1, -1):
            for j in range(i, n):
                if s[i:j + 1] in dict:
                    memo[i][j] = True
                    continue
                # Try every split point x: s[i..x] + s[x+1..j].
                # Note x stops at j - 1. The original iterated x up to j,
                # reading memo[j + 1][...] out of bounds and raising
                # IndexError on unsegmentable inputs such as
                # wordBreak('ab', {'a'}).
                for x in range(i, j):
                    if memo[i][x] and memo[x + 1][j]:
                        memo[i][j] = True
                        break
        return memo[0][n - 1]
# Main
def main():
s = 'ab'
dict = set()
dict.add('a')
dict.add('b')
print Solution().wordBreak(s, dict)
if __name__ == "__main__":
sys.exit(main()) | gpl-3.0 |
shiquanwang/pylearn2 | pylearn2/scripts/icml_2013_wrepl/emotions/make_submission.py | 5 | 2094 | import sys
def usage():
print """usage: python make_submission.py model.pkl submission.csv
Where model.pkl contains a trained pylearn2.models.mlp.MLP object.
The script will make submission.csv, which you may then upload to the
kaggle site."""
# Command-line driver: load a trained model, run it over the test set in
# batches, and write one predicted class label per line to the output CSV.
if len(sys.argv) != 3:
    usage()
    print "(You used the wrong # of arguments)"
    quit(-1)
_, model_path, out_path = sys.argv
import os
# Refuse to clobber an existing submission file.
if os.path.exists(out_path):
    usage()
    print out_path+" already exists, and I don't want to overwrite anything just to be safe."
    quit(-1)
from pylearn2.utils import serial
try:
    model = serial.load(model_path)
except Exception, e:
    # NOTE(review): this prints the error but does not quit/re-raise, so
    # execution falls through with `model` undefined and fails below with a
    # NameError -- presumably a `quit(-1)` was intended here; confirm.
    usage()
    print model_path + "doesn't seem to be a valid model path, I got this error when trying to load it: "
    print e
from pylearn2.config import yaml_parse
# Rebuild the dataset the model was trained on, then switch to its test set.
dataset = yaml_parse.load(model.dataset_yaml_src)
dataset = dataset.get_test_set()
# use smallish batches to avoid running out of memory
batch_size = 100
model.set_batch_size(batch_size)
# dataset must be multiple of batch size of some batches will have
# different sizes. theano convolution requires a hard-coded batch size
m = dataset.X.shape[0]
extra = batch_size - m % batch_size
assert (m + extra) % batch_size == 0
import numpy as np
# Zero-pad the example matrix up to a whole number of batches; the padding
# rows are discarded again after prediction (y = y[:m] below).
if extra > 0:
    dataset.X = np.concatenate((dataset.X, np.zeros((extra, dataset.X.shape[1]),
        dtype=dataset.X.dtype)), axis=0)
assert dataset.X.shape[0] % batch_size == 0
# Compile a theano function mapping an input batch to argmax class indices.
X = model.get_input_space().make_batch_theano()
Y = model.fprop(X)
from theano import tensor as T
y = T.argmax(Y, axis=1)
from theano import function
f = function([X], y)
# Predict batch by batch, reshaping to the topological view when the model
# expects more than 2 input dimensions (e.g. convolutional input).
y = []
for i in xrange(dataset.X.shape[0] / batch_size):
    x_arg = dataset.X[i*batch_size:(i+1)*batch_size,:]
    if X.ndim > 2:
        x_arg = dataset.get_topological_view(x_arg)
    y.append(f(x_arg.astype(X.dtype)))
y = np.concatenate(y)
assert y.ndim == 1
assert y.shape[0] == dataset.X.shape[0]
# discard any zero-padding that was used to give the batches uniform size
y = y[:m]
# One integer label per line, in dataset order.
out = open(out_path, 'w')
for i in xrange(y.shape[0]):
    out.write('%d\n' % y[i])
out.close()
| bsd-3-clause |
# ASCII transliteration table for the Unicode block U+9000..U+90FF:
# data[i] is the transliteration of code point 0x9000 + i. Entries keep a
# trailing space as a word separator; '[?]' marks code points with no
# known transliteration. Generated data -- do not edit by hand.
data = (
'Tui ', # 0x00
'Song ', # 0x01
'Gua ', # 0x02
'Tao ', # 0x03
'Pang ', # 0x04
'Hou ', # 0x05
'Ni ', # 0x06
'Dun ', # 0x07
'Jiong ', # 0x08
'Xuan ', # 0x09
'Xun ', # 0x0a
'Bu ', # 0x0b
'You ', # 0x0c
'Xiao ', # 0x0d
'Qiu ', # 0x0e
'Tou ', # 0x0f
'Zhu ', # 0x10
'Qiu ', # 0x11
'Di ', # 0x12
'Di ', # 0x13
'Tu ', # 0x14
'Jing ', # 0x15
'Ti ', # 0x16
'Dou ', # 0x17
'Yi ', # 0x18
'Zhe ', # 0x19
'Tong ', # 0x1a
'Guang ', # 0x1b
'Wu ', # 0x1c
'Shi ', # 0x1d
'Cheng ', # 0x1e
'Su ', # 0x1f
'Zao ', # 0x20
'Qun ', # 0x21
'Feng ', # 0x22
'Lian ', # 0x23
'Suo ', # 0x24
'Hui ', # 0x25
'Li ', # 0x26
'Sako ', # 0x27
'Lai ', # 0x28
'Ben ', # 0x29
'Cuo ', # 0x2a
'Jue ', # 0x2b
'Beng ', # 0x2c
'Huan ', # 0x2d
'Dai ', # 0x2e
'Lu ', # 0x2f
'You ', # 0x30
'Zhou ', # 0x31
'Jin ', # 0x32
'Yu ', # 0x33
'Chuo ', # 0x34
'Kui ', # 0x35
'Wei ', # 0x36
'Ti ', # 0x37
'Yi ', # 0x38
'Da ', # 0x39
'Yuan ', # 0x3a
'Luo ', # 0x3b
'Bi ', # 0x3c
'Nuo ', # 0x3d
'Yu ', # 0x3e
'Dang ', # 0x3f
'Sui ', # 0x40
'Dun ', # 0x41
'Sui ', # 0x42
'Yan ', # 0x43
'Chuan ', # 0x44
'Chi ', # 0x45
'Ti ', # 0x46
'Yu ', # 0x47
'Shi ', # 0x48
'Zhen ', # 0x49
'You ', # 0x4a
'Yun ', # 0x4b
'E ', # 0x4c
'Bian ', # 0x4d
'Guo ', # 0x4e
'E ', # 0x4f
'Xia ', # 0x50
'Huang ', # 0x51
'Qiu ', # 0x52
'Dao ', # 0x53
'Da ', # 0x54
'Wei ', # 0x55
'Appare ', # 0x56
'Yi ', # 0x57
'Gou ', # 0x58
'Yao ', # 0x59
'Chu ', # 0x5a
'Liu ', # 0x5b
'Xun ', # 0x5c
'Ta ', # 0x5d
'Di ', # 0x5e
'Chi ', # 0x5f
'Yuan ', # 0x60
'Su ', # 0x61
'Ta ', # 0x62
'Qian ', # 0x63
'[?] ', # 0x64
'Yao ', # 0x65
'Guan ', # 0x66
'Zhang ', # 0x67
'Ao ', # 0x68
'Shi ', # 0x69
'Ce ', # 0x6a
'Chi ', # 0x6b
'Su ', # 0x6c
'Zao ', # 0x6d
'Zhe ', # 0x6e
'Dun ', # 0x6f
'Di ', # 0x70
'Lou ', # 0x71
'Chi ', # 0x72
'Cuo ', # 0x73
'Lin ', # 0x74
'Zun ', # 0x75
'Rao ', # 0x76
'Qian ', # 0x77
'Xuan ', # 0x78
'Yu ', # 0x79
'Yi ', # 0x7a
'Wu ', # 0x7b
'Liao ', # 0x7c
'Ju ', # 0x7d
'Shi ', # 0x7e
'Bi ', # 0x7f
'Yao ', # 0x80
'Mai ', # 0x81
'Xie ', # 0x82
'Sui ', # 0x83
'Huan ', # 0x84
'Zhan ', # 0x85
'Teng ', # 0x86
'Er ', # 0x87
'Miao ', # 0x88
'Bian ', # 0x89
'Bian ', # 0x8a
'La ', # 0x8b
'Li ', # 0x8c
'Yuan ', # 0x8d
'Yao ', # 0x8e
'Luo ', # 0x8f
'Li ', # 0x90
'Yi ', # 0x91
'Ting ', # 0x92
'Deng ', # 0x93
'Qi ', # 0x94
'Yong ', # 0x95
'Shan ', # 0x96
'Han ', # 0x97
'Yu ', # 0x98
'Mang ', # 0x99
'Ru ', # 0x9a
'Qiong ', # 0x9b
'[?] ', # 0x9c
'Kuang ', # 0x9d
'Fu ', # 0x9e
'Kang ', # 0x9f
'Bin ', # 0xa0
'Fang ', # 0xa1
'Xing ', # 0xa2
'Na ', # 0xa3
'Xin ', # 0xa4
'Shen ', # 0xa5
'Bang ', # 0xa6
'Yuan ', # 0xa7
'Cun ', # 0xa8
'Huo ', # 0xa9
'Xie ', # 0xaa
'Bang ', # 0xab
'Wu ', # 0xac
'Ju ', # 0xad
'You ', # 0xae
'Han ', # 0xaf
'Tai ', # 0xb0
'Qiu ', # 0xb1
'Bi ', # 0xb2
'Pei ', # 0xb3
'Bing ', # 0xb4
'Shao ', # 0xb5
'Bei ', # 0xb6
'Wa ', # 0xb7
'Di ', # 0xb8
'Zou ', # 0xb9
'Ye ', # 0xba
'Lin ', # 0xbb
'Kuang ', # 0xbc
'Gui ', # 0xbd
'Zhu ', # 0xbe
'Shi ', # 0xbf
'Ku ', # 0xc0
'Yu ', # 0xc1
'Gai ', # 0xc2
'Ge ', # 0xc3
'Xi ', # 0xc4
'Zhi ', # 0xc5
'Ji ', # 0xc6
'Xun ', # 0xc7
'Hou ', # 0xc8
'Xing ', # 0xc9
'Jiao ', # 0xca
'Xi ', # 0xcb
'Gui ', # 0xcc
'Nuo ', # 0xcd
'Lang ', # 0xce
'Jia ', # 0xcf
'Kuai ', # 0xd0
'Zheng ', # 0xd1
'Otoko ', # 0xd2
'Yun ', # 0xd3
'Yan ', # 0xd4
'Cheng ', # 0xd5
'Dou ', # 0xd6
'Chi ', # 0xd7
'Lu ', # 0xd8
'Fu ', # 0xd9
'Wu ', # 0xda
'Fu ', # 0xdb
'Gao ', # 0xdc
'Hao ', # 0xdd
'Lang ', # 0xde
'Jia ', # 0xdf
'Geng ', # 0xe0
'Jun ', # 0xe1
'Ying ', # 0xe2
'Bo ', # 0xe3
'Xi ', # 0xe4
'Bei ', # 0xe5
'Li ', # 0xe6
'Yun ', # 0xe7
'Bu ', # 0xe8
'Xiao ', # 0xe9
'Qi ', # 0xea
'Pi ', # 0xeb
'Qing ', # 0xec
'Guo ', # 0xed
'Zhou ', # 0xee
'Tan ', # 0xef
'Zou ', # 0xf0
'Ping ', # 0xf1
'Lai ', # 0xf2
'Ni ', # 0xf3
'Chen ', # 0xf4
'You ', # 0xf5
'Bu ', # 0xf6
'Xiang ', # 0xf7
'Dan ', # 0xf8
'Ju ', # 0xf9
'Yong ', # 0xfa
'Qiao ', # 0xfb
'Yi ', # 0xfc
'Du ', # 0xfd
'Yan ', # 0xfe
'Mei ', # 0xff
)
| bsd-3-clause |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.2/tests/regressiontests/utils/feedgenerator.py | 39 | 2217 | import datetime
import unittest
from django.utils import feedgenerator, tzinfo
class FeedgeneratorTest(unittest.TestCase):
    """
    Tests for the low-level syndication feed framework.
    """

    def test_get_tag_uri(self):
        """
        Test get_tag_uri() correctly generates TagURIs.
        """
        generated = feedgenerator.get_tag_uri(
            'http://example.org/foo/bar#headline', datetime.date(2004, 10, 25))
        self.assertEqual(generated,
                         u'tag:example.org,2004-10-25:/foo/bar/headline')

    def test_get_tag_uri_with_port(self):
        """
        Test that get_tag_uri() correctly generates TagURIs from URLs with port
        numbers.
        """
        generated = feedgenerator.get_tag_uri(
            'http://www.example.org:8000/2008/11/14/django#headline',
            datetime.datetime(2008, 11, 14, 13, 37, 0))
        self.assertEqual(generated,
                         u'tag:www.example.org,2008-11-14:/2008/11/14/django/headline')

    def test_rfc2822_date(self):
        """
        Test rfc2822_date() correctly formats datetime objects.
        """
        timestamp = datetime.datetime(2008, 11, 14, 13, 37, 0)
        self.assertEqual(feedgenerator.rfc2822_date(timestamp),
                         "Fri, 14 Nov 2008 13:37:00 -0000")

    def test_rfc2822_date_with_timezone(self):
        """
        Test rfc2822_date() correctly formats datetime objects with tzinfo.
        """
        one_hour = tzinfo.FixedOffset(datetime.timedelta(minutes=60))
        timestamp = datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=one_hour)
        self.assertEqual(feedgenerator.rfc2822_date(timestamp),
                         "Fri, 14 Nov 2008 13:37:00 +0100")

    def test_rfc3339_date(self):
        """
        Test rfc3339_date() correctly formats datetime objects.
        """
        timestamp = datetime.datetime(2008, 11, 14, 13, 37, 0)
        self.assertEqual(feedgenerator.rfc3339_date(timestamp),
                         "2008-11-14T13:37:00Z")

    def test_rfc3339_date_with_timezone(self):
        """
        Test rfc3339_date() correctly formats datetime objects with tzinfo.
        """
        two_hours = tzinfo.FixedOffset(datetime.timedelta(minutes=120))
        timestamp = datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=two_hours)
        self.assertEqual(feedgenerator.rfc3339_date(timestamp),
                         "2008-11-14T13:37:00+02:00")
| apache-2.0 |
feuchner/seismicsource-toolkit | mt_seismicsource/algorithms/recurrence.py | 1 | 18525 | # -*- coding: utf-8 -*-
"""
SHARE Seismic Source Toolkit
Python implementation of Hilmar Bungum's recurrence code.
See: Bungum (2007) Computers & Geosciences, 33, 808--820
doi:10.1016/j.cageo.2006.10.011
Author: Fabian Euchner, fabian@sed.ethz.ch
"""
############################################################################
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
import numpy
import os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
import QPCatalog
from mt_seismicsource import attributes
from mt_seismicsource import features
from mt_seismicsource import utils
from mt_seismicsource.algorithms import atticivy
from mt_seismicsource.algorithms import momentrate
from mt_seismicsource.layers import areasource
from mt_seismicsource.layers import eqcatalog
# minimum magnitude for recurrence computation
MAGNITUDE_MIN = 5.0

# magnitude binning of result FMD (frequency-magnitude distribution)
MAGNITUDE_BINNING = 0.1

# parameter alpha in Bungum paper, Table 1, line 9
# this is fault slip to fault length ratio
# this could in principle be computed from geometry of fault polygon
# and annual slip rate integrated over time
ALPHA_BUNGUM = 1.0e-04

# fault length to fault width factor, Bungum paper, Table 1, line 10
# this could be determined from geometry of fault polygon, but seems hard
# to do
FAULT_ASPECT_RATIO = 2.0

# magnitude threshold used to split fault background zone catalogs into
# "below" and "above" parts
FAULT_BACKGROUND_MAG_THRESHOLD = 5.5

# names of supported recurrence models
RECURRENCE_MODEL_NAMES = ("Anderson-Luco (1983) Model 2",)
def assignRecurrence(layer_fault, layer_fault_background=None,
    layer_background=None, catalog=None, catalog_time_span=None, b_value=None,
    mmin=atticivy.ATTICIVY_MMIN,
    m_threshold=FAULT_BACKGROUND_MAG_THRESHOLD,
    mindepth=eqcatalog.CUT_DEPTH_MIN, maxdepth=eqcatalog.CUT_DEPTH_MAX,
    ui_mode=True):
    """Compute recurrence parameters according to Bungum paper. Add
    activity rates, a/b values, and min/max seismic moment rate
    as attributes to fault polygon layer.

    Input:
        layer_fault             QGis layer with fault zone features
        layer_fault_background  QGis layer with fault background zone features
        layer_background        QGis layer with background zone features
                                (provides Mmax and Mc distribution)
        catalog                 Earthquake catalog as QuakePy object
        b_value                 b value to be used for computation
    """
    # If no b value is supplied, it has to be derived from the background
    # layers and catalog, so those must all be present.
    if b_value is None and (layer_fault_background is None or \
        layer_background is None or catalog is None):
        error_msg = \
            "If no b value is given, the other parameters must not be None"
        raise RuntimeError, error_msg

    recurrence = computeRecurrence(layer_fault, layer_fault_background,
        layer_background, catalog, catalog_time_span, b_value, mmin,
        m_threshold, mindepth, maxdepth, ui_mode=ui_mode)

    # Persist the per-feature result rows on the fault layer.
    attributes.writeLayerAttributes(layer_fault,
        features.FAULT_SOURCE_ATTRIBUTES_RECURRENCE_COMPUTE, recurrence)
def computeRecurrence(layer_fault, layer_fault_background=None,
    layer_background=None, catalog=None, catalog_time_span=None, b_value=None,
    mmin=atticivy.ATTICIVY_MMIN,
    m_threshold=FAULT_BACKGROUND_MAG_THRESHOLD,
    mindepth=eqcatalog.CUT_DEPTH_MIN, maxdepth=eqcatalog.CUT_DEPTH_MAX,
    ui_mode=True):
    """Compute recurrence parameters according to Bungum paper.

    Returns one attribute row (list) per selected fault feature, or None for
    features that could not be processed.

    Parameters from Jochen's Matlab implementation:

    % rParams.fBvalue : b-value
    % rParams.fS      : Slip rate (mm/year)
    % rParams.fD      : Average slip (m)
    % rParams.fLength : Fault length (m)
    % rParams.fWidth  : Fault width (m)
    % rParams.fM00    : M0(0) for Ms = 0, c in logM0=c-dM
    % rParams.fMmin   : Minimum magnitude
    % rParams.fBinM   : magnitude binnning
    % rParams.fMmax   : Maximum magnitude
    % rParams.fDmoment: d in logM0=c-dM
    % rParams.fRigid  : Rgidity in Pascal

    rParams.fBvalue = 1;
    rParams.fS = 1e-3;
    rParams.fD = 1;
    rParams.fLength = 100000;
    rParams.fWidth = 50000;
    rParams.fM00 = 16.05;
    rParams.fMmin = 5;
    rParams.fBinM = 0.1;
    rParams.fMmax = 8;
    rParams.fDmoment = 1.5;
    rParams.fRigid = 30e+6;
    """
    result_values = []
    provider_fault = layer_fault.dataProvider()
    fts = layer_fault.selectedFeatures()

    # loop over fault polygons
    for zone_idx, feature in enumerate(fts):

        # collect the following attribute data:
        # - FBZ zone ID
        # - (a, b, act_a, act_b) for FBZ
        # - (a, b, act_a, act_b) for buffer zone
        # - m_threshold
        # - (a, b, act_a, act_b) for FBZ, below magnitude threshold
        # - (a, b, act_a, act_b) for FBZ, above magnitude threshold
        # - (a, b, Mc) from maximum likelihood G-R
        # - Mmax from background
        # - a from slip rate (min/max)
        # - activity rate (min/max)
        # - moment rate (min/max)

        zone_data_string_min = ""
        zone_data_string_max = ""

        if ui_mode is False:
            print "\n=== Processing FSZ feature, id %s ===" % feature.id()

        # get parameters from background zones
        activity_back = computeActivityFromBackground(feature,
            layer_fault_background, layer_background, catalog, mmin,
            m_threshold, mindepth, maxdepth, ui_mode=ui_mode)

        # could not determine background parameters -> emit a None row so
        # row indices stay aligned with the selected features
        if activity_back is None:
            result_values.append(None)
            continue

        # get attribute values of zone:
        # - MAXMAG, SLIPRATEMI, SLIPRATEMA
        attribute_map_fault = utils.getAttributeIndex(provider_fault,
            features.FAULT_SOURCE_ATTRIBUTES_RECURRENCE, create=True)

        # get maximum magnitude (Note: it's int in feature attributes)
        maxmag_name = features.FAULT_SOURCE_ATTR_MAGNITUDE_MAX['name']
        maxmag = feature[attribute_map_fault[maxmag_name][0]].toDouble()[0]

        # get minimum of annual slip rate
        slipratemi_name = features.FAULT_SOURCE_ATTR_SLIPRATE_MIN['name']
        slipratemi = \
            feature[attribute_map_fault[slipratemi_name][0]].toDouble()[0]

        # get maximum of annual slip rate
        slipratema_name = features.FAULT_SOURCE_ATTR_SLIPRATE_MAX['name']
        slipratema = \
            feature[attribute_map_fault[slipratema_name][0]].toDouble()[0]

        # get area of fault zone
        polylist, vertices = utils.polygonsQGS2Shapely((feature,))
        fault_poly = polylist[0]
        fault_area = utils.polygonAreaFromWGS84(fault_poly)

        # determine b value that is used in further computations
        # - use b value computed on fault background zone
        # NOTE(review): this rebinds the b_value parameter on the first
        # feature and thereby reuses that zone's b value for all following
        # features -- confirm this is intended.
        if b_value is None:
            b_value = activity_back['fbz']['activity'][atticivy.ATTICIVY_B_IDX]

        # equidistant magnitude array on which activity rates are computed
        # from global Mmin to zone-dependent Mmax
        mag_arr = numpy.arange(MAGNITUDE_MIN, maxmag, MAGNITUDE_BINNING)
        if len(mag_arr) == 0:
            # Mmax at or below global Mmin: nothing to compute for this zone
            result_values.append(None)
            continue

        # annual cumulative occurrence rates for min/max slip rate
        cumulative_number_min = cumulative_occurrence_model_2(mag_arr, maxmag,
            slipratemi, b_value, fault_area) / catalog_time_span
        cumulative_number_max = cumulative_occurrence_model_2(mag_arr, maxmag,
            slipratema, b_value, fault_area) / catalog_time_span

        # use a value computed with max of slip rate
        a_value_min = computeAValueFromOccurrence(
            numpy.log10(cumulative_number_min[0]), b_value, MAGNITUDE_MIN)
        a_value_max = computeAValueFromOccurrence(
            numpy.log10(cumulative_number_max[0]), b_value, MAGNITUDE_MIN)

        # compute contribution to total seismic moment
        # TODO(fab): double-check scaling with Laurentiu!
        # shear modulus: Pa = N / m^2 = kg / (m * s^2) = kg / (10^-3 km * s^2)
        # 1 kg / (km * s^2) = 10^3 N
        # slip rate: mm / year
        # area: m^2
        # moment rate unit: Nm / (year * km^2)
        # kg * 10^-3 m * m^2 / (m * s^2 * 365.25*24*60*60 s)
        # = 10^3 N * 10^-3 m^3 / (10^-3 * [year] s))
        # = 10^3 Nm * m^2 / [year] s <- divide this by area in metres (?)
        # kg m^3 / (m s^3) = kg m^2 / s^3
        (seismic_moment_rate_min, seismic_moment_rate_max) = \
            momentrate.momentrateFromSlipRate(slipratemi, slipratema,
                fault_area)

        # serialize activity rate FMD as "<mag> <rate>" pairs in one string
        for value_pair_idx in xrange(mag_arr.shape[0]):
            zone_data_string_min = "%s %.1f %.2e" % (zone_data_string_min,
                mag_arr[value_pair_idx],
                cumulative_number_min[value_pair_idx])
            zone_data_string_max = "%s %.1f %.2e" % (zone_data_string_max,
                mag_arr[value_pair_idx],
                cumulative_number_max[value_pair_idx])

        # assemble the output row in the order expected by
        # FAULT_SOURCE_ATTRIBUTES_RECURRENCE_COMPUTE
        attribute_list = []
        if activity_back['fbz']['ID'] is None:
            fbz_id = activity_back['fbz']['ID']
        else:
            fbz_id = str(activity_back['fbz']['ID'])
        attribute_list.append(fbz_id)
        attribute_list.extend(checkAndCastActivityResult(
            activity_back['fbz']['activity']))
        attribute_list.extend(checkAndCastActivityResult(
            activity_back['bz']['activity']))
        attribute_list.append(float(m_threshold))
        attribute_list.extend(checkAndCastActivityResult(
            activity_back['fbz_below']['activity']))
        attribute_list.extend(checkAndCastActivityResult(
            activity_back['fbz_above']['activity']))
        attribute_list.extend([numpy.nan] * 4) # ML, Mmax
        attribute_list.extend([float(a_value_min), float(a_value_max)])
        attribute_list.extend([numpy.nan] * 3) # three momentrate components
        attribute_list.extend([
            float(seismic_moment_rate_min),
            float(seismic_moment_rate_max),
            zone_data_string_min.lstrip(),
            zone_data_string_max.lstrip()])
        result_values.append(attribute_list)

    return result_values
def cumulative_occurrence_model_2(mag_arr, maxmag, sliprate, b_value,
    area_metres):
    """Compute cumulative occurrence rate for given magnitude (model 2,
    eq. 7 in Bungum paper).

    Input:
        mag_arr     array of target magnitudes (CHANGE)
        maxmag      maximum magnitude of fault
        sliprate    annual slip rate (mm/yr)
        b_value     b value of background seismicity
        area_metres fault area in metres

    Returns a numpy array of cumulative occurrence rates, one per entry
    of mag_arr.
    """
    # re-scaled parameters, as given for eq. 5 in Bungum paper
    # b value of background seismicity
    b_bar = b_value * numpy.log(10.0)
    # d coefficient from scaling ratio of seismic moment to
    # moment magnitude
    d_bar = momentrate.CONST_KANAMORI_D * numpy.log(10.0)

    # alpha is the ratio of total displacement across the fault
    # and fault length. Use fixed parameter value.
    alpha = ALPHA_BUNGUM

    beta_numerator = alpha * numpy.power(10, momentrate.CONST_KANAMORI_C_CGS)

    # convert shear modulus from Pa (N/m^2, kg/(m * s^2))
    # to dyn/cm^2, 1 dyn = 1 g * cm/s^2 = 10^-5 N
    # 1 GPa = 10^9 kg/(m * s^2) = 10^12 g/(m * s^2) = 10^10 g/(cm *s^2)
    # = 10^10 dyn/cm^2
    # convert area from square metres to square centimetres
    # Original equation has W (fault width) in denominator, we replace
    # this with fault area (which we get from geometry),
    # and fixed fault length/width ratio
    beta_denominator = 1.0e10 * momentrate.SHEAR_MODULUS * numpy.sqrt(
        area_metres * 100 * 100 / FAULT_ASPECT_RATIO)

    beta = numpy.sqrt(beta_numerator / beta_denominator)

    # factors in Bungum eq. 7
    f1 = (d_bar - b_bar) / b_bar

    # convert annual slip rate from mm/yr to cm/yr
    f2 = sliprate / (10 * beta)

    f3 = numpy.exp(b_bar * (maxmag - mag_arr)) - 1

    f4 = numpy.exp(-0.5 * d_bar * maxmag)

    # compute activity rate per fault polygon
    cumulative_number = f1 * f2 * f3 * f4
    return cumulative_number
def computeAValueFromOccurrence(lg_occurrence, b_value, mmin=MAGNITUDE_MIN):
    """Invert the Gutenberg-Richter relation for the a value:
    a = log10(N(mmin)) + b * mmin, where lg_occurrence is log10 of the
    cumulative occurrence rate at magnitude mmin."""
    return lg_occurrence + b_value * mmin
def computeActivityFromBackground(feature, layer_fault_background,
    layer_background, catalog, mmin=atticivy.ATTICIVY_MMIN,
    m_threshold=FAULT_BACKGROUND_MAG_THRESHOLD,
    mindepth=eqcatalog.CUT_DEPTH_MIN, maxdepth=eqcatalog.CUT_DEPTH_MAX,
    ui_mode=True):
    """Compute activity parameters a and b for (i) fault background zone, and
    (ii) from buffer zone around fault zone.

    Input:
        feature                 fault source zone
        layer_fault_background  QGis layer with fault background zones
        layer_background        QGis layer with background zones
        catalog                 earthquake catalog (QuakePy object)
        mmin                    minimum magnitude for AtticIvy

    Output:
        dict with keys 'fbz', 'fbz_below', 'fbz_above', 'bz', 'background',
        or None if geometry/attributes could not be determined.
    """
    activity = {}
    provider_fault_back = layer_fault_background.dataProvider()
    provider_back = layer_background.dataProvider()

    # get Shapely polygon from fault zone feature geometry
    polylist, vertices = utils.polygonsQGS2Shapely((feature,))
    try:
        fault_poly = polylist[0]
    except IndexError:
        error_msg = "Background activity: invalid FSZ geometry, id %s" % (
            feature.id())
        if ui_mode is True:
            QMessageBox.warning(None, "FSZ Warning", error_msg)
        else:
            print error_msg
        return None

    # get buffer zone around fault zone (convert buffer distance to degrees)
    (bz_poly, bz_area) = utils.computeBufferZone(fault_poly,
        momentrate.BUFFER_AROUND_FAULT_ZONE_KM)

    # find fault background zone in which centroid of fault zone lies
    # NOTE: this can yield a wrong background zone if the fault zone
    # is curved and at the edge of background zone.
    # TODO(fab): use GIS "within" function instead, but note that fault
    # zone can overlap several BG zones
    (fbz, fbz_poly, fbz_area) = utils.findBackgroundZone(fault_poly.centroid,
        provider_fault_back, ui_mode=ui_mode)

    if fbz is None:
        error_msg = "Recurrence: could not determine FBZ for zone %s" % (
            feature.id())
        if ui_mode is True:
            QMessageBox.warning(None, "Recurrence Warning", error_msg)
        else:
            print error_msg
        # NOTE(review): execution continues with fbz_id = None, but
        # fbz_poly is used below (fbz_poly.centroid) -- if fbz is None,
        # fbz_poly is presumably None too and this would fail; confirm
        # what findBackgroundZone() returns in that case.
        fbz_id = None
    else:
        attribute_map_fbz = utils.getAttributeIndex(provider_fault_back,
            (features.FAULT_BACKGROUND_ATTR_ID,), create=False)
        # get fault background zone ID
        id_name = features.FAULT_BACKGROUND_ATTR_ID['name']
        fbz_id = int(fbz[attribute_map_fbz[id_name][0]].toDouble()[0])

    # get mmax and mcdist for FBZ from background zone
    (mcdist_qv, mmax_qv) = areasource.getAttributesFromBackgroundZones(
        fbz_poly.centroid, provider_back, areasource.MCDIST_MMAX_ATTRIBUTES,
        ui_mode=ui_mode)

    if mcdist_qv is None or mmax_qv is None:
        error_msg = "Recurrence: could not determine mcdist or mmax for "\
            "zone %s" % (feature.id())
        if ui_mode is True:
            QMessageBox.warning(None, "Recurrence Warning", error_msg)
        else:
            print error_msg
        return None
    else:
        mmax = float(mmax_qv.toDouble()[0])
        mcdist = str(mcdist_qv.toString())

    ## moment rate from activity (RM)

    # a and b value from FBZ
    # cut catalog with depth constraint
    cat_cut = QPCatalog.QPCatalog()
    cat_cut.merge(catalog)
    cat_cut.cut(mindepth=mindepth, maxdepth=maxdepth)

    activity_fbz = atticivy.computeActivityAtticIvy((fbz_poly,), (mmax,),
        (mcdist,), cat_cut, mmin=mmin, ui_mode=ui_mode)
    activity['fbz'] = {'ID': fbz_id, 'area': fbz_area,
        'activity': activity_fbz[0]}

    # get separate catalogs below and above magnitude threshold
    cat_below_threshold = QPCatalog.QPCatalog()
    cat_below_threshold.merge(cat_cut)
    cat_below_threshold.cut(maxmag=m_threshold, maxmag_excl=True)

    cat_above_threshold = QPCatalog.QPCatalog()
    cat_above_threshold.merge(cat_cut)
    cat_above_threshold.cut(minmag=m_threshold, maxmag_excl=False)

    activity_below_threshold = atticivy.computeActivityAtticIvy(
        (fbz_poly,), (mmax,), (mcdist,), cat_below_threshold, mmin=mmin,
        ui_mode=ui_mode)
    activity['fbz_below'] = {'ID': fbz_id, 'area': fbz_area,
        'activity': activity_below_threshold[0]}

    activity_above_threshold = atticivy.computeActivityAtticIvy(
        (fbz_poly,), (mmax,), (mcdist,), cat_above_threshold, mmin=mmin,
        ui_mode=ui_mode)
    activity['fbz_above'] = {'ID': fbz_id, 'area': fbz_area,
        'activity': activity_above_threshold[0]}

    # a and b value on buffer zone
    activity_bz = atticivy.computeActivityAtticIvy((bz_poly,), (mmax,),
        (mcdist,), cat_cut, mmin, ui_mode=ui_mode)
    activity['bz'] = {'area': bz_area, 'activity': activity_bz[0]}

    activity['background'] = {'mmax': mmax, 'mcdist': mcdist}
    return activity
def checkAndCastActivityResult(activity):
    """Check if an activity result is not None, and convert components.

    Returns a 4-element list [a, b, act_a, act_b] with a/b cast to float
    and the activity strings cast to str, or four Nones when the input
    activity result is missing.
    """
    if activity is None:
        return [None, None, None, None]
    return [float(activity[atticivy.ATTICIVY_A_IDX]),
            float(activity[atticivy.ATTICIVY_B_IDX]),
            str(activity[atticivy.ATTICIVY_ACT_A_IDX]),
            str(activity[atticivy.ATTICIVY_ACT_B_IDX])]
| gpl-2.0 |
chengdh/openerp-ktv | openerp/addons/ktv_sale/__init__.py | 1 | 2353 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import fee_type
import fee_type_member_class_discount
import pay_type
import room
import room_area
import room_type
# NOTE(review): duplicate import of `room` (already imported above); harmless
# but could be removed.
import room
import room_operate  # private-room operation object
import room_scheduled  # room reservation object
import room_opens  # room opening (check-in) object
import room_checkout  # room checkout
import room_checkout_buyout  # room checkout - buyout (flat-rate)
import room_checkout_buytime  # room checkout - buy-time (hourly)
import room_type_special_day
import buyout_config
import buyout_config_special_day
import buffet_config
import buffet_config_special_day
import minimum_fee_config
import minimum_fee_config_special_day
import price_class
import hourly_fee_discount
import hourly_fee_discount_special_day
import hourly_fee_p_discount
import hourly_fee_p_discount_special_day
import hourly_fee_promotion
import member_hourly_fee_discount
import member_hourly_fee_discount_special_day
import member_class  # membership card level configuration
import member  # member information configuration
import member_class_change_config  # member upgrade/downgrade configuration
import discount_card_type  # discount card type
import discount_card  # discount card
import song_ticket  # singing voucher
import sales_voucher_type  # voucher (credit note) type
import sales_voucher  # voucher registration
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
michelts/lettuce | tests/integration/lib/Django-1.2.5/django/core/management/commands/syncdb.py | 44 | 7931 | from optparse import make_option
import sys
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from django.utils.importlib import import_module
class Command(NoArgsCommand):
    """Implementation of ``manage.py syncdb`` (Django 1.2, Python 2).

    Creates missing database tables (plus custom SQL and indexes) for every
    model in INSTALLED_APPS on the selected database alias, then loads the
    ``initial_data`` fixtures.
    """
    option_list = NoArgsCommand.option_list + (
        make_option('--noinput', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
                'Defaults to the "default" database.'),
    )
    help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
    def handle_noargs(self, **options):
        """Synchronize the selected database with the installed models.

        Runs in four phases -- table creation, ``post_syncdb`` signal
        emission, custom SQL installation and index creation -- each
        committed (or rolled back) independently.
        """
        verbosity = int(options.get('verbosity', 1))
        interactive = options.get('interactive')
        show_traceback = options.get('traceback', False)
        # Stealth option -- 'load_initial_data' is used by the testing setup
        # process to disable initial fixture loading.
        load_initial_data = options.get('load_initial_data', True)
        self.style = no_style()
        # Import the 'management' module within each installed app, to register
        # dispatcher events.
        for app_name in settings.INSTALLED_APPS:
            try:
                import_module('.management', app_name)
            except ImportError, exc:
                # This is slightly hackish. We want to ignore ImportErrors
                # if the "management" module itself is missing -- but we don't
                # want to ignore the exception if the management module exists
                # but raises an ImportError for some reason. The only way we
                # can do this is to check the text of the exception. Note that
                # we're a bit broad in how we check the text, because different
                # Python implementations may not use the same text.
                # CPython uses the text "No module named management"
                # PyPy uses "No module named myproject.myapp.management"
                msg = exc.args[0]
                if not msg.startswith('No module named') or 'management' not in msg:
                    raise
        db = options.get('database', DEFAULT_DB_ALIAS)
        connection = connections[db]
        cursor = connection.cursor()
        # Get a list of already installed *models* so that references work right.
        tables = connection.introspection.table_names()
        seen_models = connection.introspection.installed_models(tables)
        created_models = set()
        # Maps a referenced model -> list of (model, field) FK references that
        # cannot be emitted until the referenced table exists.
        pending_references = {}
        # Build the manifest of apps and models that are to be synchronized
        all_models = [
            (app.__name__.split('.')[-2],
                [m for m in models.get_models(app, include_auto_created=True)
                if router.allow_syncdb(db, m)])
            for app in models.get_apps()
        ]
        def model_installed(model):
            # A model still needs its table if neither its own table nor the
            # table it was auto-created for (m2m "through") already exists.
            opts = model._meta
            converter = connection.introspection.table_name_converter
            return not ((converter(opts.db_table) in tables) or
                (opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
        manifest = SortedDict(
            (app_name, filter(model_installed, model_list))
            for app_name, model_list in all_models
        )
        # Create the tables for each model
        for app_name, model_list in manifest.items():
            for model in model_list:
                # Create the model's database table, if it doesn't already exist.
                if verbosity >= 2:
                    print "Processing %s.%s model" % (app_name, model._meta.object_name)
                sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
                seen_models.add(model)
                created_models.add(model)
                # Queue FK statements to not-yet-created models; flush any
                # that can be resolved now that this model has been seen.
                for refto, refs in references.items():
                    pending_references.setdefault(refto, []).extend(refs)
                    if refto in seen_models:
                        sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
                sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
                if verbosity >= 1 and sql:
                    print "Creating table %s" % model._meta.db_table
                for statement in sql:
                    cursor.execute(statement)
                tables.append(connection.introspection.table_name_converter(model._meta.db_table))
        transaction.commit_unless_managed(using=db)
        # Send the post_syncdb signal, so individual apps can do whatever they need
        # to do at this point.
        emit_post_sync_signal(created_models, verbosity, interactive, db)
        # The connection may have been closed by a syncdb handler.
        cursor = connection.cursor()
        # Install custom SQL for the app (but only if this
        # is a model we've just created)
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    custom_sql = custom_sql_for_model(model, self.style, connection)
                    if custom_sql:
                        if verbosity >= 1:
                            print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
                        try:
                            for sql in custom_sql:
                                cursor.execute(sql)
                        except Exception, e:
                            sys.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
                                                (app_name, model._meta.object_name, e))
                            if show_traceback:
                                import traceback
                                traceback.print_exc()
                            # A failed custom-SQL batch is rolled back so it
                            # does not poison later statements.
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)
                    else:
                        if verbosity >= 2:
                            print "No custom SQL for %s.%s model" % (app_name, model._meta.object_name)
        # Install SQL indicies for all newly created models
        for app_name, model_list in manifest.items():
            for model in model_list:
                if model in created_models:
                    index_sql = connection.creation.sql_indexes_for_model(model, self.style)
                    if index_sql:
                        if verbosity >= 1:
                            print "Installing index for %s.%s model" % (app_name, model._meta.object_name)
                        try:
                            for sql in index_sql:
                                cursor.execute(sql)
                        except Exception, e:
                            sys.stderr.write("Failed to install index for %s.%s model: %s\n" % \
                                                (app_name, model._meta.object_name, e))
                            transaction.rollback_unless_managed(using=db)
                        else:
                            transaction.commit_unless_managed(using=db)
        # Load initial_data fixtures (unless that has been disabled)
        if load_initial_data:
            from django.core.management import call_command
            call_command('loaddata', 'initial_data', verbosity=verbosity, database=db)
| gpl-3.0 |
awentzonline/image-analogies | image_analogy/main.py | 1 | 5783 | import os
import time
import numpy as np
import scipy.ndimage
from keras import backend as K
from scipy.misc import imsave
from image_analogy import img_utils, vgg16
from image_analogy.optimizer import Optimizer
def main(args, model_class):
    '''The main loop which does the things.

    Runs a coarse-to-fine loop over ``args.num_scales`` scales; at each scale
    the previous result is upsampled and refined for
    ``args.num_iterations_per_scale`` optimizer steps, optionally applying
    (and then undoing) color and spatial jitter around each step. Every
    iteration's result is written to disk.
    '''
    K.set_image_dim_ordering('th')
    # calculate scales: evenly spaced factors from min_scale up to 1.0
    if args.num_scales > 1:
        step_scale_factor = (1 - args.min_scale) / (args.num_scales - 1)
    else:
        step_scale_factor = 0.0
        args.min_scale = 1.0
    # prepare the input images
    full_ap_image = img_utils.load_image(args.ap_image_path)
    full_a_image = img_utils.load_image(args.a_image_path)
    full_b_image = img_utils.load_image(args.b_image_path)
    # calculate the output size
    full_img_width, full_img_height = calculate_image_dims(args, full_b_image)
    img_num_channels = 3 # TODO: allow alpha
    # how much "B" shrinks/grows to reach the requested output size; used by
    # the 'ratio' a_scale_mode below
    b_scale_ratio_width = float(full_b_image.shape[1]) / full_img_width
    b_scale_ratio_height = float(full_b_image.shape[0]) / full_img_height
    # ensure the output dir exists
    output_dir = os.path.dirname(args.result_prefix)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # multi-scale loop
    x = None # this is going to hold our output image (channel-first VGG space)
    optimizer = Optimizer()
    for scale_i in range(args.num_scales):
        scale_factor = (scale_i * step_scale_factor) + args.min_scale
        # scale our inputs
        img_width = int(round(full_img_width * scale_factor))
        img_height = int(round(full_img_height * scale_factor))
        # prepare the current optimizer state
        if x is None: # we need to create an initial state
            x = np.random.uniform(0, 255, (img_height, img_width, 3)).astype(np.float32)
            x = vgg16.img_to_vgg(x)
        else: # resize the last state
            zoom_ratio = img_width / float(x.shape[-1])
            x = scipy.ndimage.zoom(x, (1, zoom_ratio, zoom_ratio), order=1)
            # after zooming, use the actual (rounded) state size everywhere
            img_height, img_width = x.shape[-2:]
        # determine scaling of "A" images
        if args.a_scale_mode == 'match':
            a_img_width = img_width
            a_img_height = img_height
        elif args.a_scale_mode == 'none':
            a_img_width = full_a_image.shape[1] * scale_factor
            a_img_height = full_a_image.shape[0] * scale_factor
        else: # should just be 'ratio'
            a_img_width = full_a_image.shape[1] * scale_factor * b_scale_ratio_width
            a_img_height = full_a_image.shape[0] * scale_factor * b_scale_ratio_height
        a_img_width = int(round(args.a_scale * a_img_width))
        a_img_height = int(round(args.a_scale * a_img_height))
        # prepare images for use
        a_image = img_utils.preprocess_image(full_a_image, a_img_width, a_img_height)
        ap_image = img_utils.preprocess_image(full_ap_image, a_img_width, a_img_height)
        b_image = img_utils.preprocess_image(full_b_image, img_width, img_height)
        print('Scale factor {} "A" shape {} "B" shape {}'.format(scale_factor, a_image.shape, b_image.shape))
        # load up the net and create the model (rebuilt per scale since the
        # network input size changes)
        net = vgg16.get_model(img_width, img_height, weights_path=args.vgg_weights, pool_mode=args.pool_mode)
        model = model_class(net, args)
        model.build(a_image, ap_image, b_image, (1, img_num_channels, img_height, img_width))
        for i in range(args.num_iterations_per_scale):
            print('Start of iteration {} x {}'.format(scale_i, i))
            start_time = time.time()
            if args.color_jitter:
                color_jitter = (args.color_jitter * 2) * (np.random.random((3, img_height, img_width)) - 0.5)
                x += color_jitter
            if args.jitter:
                jitter = args.jitter * scale_factor
                ox, oy = np.random.randint(-jitter, jitter+1, 2)
                x = np.roll(np.roll(x, ox, -1), oy, -2) # apply jitter shift
            # actually run the optimizer
            x, min_val, info = optimizer.optimize(x, model)
            print('Current loss value: {}'.format(min_val))
            # unjitter the image (optimizer may return a flattened array)
            x = x.reshape((3, img_height, img_width))
            if args.jitter:
                x = np.roll(np.roll(x, -ox, -1), -oy, -2) # unshift image
            if args.color_jitter:
                x -= color_jitter
            # save the image
            if args.output_full_size:
                out_resize_shape = (full_img_height, full_img_width)
            else:
                out_resize_shape = None
            img = img_utils.deprocess_image(np.copy(x), contrast_percent=args.contrast_percent,resize=out_resize_shape)
            fname = args.result_prefix + '_at_iteration_{}_{}.png'.format(scale_i, i)
            imsave(fname, img)
            end_time = time.time()
            print('Image saved as {}'.format(fname))
            print('Iteration completed in {:.2f} seconds'.format(end_time - start_time,))
def calculate_image_dims(args, full_b_image):
    '''Determine the dimensions of the generated picture.

    Defaults to the size of Image B. Explicit ``--out-width`` /
    ``--out-height`` values override it; when only one of the two is
    given, the other is derived so the aspect ratio is preserved.
    '''
    height, width = full_b_image.shape[0], full_b_image.shape[1]
    out_w, out_h = args.out_width, args.out_height
    if out_w and out_h:
        width, height = out_w, out_h
    elif out_w:
        height = int(round(out_w / float(width) * height))
        width = out_w
    elif out_h:
        width = int(round(out_h / float(height) * width))
        height = out_h
    return width, height
| mit |
brandond/ansible | test/units/modules/network/f5/test_bigip_snmp_community.py | 21 | 9254 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_snmp_community import ApiParameters
from library.modules.bigip_snmp_community import ModuleParameters
from library.modules.bigip_snmp_community import ModuleManager
from library.modules.bigip_snmp_community import V1Manager
from library.modules.bigip_snmp_community import V2Manager
from library.modules.bigip_snmp_community import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_snmp_community import ApiParameters
from ansible.modules.network.f5.bigip_snmp_community import ModuleParameters
from ansible.modules.network.f5.bigip_snmp_community import ModuleManager
from ansible.modules.network.f5.bigip_snmp_community import V1Manager
from ansible.modules.network.f5.bigip_snmp_community import V2Manager
from ansible.modules.network.f5.bigip_snmp_community import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
# Directory holding the JSON fixtures used by the tests, plus an in-memory
# cache so each fixture file is read from disk at most once.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load fixture *name* from the fixtures directory, caching the result.

    The file contents are decoded as JSON when possible; otherwise the raw
    text is returned unchanged.
    """
    full_path = os.path.join(fixture_path, name)
    if full_path in fixture_data:
        return fixture_data[full_path]
    with open(full_path) as handle:
        contents = handle.read()
    try:
        contents = json.loads(contents)
    except Exception:
        pass  # not JSON -- keep the raw text
    fixture_data[full_path] = contents
    return contents
class TestParameters(unittest.TestCase):
    """Exercise the module/API parameter adapter classes against raw dicts
    and recorded device fixtures."""
    def test_module_parameters(self):
        raw = dict(
            version='v2c',
            community='foo',
            source='1.1.1.1',
            port='8080',
            oid='.1',
            access='ro',
            ip_version=4,
            snmp_username='admin',
            snmp_auth_protocol='sha',
            snmp_auth_password='secretsecret',
            snmp_privacy_protocol='des',
            snmp_privacy_password='secretsecret',
            update_password='always',
            state='present'
        )
        parsed = ModuleParameters(params=raw)
        assert parsed.version == 'v2c'
        assert parsed.community == 'foo'
        assert parsed.source == '1.1.1.1'
        # Port is normalised from string to int by the adapter.
        assert parsed.port == 8080
        assert parsed.oid == '.1'
        assert parsed.access == 'ro'
        assert parsed.ip_version == 4
        assert parsed.snmp_username == 'admin'
        assert parsed.snmp_auth_protocol == 'sha'
        assert parsed.snmp_auth_password == 'secretsecret'
        assert parsed.snmp_privacy_protocol == 'des'
        assert parsed.snmp_privacy_password == 'secretsecret'
        assert parsed.update_password == 'always'
        assert parsed.state == 'present'
    def test_api_parameters_community_1(self):
        fixture = load_fixture('load_sys_snmp_communities_1.json')
        parsed = ApiParameters(params=fixture)
        assert parsed.access == 'ro'
        assert parsed.community == 'foo'
        assert parsed.ip_version == 4
    def test_api_parameters_community_2(self):
        fixture = load_fixture('load_sys_snmp_communities_2.json')
        parsed = ApiParameters(params=fixture)
        assert parsed.access == 'rw'
        assert parsed.community == 'foo'
        assert parsed.ip_version == 4
        assert parsed.oid == '.1'
        assert parsed.source == '1.1.1.1'
    def test_api_parameters_community_3(self):
        fixture = load_fixture('load_sys_snmp_communities_3.json')
        parsed = ApiParameters(params=fixture)
        assert parsed.access == 'ro'
        assert parsed.community == 'foo'
        assert parsed.ip_version == 6
        assert parsed.oid == '.1'
        assert parsed.source == '2001:0db8:85a3:0000:0000:8a2e:0370:7334'
    def test_api_parameters_community_4(self):
        fixture = load_fixture('load_sys_snmp_communities_4.json')
        parsed = ApiParameters(params=fixture)
        assert parsed.access == 'ro'
        assert parsed.community == 'foo'
        assert parsed.ip_version == 6
    def test_api_parameters_users_1(self):
        fixture = load_fixture('load_sys_snmp_users_1.json')
        parsed = ApiParameters(params=fixture)
        assert parsed.access == 'ro'
        assert parsed.snmp_auth_protocol == 'sha'
        assert parsed.oid == '.1'
        assert parsed.snmp_privacy_protocol == 'aes'
        assert parsed.snmp_username == 'foo'
class TestManager(unittest.TestCase):
    """Drive ModuleManager end-to-end with mocked device I/O."""
    def setUp(self):
        self.spec = ArgumentSpec()
    def test_create_v2c_community_1(self, *args):
        set_module_args(dict(
            version='v2c',
            community='foo',
            source='1.1.1.1',
            port='8080',
            oid='.1',
            access='ro',
            ip_version=4,
            state='present',
            partition='Common',
            password='password',
            server='localhost',
            user='admin'
        ))
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        version_manager = V1Manager(module=ansible_module)
        # Force the create path: the first existence check misses, the
        # post-create verification hits, and device writes are stubbed.
        version_manager.exists = Mock(side_effect=[False, True])
        version_manager.create_on_device = Mock(return_value=True)
        manager = ModuleManager(module=ansible_module)
        manager.get_manager = Mock(return_value=version_manager)
        outcome = manager.exec_module()
        assert outcome['changed'] is True
    def test_create_v1_community_1(self, *args):
        set_module_args(dict(
            version='v1',
            community='foo',
            source='1.1.1.1',
            port='8080',
            oid='.1',
            access='ro',
            ip_version=4,
            state='present',
            partition='Common',
            password='password',
            server='localhost',
            user='admin'
        ))
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        version_manager = V1Manager(module=ansible_module)
        # Same create-path stubbing as above.
        version_manager.exists = Mock(side_effect=[False, True])
        version_manager.create_on_device = Mock(return_value=True)
        manager = ModuleManager(module=ansible_module)
        manager.get_manager = Mock(return_value=version_manager)
        outcome = manager.exec_module()
        assert outcome['changed'] is True
    def test_create_v3_community_1(self, *args):
        set_module_args(dict(
            version='v3',
            oid='.1',
            access='ro',
            snmp_username='admin',
            snmp_auth_protocol='md5',
            snmp_auth_password='secretsecret',
            snmp_privacy_protocol='des',
            snmp_privacy_password='secretsecret',
            state='present',
            partition='Common',
            password='password',
            server='localhost',
            user='admin'
        ))
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        version_manager = V2Manager(module=ansible_module)
        # v3 communities are handled by the V2Manager; same stubbing.
        version_manager.exists = Mock(side_effect=[False, True])
        version_manager.create_on_device = Mock(return_value=True)
        manager = ModuleManager(module=ansible_module)
        manager.get_manager = Mock(return_value=version_manager)
        outcome = manager.exec_module()
        assert outcome['changed'] is True
    def test_create_v3_community_2(self, *args):
        # Identical to the previous case except that 'oid' is omitted,
        # which must make creation fail with a descriptive error.
        set_module_args(dict(
            version='v3',
            access='ro',
            snmp_username='admin',
            snmp_auth_protocol='md5',
            snmp_auth_password='secretsecret',
            snmp_privacy_protocol='des',
            snmp_privacy_password='secretsecret',
            state='present',
            partition='Common',
            password='password',
            server='localhost',
            user='admin'
        ))
        ansible_module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        version_manager = V2Manager(module=ansible_module)
        version_manager.exists = Mock(side_effect=[False, True])
        version_manager.create_on_device = Mock(return_value=True)
        manager = ModuleManager(module=ansible_module)
        manager.get_manager = Mock(return_value=version_manager)
        with pytest.raises(F5ModuleError) as err:
            manager.exec_module()
        assert 'oid must be specified when creating a new v3 community.' == str(err.value)
| gpl-3.0 |
agaldona/odoo-addons | task_delegation_wizard/models/project_task.py | 3 | 3131 | # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp.osv import orm
from openerp.tools.translate import _
class ProjectTask(orm.Model):
    _inherit = 'project.task'
    def do_delegate(self, cr, uid, ids, delegate_data=None, context=None):
        """ Delegate Task to another users.

        When ``delegate_data['split_in']`` is set, each task in ``ids`` is
        split into that many delegated copies and the original keeps the
        remaining planned hours; otherwise the standard delegation of the
        parent model is performed.

        :param delegate_data: delegation values; ``user_id`` is mandatory.
        :return: dict mapping each original task id to the id of the (last)
            delegated task created for it.
        """
        if delegate_data is None:
            delegate_data = {}
        assert delegate_data['user_id'],\
            _("Delegated User should be specified")
        delegated_tasks = {}
        if not delegate_data.get('split_in'):
            # No split requested: defer to the standard delegation logic.
            # Bug fix: the keyword was previously misspelled 'contex', so
            # the caller's context never reached the parent implementation.
            delegated_tasks = super(ProjectTask, self).do_delegate(
                cr, uid, ids, delegate_data=delegate_data, context=context)
        else:
            for task in self.browse(cr, uid, ids, context=context):
                for i in range(delegate_data['split_in']):
                    # One copy per requested split; assignee, children and
                    # work entries are reset on the copy.
                    delegated_task_id = self.copy(cr, uid, task.id, {
                        'name': delegate_data['name'],
                        'user_id': False,
                        'project_id': (delegate_data['project_id'] and
                                       delegate_data['project_id'][0] or
                                       False),
                        'stage_id': task.stage_id.id,
                        'planned_hours': delegate_data['planned_hours'] or 0.0,
                        'remaining_hours': (delegate_data['planned_hours'] or
                                            0.0),
                        'parent_ids': [(6, 0, [task.id])],
                        'description': (
                            delegate_data['new_task_description'] or ''),
                        'child_ids': [],
                        'work_ids': [],
                    }, context=context)
                    self._delegate_task_attachments(cr, uid, task.id,
                                                    delegated_task_id,
                                                    context=context)
                # Hours kept on the original task: one 'planned_hours_me'
                # slot per delegated copy.
                remain = (delegate_data['split_in'] *
                          delegate_data['planned_hours_me'])
                task.write({
                    'name': delegate_data['prefix'],
                    'remaining_hours': remain,
                }, context=context)
                # NOTE(review): only the id of the last copy is recorded,
                # matching the parent API's one-id-per-task contract.
                delegated_tasks[task.id] = delegated_task_id
        return delegated_tasks
| agpl-3.0 |
syci/l10n-spain | l10n_es_aeat_sii/__openerp__.py | 4 | 2103 | # -*- coding: utf-8 -*-
# Copyright 2017 Acysos - Ignacio Ibeas <ignacio@acysos.com>
# Copyright 2017 Diagram Software S.L.
# Copyright 2017 MINORISA - <ramon.guiu@minorisa.net>
# Copyright 2017 Studio73 - Pablo Fuentes <pablo@studio73.es>
# Copyright 2017 Studio73 - Jordi Tolsà <jordi@studio73.es>
# Copyright 2017 Factor Libre - Ismael Calvo
# Copyright 2017 Otherway - Pedro Rodríguez Gil
# Copyright 2017 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
    # Odoo addon manifest for the Spanish SII (immediate VAT information
    # supply) module.
    "name": "Suministro Inmediato de Información en el IVA",
    "version": "8.0.2.17.5",
    "category": "Accounting & Finance",
    "website": "https://odoospain.odoo.com",
    "author": "Acysos S.L.,"
              "Diagram,"
              "Minorisa,"
              "Studio73,"
              "FactorLibre,"
              "Comunitea,"
              "Otherway,"
              "Tecnativa,"
              "Odoo Community Association (OCA)",
    "license": "AGPL-3",
    "application": False,
    "installable": True,
    # Python libraries that must be importable on the server for this
    # module to install.
    "external_dependencies": {
        "python": [
            "zeep",
            "requests",
            "OpenSSL",
        ],
    },
    # Other Odoo modules this addon builds on.
    "depends": [
        "account_refund_original",
        "l10n_es_aeat",
        "connector",
        "account_invoice_currency",
    ],
    # XML/CSV resources: configuration data, views, wizards, SII mapping
    # data and access rules.
    "data": [
        "data/ir_config_parameter.xml",
        "views/res_company_view.xml",
        "views/account_invoice_view.xml",
        "views/aeat_sii_view.xml",
        "wizards/aeat_sii_password_view.xml",
        "wizards/account_invoice_refund_views.xml",
        "wizards/send_first_semester.xml",
        "views/aeat_sii_mapping_registration_keys_view.xml",
        "data/aeat_sii_mapping_registration_keys_data.xml",
        "views/aeat_sii_map_view.xml",
        "data/aeat_sii_map_data.xml",
        "security/ir.model.access.csv",
        "security/aeat_sii.xml",
        "views/product_view.xml",
        "views/queue_job_views.xml",
        "views/account_fiscal_position_view.xml",
        "views/res_partner_views.xml",
    ],
    # Runs after installation to tag pre-existing invoices with SII keys.
    "post_init_hook": "add_key_to_existing_invoices",
}
| agpl-3.0 |
Dunkas12/BeepBoopBot | lib/requests/packages/urllib3/__init__.py | 79 | 2853 | """
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        """Fallback for Python < 2.7, where logging.NullHandler does not
        exist: a handler that silently drops every record."""
        def emit(self, record):
            pass
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.20'
# Public API of the package.
__all__ = (
    'HTTPConnectionPool',
    'HTTPSConnectionPool',
    'PoolManager',
    'ProxyManager',
    'HTTPResponse',
    'Retry',
    'Timeout',
    'add_stderr_logger',
    'connection_from_url',
    'disable_warnings',
    'encode_multipart_formdata',
    'get_host',
    'make_headers',
    'proxy_from_url',
)
# Attach a no-op handler so applications that have not configured logging do
# not see "No handler found" warnings from this package's logger.
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """
    Attach a ``StreamHandler`` to this package's logger for quick debugging.

    Returns the handler (already added to the logger) so callers can later
    remove or reconfigure it.
    """
    # This function must live in this __init__.py so __name__ resolves to the
    # package even when urllib3 is vendored within another distribution.
    package_logger = logging.getLogger(__name__)
    stderr_handler = logging.StreamHandler()
    log_format = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    stderr_handler.setFormatter(log_format)
    package_logger.addHandler(stderr_handler)
    package_logger.setLevel(level)
    package_logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return stderr_handler
# ... Clean up: NullHandler was only needed to initialise the package logger
# above, so keep it out of the module namespace.
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
                      append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.

    Pass a more specific ``HTTPWarning`` subclass as *category* to silence
    only that family of warnings; the default silences them all.
    """
    warnings.simplefilter('ignore', category)
| gpl-3.0 |
fernandojvdasilva/centering_py | src/anaphor_resolution/Centering_Elements.py | 1 | 7232 | '''
Created on 10/11/2009
@author: Fernando
Copyright 2009-2013 Fernando J. V. da Silva
This file is part of centering_py.
centering_py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
centering_py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with centering_py. If not, see <http://www.gnu.org/licenses/>.
'''
from corpus.Word import *
from anaphor_resolution.Centering_Algorithm import *
class Centering_Element:
    '''Common base for Centering Theory objects.

    Bundles the bookkeeping shared by utterances, referring expressions
    and centering sets: linked objects, anaphors, referents and the
    candidate Cf/Cb sets.
    '''
    def __init__(self):
        # Objects linked to this element by utility.
        self.linked_objects = {}
        # Anaphors attached to this element, and the currently chosen one.
        self.anaphors = []
        self.anaphor = None
        # Referential expressions this element resolves to.
        self.referent_list = []
        # Candidate Cf/Cb sets for this sentence, and the selected one.
        self.centeringSets = []
        self.centeringSet = None
class Un(Centering_Element):
    '''A single utterance (sentence) as seen by the centering algorithm.'''
    def __init__(self):
        Centering_Element.__init__(self)
        # Referring expressions found in this utterance, and its position
        # within the discourse.
        self.re_set = []
        self.index = 0
    def addCenteringSet(self, cb, cf, anaphor=None):
        '''Create a CenteringSet and attach it to *anaphor* when given,
        otherwise to this utterance.'''
        new_set = CenteringSet(cb, cf, anaphor)
        owner = self if anaphor is None else anaphor
        owner.centeringSets.append(new_set)
class CenteringSet(Centering_Element):
    '''A candidate pairing of a Backward Looking Center (Cb) with a set of
    Forward Looking Centers (Cf) for one utterance.'''
    def __init__(self, cb=None, cf=None, anaphor=None):
        Centering_Element.__init__(self)
        # RE's representing the Referential Expressions of this Cf set.
        self.Cf = cf if cf is not None else []
        # Word representing the Referential Expression for this Cb.
        self.Cb = cb
        # Type of center transition, assuming this Cf.
        self.transition_type = None
        # The anaphor which "owns" this centering set.
        self.anaphor = anaphor
        self.referent_list = []
        if cb is not None and anaphor is not None:
            self.referent_list.append(cb)
        # Marks the filter applied by the BFP algorithm.
        self.filtered = None
    def transition_asString(self):
        '''Return the transition type as a readable label (None if unset).'''
        labels = {
            Centering_Algorithm.CONTINUING: 'CONTINUING',
            Centering_Algorithm.RETAINING: 'RETAINING',
            Centering_Algorithm.SMOOTH_SHIFT: 'SMOOTH-SHIFT',
            Centering_Algorithm.SHIFT: 'SHIFT',
        }
        return labels.get(self.transition_type)
    def asString(self):
        '''Pretty-print this set's Cf list, Cb and transition type.'''
        pieces = ['\t\tCf = {']
        for entry in self.Cf:
            if type(entry) is dict:
                # Entry pairs an anaphor with an (optional) resolved referent.
                text = entry['anaphor'].word.properties['text']
                if entry['referent'] is not None:
                    text += '=' + entry['referent'].word.properties['text']
                pieces.append(text + ', ')
            else:
                pieces.append(entry.referents_asString() + ', ')
        pieces.append('}\n')
        if self.Cb is not None:
            pieces.append('\t\tCb = ' + self.Cb.word.properties['text'] + '\n')
        else:
            pieces.append('\t\tCb = None\n')
        pieces.append('\t\tTransition = ' + str(self.transition_asString()) + '\n')
        return ''.join(pieces)
    def referents_asString(self):
        '''Render the anaphor's text followed by its "=referent" chain.'''
        parts = [self.anaphor.word.properties['text']]
        for referent in self.referent_list:
            parts.append('=' + referent.referents_asString())
        return ''.join(parts)
class RE(Centering_Element):
    '''A Referring Expression: a word that is a pronoun, a noun phrase
    head or a proper name.'''
    def __init__(self, word=None, re=None):
        Centering_Element.__init__(self)
        # Word which represents this RE; may be copied from another RE.
        if word is not None:
            self.word = word
        else:
            self.word = re.word if re is not None else None
        self.marked = False
        # Rank information used by the SH_Order algorithm.
        self.inf_status = -1
        # Utterance index encoded as utterance*1000 + entity position,
        # e.g. 2010 is the 10th entity of the 2nd utterance.
        self.utt = -1
    def referents_asString(self):
        '''Render this RE's text, following pronouns through their
        centering set and appending any "=referent" chain.'''
        resolved_pronoun = (self.word.properties['tag'] == Word.PRON
                            and self.centeringSet is not None)
        if resolved_pronoun:
            return self.centeringSet.referents_asString()
        if self.referent_list:
            chain = [ref.referents_asString() for ref in self.referent_list]
            return self.word.properties['text'] + '=' + ''.join(chain)
        return self.word.properties['text']
    def get_entity_referent(self):
        '''Follow the referent chain until a noun or proper-name RE is
        found; None when the chain is empty or broken.'''
        if not self.referent_list or self.referent_list[0] is None:
            return None
        head = self.referent_list[0]
        if head.word.properties['tag'] in (Word.PROP, Word.N):
            return head
        return head.get_entity_referent()
    def __getitem__(self, key):
        '''Dict-like access to the sorting keys used by the algorithms;
        unknown keys yield None.'''
        lookups = {
            'rank': lambda: self.word['rank'],
            'utt': lambda: self.utt,
            'word_id': lambda: self.word.properties['id'],
        }
        getter = lookups.get(key)
        return getter() if getter is not None else None
    def has_key(self, key):
        '''Python-2-style membership test for the supported keys.'''
        return key in ('rank', 'utt', 'word_id')
    @staticmethod
    def word_set_to_re_set(word_set):
        '''Wrap every Word in *word_set* in a fresh RE.'''
        return [RE(word) for word in word_set]
    @staticmethod
    def re_set_clone(re_set):
        '''Shallow-clone a list of REs (each copy shares its Word).'''
        return [RE(None, original) for original in re_set]
| gpl-3.0 |
mims2707/bite-project | tools/bugs/server/appengine/models/templates/template.py | 17 | 5795 | # Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for bug templates.
Bug Templates provides a model for a template for a type of bug. A project
owner can define templates for their project, which pre-populate the
backend project that the bug should be filed to and provide a starting place for
the bug report writer to write their notes. Bug Templates are stored in
AppEngine's Datastore.
"""
__author__ = 'ralphj@google.com (Julie Ralph)'
import simplejson
from google.appengine.ext import db
class BugTemplate(db.Model):
  """Models a Bug Template stored in AppEngine's Datastore.
  TODO(ralphj): urls should be more flexible and should be able to
  handle patterns.
  Attributes:
    template_id: A unique string identifier for this template.
    name: A human-readable name for this template.
    urls: A list of urls that this template should be used for.
    project: The human-readable project that this template is associated with.
    backend_project: An identifier for the project that is compatible with the
        backend provider.
    backend_provider: The issue tracking system that this template is
        associated with.
    selector_text: Text that should appear when the user is asked to pick a
        template, under 'What kind of problem are you reporting?'
    note_text: Text that should appear in the notes field.
    display_order: An integer declaring the relative position where this
        template should be displayed in lists. Higher numbers are displayed
        after lower numbers.
  """
  template_id = db.StringProperty(required=True)
  name = db.StringProperty(required=True)
  urls = db.StringListProperty(required=True)
  project = db.StringProperty(required=True)
  backend_project = db.StringProperty(required=True)
  backend_provider = db.StringProperty(required=True)
  selector_text = db.StringProperty(required=True)
  # TextProperty: unindexed, allows >1500 bytes, unlike StringProperty.
  note_text = db.TextProperty(required=True)
  display_order = db.IntegerProperty(required=True, default=0)
class BugTemplateEncoder(simplejson.JSONEncoder):
  """JSON encoder that knows how to serialize BugTemplate objects."""
  # Disable 'Invalid method name' lint error.
  # pylint: disable-msg=C6409
  def default(self, obj):
    """Serialize obj, mapping BugTemplate fields to camelCase JSON keys.

    Args:
      obj: Object to serialize.

    Returns:
      A serializable representation of the Bug Template object.
    """
    if not isinstance(obj, BugTemplate):
      # Defer to the stock encoder (which raises TypeError) for anything else.
      return simplejson.JSONEncoder.default(self, obj)
    return {'id': obj.template_id,
            'name': obj.name,
            'urls': obj.urls,
            'project': obj.project,
            'backendProject': obj.backend_project,
            'backendProvider': obj.backend_provider,
            'selectorText': obj.selector_text,
            'noteText': obj.note_text,
            'displayOrder': obj.display_order}
def JsonEncode(template):
  """Serialize a bug template (or a list of them) to a JSON string.

  Args:
    template: A bug template to encode.

  Returns:
    A JSON-encoded string representation of the bug template.
  """
  return simplejson.dumps(template, cls=BugTemplateEncoder)
def StoreBugTemplate(template_id, name, urls, project, backend_project,
                     backend_provider, selector_text, note_text, display_order):
  """Creates or updates a bug template in the App Engine Datastore.

  If there is already a Bug Template with the same template_id, its fields
  are overwritten; otherwise a new template is created.

  Args:
    template_id: A unique string identifier for this template.
    name: A human-readable name for this template.
    urls: A list of urls that this template should be used for.
    project: The project that this template is associated with.
    backend_project: An identifier for the project that is compatible with
        the backend provider.
    backend_provider: The issue tracking system that this template is
        associated with.
    selector_text: Text that should appear when the user is asked to pick a
        template, under 'What kind of problem are you reporting?'
    note_text: Text that should appear in the notes field.
    display_order: An integer declaring the relative position where this
        template should be displayed in lists. Higher numbers are displayed
        after lower numbers.

  Returns:
    The stored bug template.
  """
  # Single source of truth for the mutable fields, used both on create
  # and on update.
  updates = {'name': name,
             'urls': urls,
             'project': project,
             'backend_project': backend_project,
             'backend_provider': backend_provider,
             'selector_text': selector_text,
             'note_text': note_text,
             'display_order': display_order}
  template = BugTemplate.get_by_key_name(template_id)
  if template is None:
    # The key name doubles as the template_id so lookups stay cheap.
    template = BugTemplate(key_name=template_id, template_id=template_id,
                           **updates)
  else:
    for attr, value in updates.items():
      setattr(template, attr, value)
  template.put()
  return template
| apache-2.0 |
KNMI/VERCE | verce-hpc-pe/src/networkx/algorithms/vitality.py | 72 | 2488 | """
Vitality measures.
"""
# Copyright (C) 2012 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)',
'Renato Fabbri'])
__all__ = ['closeness_vitality']
def weiner_index(G, weight=None):
    # Sum of shortest-path lengths over all (source, target) pairs,
    # optionally weighting edges by the given edge attribute.
    if weight is None:
        def path_lengths(source):
            return nx.single_source_shortest_path_length(G, source)
    else:
        def path_lengths(source):
            return nx.single_source_dijkstra_path_length(G, source,
                                                         weight=weight)
    total = 0.0
    for source in G:
        total += sum(path_lengths(source).values())
    return total
def closeness_vitality(G, weight=None):
    """Compute closeness vitality for nodes.
    Closeness vitality of a node is the change in the sum of distances
    between all node pairs when excluding that node.
    Parameters
    ----------
    G : graph
    weight : None or string (optional)
       The name of the edge attribute used as weight. If None the edge
       weights are ignored.
    Returns
    -------
    nodes : dictionary
       Dictionary with nodes as keys and closeness vitality as the value.
    Examples
    --------
    >>> G=nx.cycle_graph(3)
    >>> nx.closeness_vitality(G)
    {0: 4.0, 1: 4.0, 2: 4.0}
    See Also
    --------
    closeness_centrality()
    References
    ----------
    .. [1] Ulrik Brandes, Sec. 3.6.2 in
       Network Analysis: Methodological Foundations, Springer, 2005.
       http://books.google.com/books?id=TTNhSm7HYrIC
    """
    multigraph = G.is_multigraph()
    # Wiener index of the full graph; each node's vitality is the drop from
    # this baseline once its incident edges are removed.
    wig = weiner_index(G,weight)
    closeness_vitality = {}
    # NOTE(review): G is mutated in place (edges removed then re-added) for
    # each node, so this function is not safe for concurrent use of the same
    # graph object.
    for n in G:
        # remove edges connected to node n and keep list of edges with data
        # could remove node n but it doesn't count anyway
        if multigraph:
            # keys=True so parallel edges can be restored exactly as they were
            edges = G.edges(n,data=True,keys=True)
            if G.is_directed():
                edges += G.in_edges(n,data=True,keys=True)
        else:
            edges = G.edges(n,data=True)
            if G.is_directed():
                edges += G.in_edges(n,data=True)
        G.remove_edges_from(edges)
        closeness_vitality[n] = wig - weiner_index(G,weight)
        # add edges and data back to graph
        G.add_edges_from(edges)
    return closeness_vitality
| mit |
AlexanderVangelov/pjsip | pjsip-apps/src/pygui/settings.py | 2 | 15900 | # $Id: settings.py 4704 2014-01-16 05:30:46Z ming $
#
# pjsua Python GUI Demo
#
# Copyright (C)2013 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
if sys.version_info[0] >= 3: # Python 3
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox as msgbox
else:
import Tkinter as tk
import tkMessageBox as msgbox
import ttk
import pjsua2 as pj
#import application
# Transport setting
class SipTransportConfig:
    """Persistable setting for one SIP transport: its pjsip type constant,
    an enabled flag, and the underlying pjsua2 TransportConfig."""
    def __init__(self, type, enabled):
        #pj.PersistentObject.__init__(self)
        self.type = type
        self.enabled = enabled
        self.config = pj.TransportConfig()
    def readObject(self, node):
        """Load this transport setting from a persistent document node."""
        container = node.readContainer("SipTransport")
        self.type = container.readInt("type")
        self.enabled = container.readBool("enabled")
        self.config.readObject(container)
    def writeObject(self, node):
        """Store this transport setting into a persistent document node,
        mirroring the field order used by readObject()."""
        container = node.writeNewContainer("SipTransport")
        container.writeInt("type", self.type)
        container.writeBool("enabled", self.enabled)
        self.config.writeObject(container)
# Account setting with buddy list
class AccConfig:
    """Account settings together with the account's buddy list."""
    def __init__(self):
        self.enabled = True
        self.config = pj.AccountConfig()
        self.buddyConfigs = []
    def readObject(self, node):
        """Load account and buddy settings from a persistent node."""
        acc = node.readContainer("Account")
        self.enabled = acc.readBool("enabled")
        self.config.readObject(acc)
        buddies = acc.readArray("buddies")
        while buddies.hasUnread():
            cfg = pj.BuddyConfig()
            cfg.readObject(buddies)
            self.buddyConfigs.append(cfg)
    def writeObject(self, node):
        """Store account and buddy settings into a persistent node."""
        acc = node.writeNewContainer("Account")
        acc.writeBool("enabled", self.enabled)
        self.config.writeObject(acc)
        buddies = acc.writeNewArray("buddies")
        for cfg in self.buddyConfigs:
            buddies.writeObject(cfg)
# Master settings
class AppConfig:
    """Top-level application settings: endpoint config, the three SIP
    transports (UDP/TCP/TLS) and the list of accounts."""
    def __init__(self):
        self.epConfig = pj.EpConfig()
        self.udp = SipTransportConfig(pj.PJSIP_TRANSPORT_UDP, True)
        self.tcp = SipTransportConfig(pj.PJSIP_TRANSPORT_TCP, True)
        self.tls = SipTransportConfig(pj.PJSIP_TRANSPORT_TLS, False)
        self.accounts = []      # list of AccConfig
    def loadFile(self, file):
        """Populate all settings from the JSON document *file*."""
        doc = pj.JsonDocument()
        doc.loadFile(file)
        root = doc.getRootContainer()
        self.epConfig = pj.EpConfig()
        self.epConfig.readObject(root)
        transports = root.readArray("transports")
        self.udp.readObject(transports)
        self.tcp.readObject(transports)
        # Older settings files may not contain a TLS entry.
        if transports.hasUnread():
            self.tls.readObject(transports)
        accounts = root.readArray("accounts")
        while accounts.hasUnread():
            acc = AccConfig()
            acc.readObject(accounts)
            self.accounts.append(acc)
    def saveFile(self, file):
        """Write all settings to the JSON document *file*."""
        doc = pj.JsonDocument()
        # Endpoint config first, then transports, then accounts — the same
        # order loadFile() expects.
        doc.writeObject(self.epConfig)
        transports = doc.writeNewArray("transports")
        self.udp.writeObject(transports)
        self.tcp.writeObject(transports)
        self.tls.writeObject(transports)
        accounts = doc.writeNewArray("accounts")
        for acc in self.accounts:
            accounts.writeObject(acc)
        doc.saveFile(file)
# Settings dialog
class Dialog(tk.Toplevel):
    """
    Application settings dialog (Basic / Network / Media tabs) to view and
    manipulate an AppConfig instance. Changes are written back to the config
    only when the user presses Ok.
    """
    def __init__(self, parent, cfg):
        tk.Toplevel.__init__(self, parent)
        self.transient(parent)
        self.parent = parent
        self.title('Settings')
        self.frm = ttk.Frame(self)
        self.frm.pack(expand='yes', fill='both')
        self.isOk = False
        self.cfg = cfg
        self.createWidgets()
    def doModal(self):
        """Run the dialog modally; return True if the user pressed Ok."""
        if self.parent:
            self.parent.wait_window(self)
        else:
            self.wait_window(self)
        return self.isOk
    def createWidgets(self):
        """Create the notebook, the Ok/Cancel buttons and the three tabs."""
        # The notebook
        self.frm.rowconfigure(0, weight=1)
        self.frm.rowconfigure(1, weight=0)
        self.frm.columnconfigure(0, weight=1)
        self.frm.columnconfigure(1, weight=1)
        self.wTab = ttk.Notebook(self.frm)
        self.wTab.grid(column=0, row=0, columnspan=2, padx=10, pady=10, ipadx=20, ipady=20, sticky=tk.N+tk.S+tk.W+tk.E)
        # Main buttons
        btnOk = ttk.Button(self.frm, text='Ok', command=self.onOk)
        btnOk.grid(column=0, row=1, sticky=tk.E, padx=20, pady=10)
        btnCancel = ttk.Button(self.frm, text='Cancel', command=self.onCancel)
        btnCancel.grid(column=1, row=1, sticky=tk.W, padx=20, pady=10)
        # Tabs
        self.createBasicTab()
        self.createNetworkTab()
        self.createMediaTab()
    def createBasicTab(self):
        """Build the 'Basic' tab: read-only UA info plus log file settings."""
        # Prepare the variables to set/receive values from GUI
        self.cfgLogFile = tk.StringVar(value=self.cfg.epConfig.logConfig.filename)
        self.cfgLogAppend = tk.BooleanVar(value=True if (self.cfg.epConfig.logConfig.fileFlags & pj.PJ_O_APPEND) else False)
        # Build the tab page
        frm = ttk.Frame(self.frm)
        frm.columnconfigure(0, weight=1)
        frm.columnconfigure(1, weight=2)
        row = 0
        ttk.Label(frm, text='User Agent:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        ttk.Label(frm, text=self.cfg.epConfig.uaConfig.userAgent).grid(row=row, column=1, sticky=tk.W, pady=2, padx=6)
        row += 1
        ttk.Label(frm, text='Max calls:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        ttk.Label(frm, text=str(self.cfg.epConfig.uaConfig.maxCalls)).grid(row=row, column=1, sticky=tk.W, pady=2, padx=6)
        row += 1
        ttk.Label(frm, text='Log file:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        ttk.Entry(frm, textvariable=self.cfgLogFile, width=32).grid(row=row, column=1, sticky=tk.W, padx=6)
        row += 1
        ttk.Checkbutton(frm, text='Append log file', variable=self.cfgLogAppend).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
        self.wTab.add(frm, text='Basic')
    def createNetworkTab(self):
        """Build the 'Network' tab: SIP transports, DNS and STUN settings."""
        # Only the first nameserver/STUN server is editable from the GUI.
        self.cfgNameserver = tk.StringVar()
        if len(self.cfg.epConfig.uaConfig.nameserver):
            self.cfgNameserver.set(self.cfg.epConfig.uaConfig.nameserver[0])
        self.cfgStunServer = tk.StringVar()
        if len(self.cfg.epConfig.uaConfig.stunServer):
            self.cfgStunServer.set(self.cfg.epConfig.uaConfig.stunServer[0])
        self.cfgStunIgnoreError = tk.BooleanVar(value=self.cfg.epConfig.uaConfig.stunIgnoreFailure)
        self.cfgUdpEnabled = tk.BooleanVar(value=self.cfg.udp.enabled)
        self.cfgUdpPort = tk.IntVar(value=self.cfg.udp.config.port)
        self.cfgTcpEnabled = tk.BooleanVar(value=self.cfg.tcp.enabled)
        self.cfgTcpPort = tk.IntVar(value=self.cfg.tcp.config.port)
        self.cfgTlsEnabled = tk.BooleanVar(value=self.cfg.tls.enabled)
        self.cfgTlsPort = tk.IntVar(value=self.cfg.tls.config.port)
        self.cfgTlsCaFile = tk.StringVar(value=self.cfg.tls.config.tlsConfig.CaListFile)
        self.cfgTlsCertFile = tk.StringVar(value=self.cfg.tls.config.tlsConfig.certFile)
        self.cfgTlsVerifyClient = tk.BooleanVar(value=self.cfg.tls.config.tlsConfig.verifyClient)
        self.cfgTlsVerifyServer = tk.BooleanVar(value=self.cfg.tls.config.tlsConfig.verifyServer)
        # Build the tab page
        frm = ttk.Frame(self.frm)
        frm.columnconfigure(0, weight=1)
        frm.columnconfigure(1, weight=2)
        row = 0
        ttk.Checkbutton(frm, text='Enable UDP transport', variable=self.cfgUdpEnabled).grid(row=row, column=0, sticky=tk.W, padx=6, pady=2)
        row += 1
        ttk.Label(frm, text='UDP port:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        tk.Spinbox(frm, from_=0, to=65535, textvariable=self.cfgUdpPort, width=5).grid(row=row, column=1, sticky=tk.W, padx=6)
        ttk.Label(frm, text='(0 for any)').grid(row=row, column=1, sticky=tk.E, pady=6, padx=6)
        row += 1
        ttk.Checkbutton(frm, text='Enable TCP transport', variable=self.cfgTcpEnabled).grid(row=row, column=0, sticky=tk.W, padx=6, pady=2)
        row += 1
        ttk.Label(frm, text='TCP port:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        tk.Spinbox(frm, from_=0, to=65535, textvariable=self.cfgTcpPort, width=5).grid(row=row, column=1, sticky=tk.W, padx=6)
        ttk.Label(frm, text='(0 for any)').grid(row=row, column=1, sticky=tk.E, pady=6, padx=6)
        row += 1
        ttk.Checkbutton(frm, text='Enable TLS transport', variable=self.cfgTlsEnabled).grid(row=row, column=0, sticky=tk.W, padx=6, pady=2)
        row += 1
        ttk.Label(frm, text='TLS port:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        tk.Spinbox(frm, from_=0, to=65535, textvariable=self.cfgTlsPort, width=5).grid(row=row, column=1, sticky=tk.W, padx=6)
        ttk.Label(frm, text='(0 for any)').grid(row=row, column=1, sticky=tk.E, pady=6, padx=6)
        row += 1
        ttk.Label(frm, text='TLS CA file:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        ttk.Entry(frm, textvariable=self.cfgTlsCaFile, width=32).grid(row=row, column=1, sticky=tk.W, padx=6)
        row += 1
        ttk.Label(frm, text='TLS cert file:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        ttk.Entry(frm, textvariable=self.cfgTlsCertFile, width=32).grid(row=row, column=1, sticky=tk.W, padx=6)
        row += 1
        ttk.Checkbutton(frm, text='TLS verify server', variable=self.cfgTlsVerifyServer).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
        row += 1
        ttk.Checkbutton(frm, text='TLS verify client', variable=self.cfgTlsVerifyClient).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
        row += 1
        ttk.Label(frm, text='DNS and STUN:').grid(row=row, column=0, sticky=tk.W, pady=2, padx=8)
        row += 1
        ttk.Label(frm, text='Nameserver:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        ttk.Entry(frm, textvariable=self.cfgNameserver, width=32).grid(row=row, column=1, sticky=tk.W, padx=6)
        row += 1
        ttk.Label(frm, text='STUN Server:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        ttk.Entry(frm, textvariable=self.cfgStunServer, width=32).grid(row=row, column=1, sticky=tk.W, padx=6)
        row += 1
        ttk.Checkbutton(frm, text='Ignore STUN failure at startup', variable=self.cfgStunIgnoreError).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
        self.wTab.add(frm, text='Network')
    def createMediaTab(self):
        """Build the 'Media' tab: clock rates, ptime, quality, VAD and EC."""
        self.cfgClockrate = tk.IntVar(value=self.cfg.epConfig.medConfig.clockRate)
        self.cfgSndClockrate = tk.IntVar(value=self.cfg.epConfig.medConfig.sndClockRate)
        self.cfgAudioPtime = tk.IntVar(value=self.cfg.epConfig.medConfig.audioFramePtime)
        self.cfgMediaQuality = tk.IntVar(value=self.cfg.epConfig.medConfig.quality)
        self.cfgCodecPtime = tk.IntVar(value=self.cfg.epConfig.medConfig.ptime)
        # GUI exposes "VAD enabled", the config stores the inverse (noVad).
        self.cfgVad = tk.BooleanVar(value=not self.cfg.epConfig.medConfig.noVad)
        self.cfgEcTailLen = tk.IntVar(value=self.cfg.epConfig.medConfig.ecTailLen)
        # Build the tab page
        frm = ttk.Frame(self.frm)
        frm.columnconfigure(0, weight=1)
        frm.columnconfigure(1, weight=2)
        row = 0
        ttk.Label(frm, text='Max media ports:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        ttk.Label(frm, text=str(self.cfg.epConfig.medConfig.maxMediaPorts)).grid(row=row, column=1, sticky=tk.W, pady=2, padx=6)
        row += 1
        ttk.Label(frm, text='Core clock rate:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        tk.Spinbox(frm, from_=8000, to=48000, increment=8000, textvariable=self.cfgClockrate, width=5).grid(row=row, column=1, sticky=tk.W, padx=6)
        row += 1
        ttk.Label(frm, text='Snd device clock rate:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        tk.Spinbox(frm, from_=0, to=48000, increment=8000, textvariable=self.cfgSndClockrate, width=5).grid(row=row, column=1, sticky=tk.W, padx=6)
        ttk.Label(frm, text='(0: follow core)').grid(row=row, column=1, sticky=tk.E, pady=6, padx=6)
        row += 1
        ttk.Label(frm, text='Core ptime:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        tk.Spinbox(frm, from_=10, to=400, increment=10, textvariable=self.cfgAudioPtime, width=3).grid(row=row, column=1, sticky=tk.W, padx=6)
        row += 1
        ttk.Label(frm, text='RTP ptime:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        tk.Spinbox(frm, from_=20, to=400, increment=10, textvariable=self.cfgCodecPtime, width=3).grid(row=row, column=1, sticky=tk.W, padx=6)
        row += 1
        ttk.Label(frm, text='Media quality (1-10):').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        tk.Spinbox(frm, from_=1, to=10, textvariable=self.cfgMediaQuality, width=5).grid(row=row, column=1, sticky=tk.W, padx=6)
        row += 1
        ttk.Label(frm, text='VAD:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        ttk.Checkbutton(frm, text='Enable', variable=self.cfgVad).grid(row=row, column=1, sticky=tk.W, padx=6, pady=2)
        row += 1
        ttk.Label(frm, text='Echo canceller tail length:').grid(row=row, column=0, sticky=tk.E, pady=2, padx=8)
        tk.Spinbox(frm, from_=0, to=400, increment=10, textvariable=self.cfgEcTailLen, width=3).grid(row=row, column=1, sticky=tk.W, padx=6)
        ttk.Label(frm, text='(ms, 0 to disable)').grid(row=row, column=1, sticky=tk.E, pady=6, padx=6)
        self.wTab.add(frm, text='Media')
    def onOk(self):
        """Validate the inputs, copy the GUI values back into self.cfg and
        close the dialog with isOk=True."""
        # Check basic settings
        errors = ""
        if errors:
            msgbox.showerror("Error detected:", errors)
            return
        # Basic settings
        self.cfg.epConfig.logConfig.filename = self.cfgLogFile.get()
        # BUG FIX: the old code only OR-ed the append flag in, so once set it
        # could never be cleared by unticking the checkbox. Set or clear the
        # bit explicitly instead.
        if self.cfgLogAppend.get():
            self.cfg.epConfig.logConfig.fileFlags |= pj.PJ_O_APPEND
        else:
            self.cfg.epConfig.logConfig.fileFlags &= ~pj.PJ_O_APPEND
        # Network settings
        self.cfg.epConfig.uaConfig.nameserver.clear()
        if len(self.cfgNameserver.get()):
            self.cfg.epConfig.uaConfig.nameserver.append(self.cfgNameserver.get())
        self.cfg.epConfig.uaConfig.stunServer.clear()
        if len(self.cfgStunServer.get()):
            self.cfg.epConfig.uaConfig.stunServer.append(self.cfgStunServer.get())
        self.cfg.epConfig.uaConfig.stunIgnoreFailure = self.cfgStunIgnoreError.get()
        self.cfg.udp.enabled = self.cfgUdpEnabled.get()
        self.cfg.udp.config.port = self.cfgUdpPort.get()
        self.cfg.tcp.enabled = self.cfgTcpEnabled.get()
        self.cfg.tcp.config.port = self.cfgTcpPort.get()
        self.cfg.tls.enabled = self.cfgTlsEnabled.get()
        self.cfg.tls.config.port = self.cfgTlsPort.get()
        self.cfg.tls.config.tlsConfig.CaListFile = self.cfgTlsCaFile.get()
        self.cfg.tls.config.tlsConfig.certFile = self.cfgTlsCertFile.get()
        self.cfg.tls.config.tlsConfig.verifyClient = self.cfgTlsVerifyClient.get()
        self.cfg.tls.config.tlsConfig.verifyServer = self.cfgTlsVerifyServer.get()
        # Media
        self.cfg.epConfig.medConfig.clockRate = self.cfgClockrate.get()
        self.cfg.epConfig.medConfig.sndClockRate = self.cfgSndClockrate.get()
        self.cfg.epConfig.medConfig.audioFramePtime = self.cfgAudioPtime.get()
        self.cfg.epConfig.medConfig.quality = self.cfgMediaQuality.get()
        self.cfg.epConfig.medConfig.ptime = self.cfgCodecPtime.get()
        self.cfg.epConfig.medConfig.noVad = not self.cfgVad.get()
        self.cfg.epConfig.medConfig.ecTailLen = self.cfgEcTailLen.get()
        self.isOk = True
        self.destroy()
    def onCancel(self):
        """Close the dialog without saving (isOk stays False)."""
        self.destroy()
if __name__ == '__main__':
    # Standalone test: load settings from pygui.js, show the dialog and
    # save them back only if the user pressed Ok.
    #application.main()
    acfg = AppConfig()
    acfg.loadFile('pygui.js')
    dlg = Dialog(None, acfg)
    if dlg.doModal():
        acfg.saveFile('pygui.js')
| gpl-2.0 |
cwolferh/heat-scratch | heat/tests/openstack/heat/test_software_component.py | 3 | 10017 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.common import exception as exc
from heat.common import template_format
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
class SoftwareComponentTest(common.HeatTestCase):
    """Unit tests for the OS::Heat::SoftwareComponent resource handlers."""
    def setUp(self):
        super(SoftwareComponentTest, self).setUp()
        self.ctx = utils.dummy_context()
        tpl = '''
heat_template_version: 2013-05-23
resources:
  mysql_component:
    type: OS::Heat::SoftwareComponent
    properties:
      configs:
        - actions: [CREATE]
          config: |
            #!/bin/bash
            echo "Create MySQL"
          tool: script
        - actions: [UPDATE]
          config: |
            #!/bin/bash
            echo "Update MySQL"
          tool: script
      inputs:
        - name: mysql_port
      outputs:
        - name: root_password
'''
        self.template = template_format.parse(tpl)
        self.stack = stack.Stack(
            self.ctx, 'software_component_test_stack',
            template.Template(self.template))
        self.component = self.stack['mysql_component']
        # Replace the engine RPC client with a mock so no engine is needed.
        self.rpc_client = mock.MagicMock()
        self.component._rpc_client = self.rpc_client
    def test_handle_create(self):
        """handle_create() stores the config via RPC and keeps its id."""
        config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
        value = {'id': config_id}
        self.rpc_client.create_software_config.return_value = value
        props = dict(self.component.properties)
        self.component.handle_create()
        self.rpc_client.create_software_config.assert_called_with(
            self.ctx,
            group='component',
            name=None,
            inputs=props['inputs'],
            outputs=props['outputs'],
            config={'configs': props['configs']},
            options=None)
        self.assertEqual(config_id, self.component.resource_id)
    def test_handle_delete(self):
        """handle_delete() is a no-op without an id and swallows NotFound."""
        # BUG FIX: the original set ``self.resource_id`` on the test case
        # instead of on the component under test, so the no-id path was only
        # exercised by accident (the default resource_id happened to be None).
        self.component.resource_id = None
        self.assertIsNone(self.component.handle_delete())
        config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
        self.component.resource_id = config_id
        self.rpc_client.delete_software_config.return_value = None
        self.assertIsNone(self.component.handle_delete())
        # Deleting an already-gone config must not raise.
        self.rpc_client.delete_software_config.side_effect = exc.NotFound
        self.assertIsNone(self.component.handle_delete())
    def test_resolve_attribute(self):
        """'configs' resolves from the stored software config, else None."""
        self.assertIsNone(self.component._resolve_attribute('others'))
        self.component.resource_id = None
        self.assertIsNone(self.component._resolve_attribute('configs'))
        self.component.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
        configs = self.template['resources']['mysql_component'
                                             ]['properties']['configs']
        # configs list is stored in 'config' property of SoftwareConfig
        value = {'config': {'configs': configs}}
        self.rpc_client.show_software_config.return_value = value
        self.assertEqual(configs, self.component._resolve_attribute('configs'))
        # A vanished config resolves to None rather than raising.
        self.rpc_client.show_software_config.side_effect = exc.NotFound
        self.assertIsNone(self.component._resolve_attribute('configs'))
class SoftwareComponentValidationTest(common.HeatTestCase):
    # Scenario-driven validation tests (testscenarios-style): each entry is
    # (name, dict(snippet=..., err=..., err_msg=...)) where `snippet` is the
    # resources section of a template, `err` the expected exception class
    # (None for a valid template) and `err_msg` a substring expected in the
    # raised error's message.
    scenarios = [
        (
            'component_full',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       - actions: [CREATE]
                         config: |
                           #!/bin/bash
                           echo CREATE $foo
                         tool: script
                     inputs:
                       - name: foo
                     outputs:
                       - name: bar
                     options:
                       opt1: blah
                 ''',
                 err=None,
                 err_msg=None)
        ),
        (
            'no_input_output_options',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       - actions: [CREATE]
                         config: |
                           #!/bin/bash
                           echo CREATE $foo
                         tool: script
                 ''',
                 err=None,
                 err_msg=None)
        ),
        (
            'wrong_property_config',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     config: #!/bin/bash
                     configs:
                       - actions: [CREATE]
                         config: |
                           #!/bin/bash
                           echo CREATE $foo
                         tool: script
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='Unknown Property config')
        ),
        (
            'missing_configs',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     inputs:
                       - name: foo
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='Property configs not assigned')
        ),
        (
            'empty_configs',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='resources.component.properties.configs: '
                         'length (0) is out of range (min: 1, max: None)')
        ),
        (
            'invalid_configs',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       actions: [CREATE]
                       config: #!/bin/bash
                       tool: script
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='is not a list')
        ),
        (
            'config_empty_actions',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       - actions: []
                         config: #!/bin/bash
                         tool: script
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='component.properties.configs[0].actions: '
                         'length (0) is out of range (min: 1, max: None)')
        ),
        (
            'multiple_configs_per_action_single',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       - actions: [CREATE]
                         config: #!/bin/bash
                         tool: script
                       - actions: [CREATE]
                         config: #!/bin/bash
                         tool: script
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='Defining more than one configuration for the same '
                         'action in SoftwareComponent "component" is not '
                         'allowed.')
        ),
        (
            'multiple_configs_per_action_overlapping_list',
            dict(snippet='''
                 component:
                   type: OS::Heat::SoftwareComponent
                   properties:
                     configs:
                       - actions: [CREATE, UPDATE, RESUME]
                         config: #!/bin/bash
                         tool: script
                       - actions: [UPDATE]
                         config: #!/bin/bash
                         tool: script
                 ''',
                 err=exc.StackValidationFailed,
                 err_msg='Defining more than one configuration for the same '
                         'action in SoftwareComponent "component" is not '
                         'allowed.')
        ),
    ]
    def setUp(self):
        super(SoftwareComponentValidationTest, self).setUp()
        self.ctx = utils.dummy_context()
        # Splice the per-scenario snippet into a minimal template body.
        tpl = '''
heat_template_version: 2013-05-23
resources:
  %s
''' % self.snippet
        self.template = template_format.parse(tpl)
        self.stack = stack.Stack(
            self.ctx, 'software_component_test_stack',
            template.Template(self.template))
        self.component = self.stack['component']
        self.component._rpc_client = mock.MagicMock()
    def test_properties_schema(self):
        # Valid scenarios must validate cleanly; invalid ones must raise the
        # expected exception carrying the expected message fragment.
        if self.err:
            err = self.assertRaises(self.err, self.stack.validate)
            if self.err_msg:
                self.assertIn(self.err_msg, six.text_type(err))
        else:
            self.assertIsNone(self.stack.validate())
| apache-2.0 |
shaufi10/odoo | addons/mail/res_users.py | 314 | 10337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import fields, osv
from openerp import api
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
import openerp
class res_users(osv.Model):
""" Update of res.users class
- add a preference about sending emails about notifications
- make a new user follow itself
- add a welcome message
- add suggestion preference
"""
_name = 'res.users'
_inherit = ['res.users']
_inherits = {'mail.alias': 'alias_id'}
_columns = {
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Email address internally associated with this user. Incoming "\
"emails will appear in the user's notifications.", copy=False, auto_join=True),
'display_groups_suggestions': fields.boolean("Display Groups Suggestions"),
}
_defaults = {
'display_groups_suggestions': True,
}
def __init__(self, pool, cr):
""" Override of __init__ to add access rights on notification_email_send
and alias fields. Access rights are disabled by default, but allowed
on some specific fields defined in self.SELF_{READ/WRITE}ABLE_FIELDS.
"""
init_res = super(res_users, self).__init__(pool, cr)
# duplicate list to avoid modifying the original reference
self.SELF_WRITEABLE_FIELDS = list(self.SELF_WRITEABLE_FIELDS)
self.SELF_WRITEABLE_FIELDS.extend(['notify_email', 'display_groups_suggestions'])
# duplicate list to avoid modifying the original reference
self.SELF_READABLE_FIELDS = list(self.SELF_READABLE_FIELDS)
self.SELF_READABLE_FIELDS.extend(['notify_email', 'alias_domain', 'alias_name', 'display_groups_suggestions'])
return init_res
    def _auto_init(self, cr, context=None):
        """ Installation hook: aliases, partner following themselves """
        # create aliases for all users and avoid constraint errors
        # NOTE(review): migrate_to_alias() presumably wraps the regular
        # _auto_init so pre-existing users get an alias (keyed on 'login',
        # forced on the row id) before the NOT NULL constraint on alias_id
        # is enforced — confirm against mail.alias.
        return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(res_users, self)._auto_init,
            self._name, self._columns['alias_id'], 'login', alias_force_key='id', context=context)
def create(self, cr, uid, data, context=None):
if not data.get('login', False):
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'base', 'action_res_users')
msg = _("You cannot create a new user from here.\n To create new user please go to configuration panel.")
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
if context is None:
context = {}
create_context = dict(context, alias_model_name=self._name, alias_parent_model_name=self._name)
user_id = super(res_users, self).create(cr, uid, data, context=create_context)
user = self.browse(cr, uid, user_id, context=context)
self.pool.get('mail.alias').write(cr, SUPERUSER_ID, [user.alias_id.id], {"alias_force_thread_id": user_id, "alias_parent_thread_id": user_id}, context)
# create a welcome message
self._create_welcome_message(cr, uid, user, context=context)
return user_id
def copy_data(self, *args, **kwargs):
data = super(res_users, self).copy_data(*args, **kwargs)
if data and data.get('alias_name'):
data['alias_name'] = data['login']
return data
def _create_welcome_message(self, cr, uid, user, context=None):
if not self.has_group(cr, uid, 'base.group_user'):
return False
company_name = user.company_id.name if user.company_id else ''
body = _('%s has joined the %s network.') % (user.name, company_name)
# TODO change SUPERUSER_ID into user.id but catch errors
return self.pool.get('res.partner').message_post(cr, SUPERUSER_ID, [user.partner_id.id],
body=body, context=context)
def unlink(self, cr, uid, ids, context=None):
# Cascade-delete mail aliases as well, as they should not exist without the user.
alias_pool = self.pool.get('mail.alias')
alias_ids = [user.alias_id.id for user in self.browse(cr, uid, ids, context=context) if user.alias_id]
res = super(res_users, self).unlink(cr, uid, ids, context=context)
alias_pool.unlink(cr, uid, alias_ids, context=context)
return res
def _message_post_get_pid(self, cr, uid, thread_id, context=None):
assert thread_id, "res.users does not support posting global messages"
if context and 'thread_model' in context:
context['thread_model'] = 'res.users'
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
return self.browse(cr, SUPERUSER_ID, thread_id).partner_id.id
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, context=None, **kwargs):
""" Redirect the posting of message on res.users as a private discussion.
This is done because when giving the context of Chatter on the
various mailboxes, we do not have access to the current partner_id. """
if isinstance(thread_id, (list, tuple)):
thread_id = thread_id[0]
current_pids = []
partner_ids = kwargs.get('partner_ids', [])
user_pid = self._message_post_get_pid(cr, uid, thread_id, context=context)
for partner_id in partner_ids:
if isinstance(partner_id, (list, tuple)) and partner_id[0] == 4 and len(partner_id) == 2:
current_pids.append(partner_id[1])
elif isinstance(partner_id, (list, tuple)) and partner_id[0] == 6 and len(partner_id) == 3:
current_pids.append(partner_id[2])
elif isinstance(partner_id, (int, long)):
current_pids.append(partner_id)
if user_pid not in current_pids:
partner_ids.append(user_pid)
kwargs['partner_ids'] = partner_ids
if context and context.get('thread_model') == 'res.partner':
return self.pool['res.partner'].message_post(cr, uid, user_pid, **kwargs)
return self.pool['mail.thread'].message_post(cr, uid, uid, **kwargs)
def message_update(self, cr, uid, ids, msg_dict, update_vals=None, context=None):
return True
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
return True
def message_get_partner_info_from_emails(self, cr, uid, emails, link_mail=False, context=None):
return self.pool.get('mail.thread').message_get_partner_info_from_emails(cr, uid, emails, link_mail=link_mail, context=context)
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
return dict((res_id, list()) for res_id in ids)
def stop_showing_groups_suggestions(self, cr, uid, user_id, context=None):
"""Update display_groups_suggestions value to False"""
if context is None:
context = {}
self.write(cr, uid, user_id, {"display_groups_suggestions": False}, context)
class res_users_mail_group(osv.Model):
    """Keep discussion-group subscriptions in sync with security groups.

    When security groups are added to a user, the user is subscribed to
    every mail.group linked to those security groups. Implemented by
    overriding ``write`` on res.users.
    """
    _name = 'res.users'
    _inherit = ['res.users']

    # FP Note: to improve, post processing may be better ?
    def write(self, cr, uid, ids, vals, context=None):
        result = super(res_users_mail_group, self).write(cr, uid, ids, vals, context=context)
        group_commands = vals.get('groups_id')
        if group_commands:
            # Commands are [(4, id)] (link one) or [(6, 0, [ids])] (replace all).
            added_group_ids = []
            for command in group_commands:
                if command[0] == 4:
                    added_group_ids.append(command[1])
                elif command[0] == 6:
                    added_group_ids.extend(command[2])
            mail_group_pool = self.pool.get('mail.group')
            matching_ids = mail_group_pool.search(cr, uid, [('group_ids', 'in', added_group_ids)], context=context)
            mail_group_pool.message_subscribe_users(cr, uid, matching_ids, ids, context=context)
        return result
class res_groups_mail_group(osv.Model):
    """Subscribe a security group's new members to its discussion groups.

    When users are added to a res.groups record, subscribe those users to
    every mail.group linked to that security group. Implemented by
    overriding ``write`` on res.groups.
    """
    _name = 'res.groups'
    _inherit = 'res.groups'

    # FP Note: to improve, post processeing, after the super may be better
    def write(self, cr, uid, ids, vals, context=None):
        result = super(res_groups_mail_group, self).write(cr, uid, ids, vals, context=context)
        user_commands = vals.get('users')
        if user_commands:
            # Commands are [(4, id)] (link one) or [(6, 0, [ids])] (replace all).
            added_user_ids = []
            for command in user_commands:
                if command[0] == 4:
                    added_user_ids.append(command[1])
                elif command[0] == 6:
                    added_user_ids.extend(command[2])
            mail_group_pool = self.pool.get('mail.group')
            matching_ids = mail_group_pool.search(cr, uid, [('group_ids', 'in', ids)], context=context)
            mail_group_pool.message_subscribe_users(cr, uid, matching_ids, added_user_ids, context=context)
        return result
| agpl-3.0 |
robertding/vo | vo/pubsub.py | 1 | 1446 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : RobertDing
# E-mail : robertdingx@gmail.com
# Date : 15/09/06 00:13:16
# Desc : pub/sub download urls
#
from __future__ import absolute_import, division, with_statement
from redis import Redis
from vo.config import REDIS_HOST
from vo.config import REDIS_PASSWD
class PubSub(object):
    """
    PubSub provides publish, subscribe and listen support to Redis channels.
    An implementation of redis pubsub on top of redis-py.
    """

    def __init__(self, **kwargs):
        # Bug fix: the connection config and the pubsub options must be
        # unpacked as keyword arguments; they were previously passed as a
        # single positional dict, so Redis() received the config dict as
        # its `host` argument.
        self.redis = Redis(**self.redis_config)
        self.pubsub = self.redis.pubsub(**kwargs)

    @property
    def redis_config(self):
        """Build the Redis connection parameters from vo.config."""
        host, port = REDIS_HOST.split(":")
        return dict(host=host, port=int(port), password=REDIS_PASSWD,
                    decode_responses=True)

    def publish(self, channel, message):
        """Publish *message* on *channel*; returns the number of receivers."""
        return self.redis.publish(channel, message)

    def subscribe(self, *args, **kwargs):
        # Bug fix: forward the arguments instead of passing the args tuple
        # and kwargs dict themselves as two positional channel names.
        return self.pubsub.subscribe(*args, **kwargs)

    def unsubscribe(self, *args):
        # Bug fix: unpack the channel names (previously the tuple itself
        # was passed as a single channel).
        return self.pubsub.unsubscribe(*args)

    def listen(self):
        """Generator over incoming pubsub messages (blocks while listening)."""
        # Bug fix: re-yield each message; the original `yield listen()`
        # produced the generator object itself as the only item.
        for message in self.pubsub.listen():
            yield message

    def get_message(self, ignore_subscribe_message=False):
        """Non-blocking fetch of the next message, or None."""
        # redis-py spells the keyword 'ignore_subscribe_messages' (plural);
        # keep our parameter name for caller compatibility.
        return self.pubsub.get_message(ignore_subscribe_messages=ignore_subscribe_message)

    def close(self):
        """Release the pubsub connection."""
        return self.pubsub.close()
| mit |
thnee/ansible | lib/ansible/modules/network/aci/aci_static_binding_to_epg.py | 13 | 14786 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <brunocalogero@hotmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_static_binding_to_epg
short_description: Bind static paths to EPGs (fv:RsPathAtt)
description:
- Bind static paths to EPGs on Cisco ACI fabrics.
version_added: '2.5'
options:
tenant:
description:
- Name of an existing tenant.
type: str
aliases: [ tenant_name ]
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
type: str
aliases: [ app_profile, app_profile_name ]
epg:
description:
- The name of the end point group.
type: str
aliases: [ epg_name ]
description:
description:
- Description for the static path to EPG binding.
type: str
aliases: [ descr ]
version_added: '2.7'
encap_id:
description:
- The encapsulation ID associating the C(epg) with the interface path.
- This acts as the secondary C(encap_id) when using micro-segmentation.
- Accepted values are any valid encap ID for specified encap, currently ranges between C(1) and C(4096).
type: int
aliases: [ vlan, vlan_id ]
primary_encap_id:
description:
- Determines the primary encapsulation ID associating the C(epg)
with the interface path when using micro-segmentation.
- Accepted values are any valid encap ID for specified encap, currently ranges between C(1) and C(4096).
type: int
aliases: [ primary_vlan, primary_vlan_id ]
deploy_immediacy:
description:
- The Deployment Immediacy of Static EPG on PC, VPC or Interface.
- The APIC defaults to C(lazy) when unset during creation.
type: str
choices: [ immediate, lazy ]
interface_mode:
description:
- Determines how layer 2 tags will be read from and added to frames.
- Values C(802.1p) and C(native) are identical.
- Values C(access) and C(untagged) are identical.
- Values C(regular), C(tagged) and C(trunk) are identical.
- The APIC defaults to C(trunk) when unset during creation.
type: str
choices: [ 802.1p, access, native, regular, tagged, trunk, untagged ]
aliases: [ interface_mode_name, mode ]
interface_type:
description:
- The type of interface for the static EPG deployment.
type: str
choices: [ fex, port_channel, switch_port, vpc ]
default: switch_port
pod_id:
description:
- The pod number part of the tDn.
- C(pod_id) is usually an integer below C(10).
type: int
aliases: [ pod, pod_number ]
leafs:
description:
- The switch ID(s) that the C(interface) belongs to.
- When C(interface_type) is C(switch_port), C(port_channel), or C(fex), then C(leafs) is a string of the leaf ID.
- When C(interface_type) is C(vpc), then C(leafs) is a list with both leaf IDs.
- The C(leafs) value is usually something like '101' or '101-102' depending on C(connection_type).
type: list
aliases: [ leaves, nodes, paths, switches ]
interface:
description:
- The C(interface) string value part of the tDn.
- Usually a policy group like C(test-IntPolGrp) or an interface of the following format C(1/7) depending on C(interface_type).
type: str
extpaths:
description:
- The C(extpaths) integer value part of the tDn.
- C(extpaths) is only used if C(interface_type) is C(fex).
- Usually something like C(1011).
type: int
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
notes:
- The C(tenant), C(ap), C(epg) used must exist before using this module in your playbook.
The M(aci_tenant), M(aci_ap), M(aci_epg) modules can be used for this.
seealso:
- module: aci_tenant
- module: aci_ap
- module: aci_epg
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(fv:RsPathAtt).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Bruno Calogero (@brunocalogero)
'''
EXAMPLES = r'''
- name: Deploy Static Path binding for given EPG
aci_static_binding_to_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: accessport-code-cert
ap: accessport_code_app
epg: accessport_epg1
encap_id: 222
deploy_immediacy: lazy
interface_mode: untagged
interface_type: switch_port
pod_id: 1
leafs: 101
interface: '1/7'
state: present
delegate_to: localhost
- name: Remove Static Path binding for given EPG
aci_static_binding_to_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: accessport-code-cert
ap: accessport_code_app
epg: accessport_epg1
interface_type: switch_port
pod: 1
leafs: 101
interface: '1/7'
state: absent
delegate_to: localhost
- name: Get specific Static Path binding for given EPG
aci_static_binding_to_epg:
host: apic
username: admin
password: SomeSecretPassword
tenant: accessport-code-cert
ap: accessport_code_app
epg: accessport_epg1
interface_type: switch_port
pod: 1
leafs: 101
interface: '1/7'
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
# Collapse the seven user-facing interface_mode choices onto the three
# values the APIC actually accepts ('native', 'untagged', 'regular').
INTERFACE_MODE_MAPPING = {
    '802.1p': 'native',
    'access': 'untagged',
    'native': 'native',
    'regular': 'regular',
    'tagged': 'regular',
    'trunk': 'regular',
    'untagged': 'untagged',
}

# tDn path template per interface type; formatted with pod_id, leafs,
# extpaths (fex only) and interface below in main().
INTERFACE_TYPE_MAPPING = dict(
    fex='topology/pod-{pod_id}/paths-{leafs}/extpaths-{extpaths}/pathep-[eth{interface}]',
    port_channel='topology/pod-{pod_id}/paths-{leafs}/pathep-[{interface}]',
    switch_port='topology/pod-{pod_id}/paths-{leafs}/pathep-[eth{interface}]',
    vpc='topology/pod-{pod_id}/protpaths-{leafs}/pathep-[{interface}]',
)
# TODO: change 'deploy_immediacy' to 'resolution_immediacy' (as seen in aci_epg_to_domain)?
def main():
    """Ansible entry point: create, delete or query a static path binding
    (fv:RsPathAtt) between an interface path and an EPG on an APIC."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        tenant=dict(type='str', aliases=['tenant_name']),  # Not required for querying all objects
        ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),  # Not required for querying all objects
        epg=dict(type='str', aliases=['epg_name']),  # Not required for querying all objects
        description=dict(type='str', aliases=['descr']),
        encap_id=dict(type='int', aliases=['vlan', 'vlan_id']),
        primary_encap_id=dict(type='int', aliases=['primary_vlan', 'primary_vlan_id']),
        deploy_immediacy=dict(type='str', choices=['immediate', 'lazy']),
        interface_mode=dict(type='str', choices=['802.1p', 'access', 'native', 'regular', 'tagged', 'trunk', 'untagged'],
                            aliases=['interface_mode_name', 'mode']),
        interface_type=dict(type='str', default='switch_port', choices=['fex', 'port_channel', 'switch_port', 'vpc']),
        pod_id=dict(type='int', aliases=['pod', 'pod_number']),  # Not required for querying all objects
        leafs=dict(type='list', aliases=['leaves', 'nodes', 'paths', 'switches']),  # Not required for querying all objects
        interface=dict(type='str'),  # Not required for querying all objects
        extpaths=dict(type='int'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['interface_type', 'fex', ['extpaths']],
            ['state', 'absent', ['ap', 'epg', 'interface', 'leafs', 'pod_id', 'tenant']],
            ['state', 'present', ['ap', 'encap_id', 'epg', 'interface', 'leafs', 'pod_id', 'tenant']],
        ],
    )

    tenant = module.params.get('tenant')
    ap = module.params.get('ap')
    epg = module.params.get('epg')
    description = module.params.get('description')
    encap_id = module.params.get('encap_id')
    primary_encap_id = module.params.get('primary_encap_id')
    deploy_immediacy = module.params.get('deploy_immediacy')
    interface_mode = module.params.get('interface_mode')
    interface_type = module.params.get('interface_type')
    pod_id = module.params.get('pod_id')
    leafs = module.params.get('leafs')
    if leafs is not None:
        # Process leafs, and support dash-delimited leafs
        leafs = []
        for leaf in module.params.get('leafs'):
            # Users are likely to use integers for leaf IDs, which would raise an exception when using the join method
            leafs.extend(str(leaf).split('-'))
        if len(leafs) == 1:
            if interface_type == 'vpc':
                module.fail_json(msg='A interface_type of "vpc" requires 2 leafs')
            leafs = leafs[0]
        elif len(leafs) == 2:
            if interface_type != 'vpc':
                # Message fix: the original backslash continuation embedded a
                # long run of indentation whitespace inside the error string.
                module.fail_json(msg='The interface_types "switch_port", "port_channel", and "fex" '
                                     'do not support using multiple leafs for a single binding')
            leafs = "-".join(leafs)
        else:
            module.fail_json(msg='The "leafs" parameter must not have more than 2 entries')
    interface = module.params.get('interface')
    extpaths = module.params.get('extpaths')
    state = module.params.get('state')

    if encap_id is not None:
        if encap_id not in range(1, 4097):
            # Typo fix: 'assigments' -> 'assignments'
            module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')
        encap_id = 'vlan-{0}'.format(encap_id)

    if primary_encap_id is not None:
        if primary_encap_id not in range(1, 4097):
            module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')
        primary_encap_id = 'vlan-{0}'.format(primary_encap_id)

    static_path = INTERFACE_TYPE_MAPPING[interface_type].format(pod_id=pod_id, leafs=leafs, extpaths=extpaths, interface=interface)

    # Only filter on the target DN when every component of the path is known;
    # otherwise a query returns all bindings.
    path_target_filter = {}
    if pod_id is not None and leafs is not None and interface is not None and (interface_type != 'fex' or extpaths is not None):
        path_target_filter = {'tDn': static_path}

    if interface_mode is not None:
        interface_mode = INTERFACE_MODE_MAPPING[interface_mode]

    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            module_object=tenant,
            target_filter={'name': tenant},
        ),
        subclass_1=dict(
            aci_class='fvAp',
            aci_rn='ap-{0}'.format(ap),
            module_object=ap,
            target_filter={'name': ap},
        ),
        subclass_2=dict(
            aci_class='fvAEPg',
            aci_rn='epg-{0}'.format(epg),
            module_object=epg,
            target_filter={'name': epg},
        ),
        subclass_3=dict(
            aci_class='fvRsPathAtt',
            aci_rn='rspathAtt-[{0}]'.format(static_path),
            module_object=static_path,
            target_filter=path_target_filter,
        ),
    )

    aci.get_existing()

    if state == 'present':
        aci.payload(
            aci_class='fvRsPathAtt',
            class_config=dict(
                descr=description,
                encap=encap_id,
                primaryEncap=primary_encap_id,
                instrImedcy=deploy_immediacy,
                mode=interface_mode,
                tDn=static_path,
            ),
        )

        aci.get_diff(aci_class='fvRsPathAtt')

        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
| gpl-3.0 |
stone5495/NewsBlur | utils/S3.py | 25 | 21229 | #!/usr/bin/env python
# This software code is made available "AS IS" without warranties of any
# kind. You may copy, display, modify and redistribute the software
# code either by itself or as incorporated into your code; provided that
# you do not remove any proprietary notices. Your use of this software
# code is at your own risk and you waive any claim against Amazon
# Digital Services, Inc. or its affiliates with respect to your use of
# this software code. (c) 2006-2007 Amazon Digital Services, Inc. or its
# affiliates.
import base64
import hmac
import httplib
import re
import sha
import sys
import time
import urllib
import urlparse
import xml.sax
DEFAULT_HOST = 's3.amazonaws.com'
PORTS_BY_SECURITY = { True: 443, False: 80 }
METADATA_PREFIX = 'x-amz-meta-'
AMAZON_HEADER_PREFIX = 'x-amz-'
# generates the aws canonical string for the given parameters
# generates the aws canonical string for the given parameters
def canonical_string(method, bucket="", key="", query_args={}, headers={}, expires=None):
    """Return the canonical request string that AWS signs (signature v2).

    The string is: METHOD, the interesting headers (content-md5,
    content-type, date and every x-amz-* header, sorted), then the
    resource path and any ACL/torrent/logging/location sub-resource.
    When *expires* is given (query-string auth) it replaces the date.
    Idiom fix: dict.has_key() (removed in Python 3) replaced with `in`,
    and the in-place key sort replaced with sorted().
    """
    interesting_headers = {}
    for header_key in headers:
        lk = header_key.lower()
        if lk in ['content-md5', 'content-type', 'date'] or lk.startswith(AMAZON_HEADER_PREFIX):
            interesting_headers[lk] = headers[header_key].strip()

    # these keys get empty strings if they don't exist
    if 'content-type' not in interesting_headers:
        interesting_headers['content-type'] = ''
    if 'content-md5' not in interesting_headers:
        interesting_headers['content-md5'] = ''

    # just in case someone used this. it's not necessary in this lib.
    if 'x-amz-date' in interesting_headers:
        interesting_headers['date'] = ''

    # if you're using expires for query string auth, then it trumps date
    # (and x-amz-date)
    if expires:
        interesting_headers['date'] = str(expires)

    buf = "%s\n" % method
    for header_key in sorted(interesting_headers):
        if header_key.startswith(AMAZON_HEADER_PREFIX):
            buf += "%s:%s\n" % (header_key, interesting_headers[header_key])
        else:
            buf += "%s\n" % interesting_headers[header_key]

    # append the bucket if it exists
    if bucket != "":
        buf += "/%s" % bucket

    # add the key. even if it doesn't exist, add the slash
    buf += "/%s" % urllib.quote_plus(key)

    # handle special query string arguments
    if "acl" in query_args:
        buf += "?acl"
    elif "torrent" in query_args:
        buf += "?torrent"
    elif "logging" in query_args:
        buf += "?logging"
    elif "location" in query_args:
        buf += "?location"

    return buf
# computes the base64'ed hmac-sha hash of the canonical string and the secret
# access key, optionally urlencoding the result
# computes the base64'ed hmac-sha hash of the canonical string and the secret
# access key, optionally urlencoding the result
def encode(aws_secret_access_key, str, urlencode=False):
    """Sign *str* with the secret key (HMAC-SHA1, base64-encoded).

    With urlencode=True the signature is additionally quote_plus-escaped
    for use in a query string.
    """
    digest = hmac.new(aws_secret_access_key, str, sha).digest()
    b64_hmac = base64.encodestring(digest).strip()
    if urlencode:
        return urllib.quote_plus(b64_hmac)
    return b64_hmac
def merge_meta(headers, metadata):
    """Return a copy of *headers* with every metadata entry added under
    the x-amz-meta- prefix; neither input dict is modified."""
    merged = headers.copy()
    for name, value in metadata.items():
        merged[METADATA_PREFIX + name] = value
    return merged
# builds the query arg string
# builds the query arg string
def query_args_hash_to_string(query_args):
    """Serialize a {name: value} dict into 'a=1&b&c=2' form.

    A None value yields a bare key with no '=value' part (used for
    sub-resources like ?acl). Values are str()'d and URL-quoted.
    Fixes: `v != None` replaced by the idiomatic `v is not None`;
    removed the unused `query_string` local.
    """
    pairs = []
    for k, v in query_args.items():
        piece = k
        if v is not None:
            piece += "=%s" % urllib.quote_plus(str(v))
        pairs.append(piece)

    return '&'.join(pairs)
class CallingFormat:
    """How the bucket name is placed in the request URL."""
    PATH = 1        # scheme://server:port/bucket
    SUBDOMAIN = 2   # scheme://bucket.server:port
    VANITY = 3      # scheme://bucket:port (bucket is a CNAME to S3)

    @staticmethod
    def build_url_base(protocol, server, port, bucket, calling_format):
        """Return 'scheme://host:port' plus '/bucket' for PATH style."""
        if bucket == '':
            host = server
        elif calling_format == CallingFormat.SUBDOMAIN:
            host = "%s.%s" % (bucket, server)
        elif calling_format == CallingFormat.VANITY:
            host = bucket
        else:
            host = server

        url_base = "%s://%s:%s" % (protocol, host, port)

        if bucket != '' and calling_format == CallingFormat.PATH:
            url_base += "/%s" % bucket

        return url_base
class Location:
    # Bucket location constraint values: DEFAULT (US classic region, no
    # constraint document sent) or EU (Europe).
    DEFAULT = None
    EU = 'EU'
class AWSAuthConnection:
    """Header-signed (Authorization header) connection to the S3 REST API.

    Each public method performs one request and wraps the result in the
    matching Response subclass. Fixes over the original:
      * the redirect handler inverted the scheme/security mapping
        ("http" selected HTTPS and vice versa);
      * the malformed-redirect branch raised the undefined name
        `invalidURL` (itself a NameError) -- now raises ValueError.
    """

    def __init__(self, aws_access_key_id, aws_secret_access_key, is_secure=True,
                 server=DEFAULT_HOST, port=None, calling_format=CallingFormat.SUBDOMAIN):
        # Default port follows the security flag (443 for TLS, 80 otherwise).
        if not port:
            port = PORTS_BY_SECURITY[is_secure]

        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        self.is_secure = is_secure
        self.server = server
        self.port = port
        self.calling_format = calling_format

    def create_bucket(self, bucket, headers={}):
        return Response(self._make_request('PUT', bucket, '', {}, headers))

    def create_located_bucket(self, bucket, location=Location.DEFAULT, headers={}):
        # A location constraint document is only sent for non-default regions.
        if location == Location.DEFAULT:
            body = ""
        else:
            body = "<CreateBucketConstraint><LocationConstraint>" + \
                   location + \
                   "</LocationConstraint></CreateBucketConstraint>"
        return Response(self._make_request('PUT', bucket, '', {}, headers, body))

    def check_bucket_exists(self, bucket):
        # Raw HEAD response; caller inspects the status code.
        return self._make_request('HEAD', bucket, '', {}, {})

    def list_bucket(self, bucket, options={}, headers={}):
        return ListBucketResponse(self._make_request('GET', bucket, '', options, headers))

    def delete_bucket(self, bucket, headers={}):
        return Response(self._make_request('DELETE', bucket, '', {}, headers))

    def put(self, bucket, key, object, headers={}):
        # Accept either a raw payload or an S3Object carrying metadata.
        if not isinstance(object, S3Object):
            object = S3Object(object)

        return Response(
            self._make_request(
                'PUT',
                bucket,
                key,
                {},
                headers,
                object.data,
                object.metadata))

    def get(self, bucket, key, headers={}):
        return GetResponse(
            self._make_request('GET', bucket, key, {}, headers))

    def delete(self, bucket, key, headers={}):
        return Response(
            self._make_request('DELETE', bucket, key, {}, headers))

    def get_bucket_logging(self, bucket, headers={}):
        return GetResponse(self._make_request('GET', bucket, '', {'logging': None}, headers))

    def put_bucket_logging(self, bucket, logging_xml_doc, headers={}):
        return Response(self._make_request('PUT', bucket, '', {'logging': None}, headers, logging_xml_doc))

    def get_bucket_acl(self, bucket, headers={}):
        return self.get_acl(bucket, '', headers)

    def get_acl(self, bucket, key, headers={}):
        return GetResponse(
            self._make_request('GET', bucket, key, {'acl': None}, headers))

    def put_bucket_acl(self, bucket, acl_xml_document, headers={}):
        return self.put_acl(bucket, '', acl_xml_document, headers)

    def put_acl(self, bucket, key, acl_xml_document, headers={}):
        return Response(
            self._make_request(
                'PUT',
                bucket,
                key,
                {'acl': None},
                headers,
                acl_xml_document))

    def list_all_my_buckets(self, headers={}):
        return ListAllMyBucketsResponse(self._make_request('GET', '', '', {}, headers))

    def get_bucket_location(self, bucket):
        return LocationResponse(self._make_request('GET', bucket, '', {'location': None}))

    # end public methods

    def _make_request(self, method, bucket='', key='', query_args={}, headers={}, data='', metadata={}):
        """Perform one signed HTTP request, following 3xx redirects."""
        server = ''
        if bucket == '':
            server = self.server
        elif self.calling_format == CallingFormat.SUBDOMAIN:
            server = "%s.%s" % (bucket, self.server)
        elif self.calling_format == CallingFormat.VANITY:
            server = bucket
        else:
            server = self.server

        path = ''

        if (bucket != '') and (self.calling_format == CallingFormat.PATH):
            path += "/%s" % bucket

        # add the slash after the bucket regardless
        # the key will be appended if it is non-empty
        path += "/%s" % urllib.quote_plus(key)

        # build the path_argument string
        # add the ? in all cases since
        # signature and credentials follow path args
        if len(query_args):
            path += "?" + query_args_hash_to_string(query_args)

        is_secure = self.is_secure
        host = "%s:%d" % (server, self.port)
        while True:
            if (is_secure):
                connection = httplib.HTTPSConnection(host)
            else:
                connection = httplib.HTTPConnection(host)

            final_headers = merge_meta(headers, metadata)
            # add auth header
            self._add_aws_auth_header(final_headers, method, bucket, key, query_args)

            connection.request(method, path, data, final_headers)
            resp = connection.getresponse()
            if resp.status < 300 or resp.status >= 400:
                return resp
            # handle redirect
            location = resp.getheader('location')
            if not location:
                return resp
            # (close connection)
            resp.read()
            scheme, host, path, params, query, fragment \
                = urlparse.urlparse(location)
            # Bug fix: the mapping was inverted -- an https redirect must
            # use a secure connection, an http redirect a plain one.
            if scheme == "http":
                is_secure = False
            elif scheme == "https":
                is_secure = True
            else:
                # Bug fix: `invalidURL` was an undefined name (NameError).
                raise ValueError("Not http/https: " + location)
            if query:
                path += "?" + query
            # retry with redirect

    def _add_aws_auth_header(self, headers, method, bucket, key, query_args):
        # Stamp the request with a Date header (required for signing) and
        # the signature-v2 Authorization header.
        if 'Date' not in headers:
            headers['Date'] = time.strftime("%a, %d %b %Y %X GMT", time.gmtime())

        c_string = canonical_string(method, bucket, key, query_args, headers)
        headers['Authorization'] = \
            "AWS %s:%s" % (self.aws_access_key_id, encode(self.aws_secret_access_key, c_string))
class QueryStringAuthGenerator:
    """Generates pre-signed (query-string authenticated) S3 URLs.

    Mirrors the AWSAuthConnection API but returns URLs instead of
    performing requests. Fixes over the original:
      * make_bare_url passed `self` as the HTTP method argument of
        generate_url; it now signs a GET URL as intended;
      * an invalid expires state raised a bare string (a TypeError on
        modern Python); it now raises ValueError.
    """
    # by default, expire in 1 minute
    DEFAULT_EXPIRES_IN = 60

    def __init__(self, aws_access_key_id, aws_secret_access_key, is_secure=True,
                 server=DEFAULT_HOST, port=None, calling_format=CallingFormat.SUBDOMAIN):
        if not port:
            port = PORTS_BY_SECURITY[is_secure]

        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        if (is_secure):
            self.protocol = 'https'
        else:
            self.protocol = 'http'

        self.is_secure = is_secure
        self.server = server
        self.port = port
        self.calling_format = calling_format
        # Exactly one of __expires_in (relative) / __expires (absolute) is set.
        self.__expires_in = QueryStringAuthGenerator.DEFAULT_EXPIRES_IN
        self.__expires = None

        # for backwards compatibility with older versions
        self.server_name = "%s:%s" % (self.server, self.port)

    def set_expires_in(self, expires_in):
        """Expire generated URLs *expires_in* seconds after generation."""
        self.__expires_in = expires_in
        self.__expires = None

    def set_expires(self, expires):
        """Expire generated URLs at the absolute epoch time *expires*."""
        self.__expires = expires
        self.__expires_in = None

    def create_bucket(self, bucket, headers={}):
        return self.generate_url('PUT', bucket, '', {}, headers)

    def list_bucket(self, bucket, options={}, headers={}):
        return self.generate_url('GET', bucket, '', options, headers)

    def delete_bucket(self, bucket, headers={}):
        return self.generate_url('DELETE', bucket, '', {}, headers)

    def put(self, bucket, key, object, headers={}):
        if not isinstance(object, S3Object):
            object = S3Object(object)

        return self.generate_url(
            'PUT',
            bucket,
            key,
            {},
            merge_meta(headers, object.metadata))

    def get(self, bucket, key, headers={}):
        return self.generate_url('GET', bucket, key, {}, headers)

    def delete(self, bucket, key, headers={}):
        return self.generate_url('DELETE', bucket, key, {}, headers)

    def get_bucket_logging(self, bucket, headers={}):
        return self.generate_url('GET', bucket, '', {'logging': None}, headers)

    def put_bucket_logging(self, bucket, logging_xml_doc, headers={}):
        return self.generate_url('PUT', bucket, '', {'logging': None}, headers)

    def get_bucket_acl(self, bucket, headers={}):
        return self.get_acl(bucket, '', headers)

    def get_acl(self, bucket, key='', headers={}):
        return self.generate_url('GET', bucket, key, {'acl': None}, headers)

    def put_bucket_acl(self, bucket, acl_xml_document, headers={}):
        return self.put_acl(bucket, '', acl_xml_document, headers)

    # don't really care what the doc is here.
    def put_acl(self, bucket, key, acl_xml_document, headers={}):
        return self.generate_url('PUT', bucket, key, {'acl': None}, headers)

    def list_all_my_buckets(self, headers={}):
        return self.generate_url('GET', '', '', {}, headers)

    def make_bare_url(self, bucket, key=''):
        # Bug fix: the original called self.generate_url(self, bucket, key),
        # passing the instance itself as the HTTP method.
        full_url = self.generate_url('GET', bucket, key)
        return full_url[:full_url.index('?')]

    def generate_url(self, method, bucket='', key='', query_args={}, headers={}):
        """Build a signed URL for *method* on bucket/key, valid until the
        configured expiry."""
        expires = 0
        if self.__expires_in != None:
            expires = int(time.time() + self.__expires_in)
        elif self.__expires != None:
            expires = int(self.__expires)
        else:
            # Bug fix: raising a bare string is invalid; raise a real error.
            raise ValueError("Invalid expires state")

        canonical_str = canonical_string(method, bucket, key, query_args, headers, expires)
        encoded_canonical = encode(self.aws_secret_access_key, canonical_str)

        url = CallingFormat.build_url_base(self.protocol, self.server, self.port, bucket, self.calling_format)

        url += "/%s" % urllib.quote_plus(key)

        query_args['Signature'] = encoded_canonical
        query_args['Expires'] = expires
        query_args['AWSAccessKeyId'] = self.aws_access_key_id

        url += "?%s" % query_args_hash_to_string(query_args)

        return url
class S3Object:
    """An S3 object's payload plus its user metadata headers."""
    def __init__(self, data, metadata=None):
        # Bug fix: the original default `metadata={}` was a mutable default
        # shared by every instance, so metadata added to one object leaked
        # into all later objects created without explicit metadata.
        self.data = data
        self.metadata = {} if metadata is None else metadata
class Owner:
    """Bucket/object owner: the AWS canonical user id plus display name."""
    def __init__(self, id='', display_name=''):
        self.id, self.display_name = id, display_name
class ListEntry:
    """One <Contents> entry from a bucket listing response."""
    def __init__(self, key='', last_modified=None, etag='', size=0,
                 storage_class='', owner=None):
        self.key, self.last_modified = key, last_modified
        self.etag, self.size = etag, size
        self.storage_class, self.owner = storage_class, owner
class CommonPrefixEntry:
    """One <CommonPrefixes> entry from a delimited bucket listing."""
    def __init__(self, prefix=''):
        # Bug fix: the original defined ``__init`` (missing the trailing
        # underscores), so the constructor never ran and ``prefix`` was
        # only ever set externally by the SAX handler.
        self.prefix = prefix
class Bucket:
    """A bucket name plus its creation timestamp string."""
    def __init__(self, name='', creation_date=''):
        self.name, self.creation_date = name, creation_date
class Response:
    """Base wrapper around an HTTP response; eagerly drains the body."""
    def __init__(self, http_response):
        self.http_response = http_response
        # Always consume the body, even when no payload is expected;
        # leaving it unread would break the next request on the connection.
        body = http_response.read()
        self.body = body
        status, reason = http_response.status, http_response.reason
        # Error responses with a payload report the payload; everything
        # else reports "NNN Reason".
        self.message = body if (status >= 300 and body) else "%03d %s" % (status, reason)
class ListBucketResponse(Response):
    """Response for GET bucket (list objects)."""
    def __init__(self, http_response):
        Response.__init__(self, http_response)
        if http_response.status < 300:
            handler = ListBucketHandler()
            xml.sax.parseString(self.body, handler)
            self.entries = handler.entries
            self.common_prefixes = handler.common_prefixes
            self.name = handler.name
            self.marker = handler.marker
            self.prefix = handler.prefix
            self.is_truncated = handler.is_truncated
            self.delimiter = handler.delimiter
            self.max_keys = handler.max_keys
            self.next_marker = handler.next_marker
        else:
            # Populate every attribute on the error path too, so callers
            # probing e.g. .is_truncated never hit AttributeError (the
            # original only set .entries here).
            self.entries = []
            self.common_prefixes = []
            self.name = ''
            self.marker = ''
            self.prefix = ''
            self.is_truncated = False
            self.delimiter = ''
            self.max_keys = 0
            self.next_marker = ''
class ListAllMyBucketsResponse(Response):
    """Response for GET service: the list of buckets owned by the caller."""
    def __init__(self, http_response):
        Response.__init__(self, http_response)
        # Guard clause: on an error status there is nothing to parse.
        if http_response.status >= 300:
            self.entries = []
            return
        handler = ListAllMyBucketsHandler()
        xml.sax.parseString(self.body, handler)
        self.entries = handler.entries
class GetResponse(Response):
    """Response for GET object: payload plus decoded x-amz-meta-* metadata."""
    def __init__(self, http_response):
        Response.__init__(self, http_response)
        response_headers = http_response.msg   # older pythons don't have getheaders
        metadata = self.get_aws_metadata(response_headers)
        self.object = S3Object(self.body, metadata)
    def get_aws_metadata(self, headers):
        """Pop every METADATA_PREFIX header out of *headers* and return them
        as a dict keyed by the bare metadata name."""
        metadata = {}
        # Snapshot the keys: entries are deleted from *headers* while
        # walking, which breaks direct iteration over a dict view.
        for hkey in list(headers.keys()):
            if hkey.lower().startswith(METADATA_PREFIX):
                metadata[hkey[len(METADATA_PREFIX):]] = headers[hkey]
                del headers[hkey]
        return metadata
class LocationResponse(Response):
    """Response for GET location: exposes the bucket's location constraint."""
    def __init__(self, http_response):
        Response.__init__(self, http_response)
        if http_response.status < 300:
            handler = LocationHandler()
            xml.sax.parseString(self.body, handler)
            self.location = handler.location
        else:
            # Define the attribute on the error path too, so callers never
            # hit AttributeError when probing .location.
            self.location = None
class ListBucketHandler(xml.sax.ContentHandler):
    """SAX handler that accumulates the fields of a GET-bucket listing."""
    def __init__(self):
        self.entries = []
        self.curr_entry = None
        self.curr_text = ''
        self.common_prefixes = []
        self.curr_common_prefix = None
        self.name = ''
        self.marker = ''
        self.prefix = ''
        self.is_truncated = False
        self.delimiter = ''
        self.max_keys = 0
        self.next_marker = ''
        # The first <Prefix> element echoes the request's prefix; later
        # <Prefix> elements appear inside <CommonPrefixes>.  This flag
        # tells the two cases apart in endElement().
        self.is_echoed_prefix_set = False
    def startElement(self, name, attrs):
        if name == 'Contents':
            self.curr_entry = ListEntry()
        elif name == 'Owner':
            self.curr_entry.owner = Owner()
        elif name == 'CommonPrefixes':
            self.curr_common_prefix = CommonPrefixEntry()
    def endElement(self, name):
        if name == 'Contents':
            self.entries.append(self.curr_entry)
        elif name == 'CommonPrefixes':
            self.common_prefixes.append(self.curr_common_prefix)
        elif name == 'Key':
            self.curr_entry.key = self.curr_text
        elif name == 'LastModified':
            self.curr_entry.last_modified = self.curr_text
        elif name == 'ETag':
            self.curr_entry.etag = self.curr_text
        elif name == 'Size':
            self.curr_entry.size = int(self.curr_text)
        elif name == 'ID':
            self.curr_entry.owner.id = self.curr_text
        elif name == 'DisplayName':
            self.curr_entry.owner.display_name = self.curr_text
        elif name == 'StorageClass':
            self.curr_entry.storage_class = self.curr_text
        elif name == 'Name':
            self.name = self.curr_text
        # Branch order matters: once the echoed prefix has been recorded,
        # every subsequent <Prefix> belongs to a CommonPrefixes entry.
        elif name == 'Prefix' and self.is_echoed_prefix_set:
            self.curr_common_prefix.prefix = self.curr_text
        elif name == 'Prefix':
            self.prefix = self.curr_text
            self.is_echoed_prefix_set = True
        elif name == 'Marker':
            self.marker = self.curr_text
        elif name == 'IsTruncated':
            self.is_truncated = self.curr_text == 'true'
        elif name == 'Delimiter':
            self.delimiter = self.curr_text
        elif name == 'MaxKeys':
            self.max_keys = int(self.curr_text)
        elif name == 'NextMarker':
            self.next_marker = self.curr_text
        # The text buffer is reset after every closing tag.
        self.curr_text = ''
    def characters(self, content):
        # SAX may deliver one text node in several chunks: accumulate.
        self.curr_text += content
class ListAllMyBucketsHandler(xml.sax.ContentHandler):
    """SAX handler for the GET-service (list all buckets) response."""
    def __init__(self):
        self.entries = []
        self.curr_entry = None
        self.curr_text = ''
    def startElement(self, name, attrs):
        if name == 'Bucket':
            self.curr_entry = Bucket()
    def endElement(self, name):
        if name == 'Name':
            self.curr_entry.name = self.curr_text
        elif name == 'CreationDate':
            self.curr_entry.creation_date = self.curr_text
        elif name == 'Bucket':
            self.entries.append(self.curr_entry)
        # Reset the buffer after each element, like ListBucketHandler does.
        self.curr_text = ''
    def characters(self, content):
        # Bug fix: SAX may deliver a single text node in multiple chunks;
        # the original assigned (=) instead of accumulating (+=), which
        # could truncate names split across parser buffer boundaries.
        self.curr_text += content
class LocationHandler(xml.sax.ContentHandler):
    """SAX handler for a GET-location response.

    Accepts a document consisting of exactly one <LocationConstraint>
    root element; anything else drives the state machine into 'bad'.
    """
    def __init__(self):
        self.location = None
        self.state = 'init'
    def startElement(self, name, attrs):
        if self.state == 'init' and name == 'LocationConstraint':
            self.state = 'tag_location'
            self.location = ''
        else:
            self.state = 'bad'
    def endElement(self, name):
        if self.state == 'tag_location' and name == 'LocationConstraint':
            self.state = 'done'
        else:
            self.state = 'bad'
    def characters(self, content):
        if self.state == 'tag_location':
            self.location += content
| mit |
cloudfoundry/php-buildpack-legacy | builds/runtimes/python-2.7.6/lib/python2.7/idlelib/ObjectBrowser.py | 90 | 4150 | # XXX TO DO:
# - popup menu
# - support partial or total redisplay
# - more doc strings
# - tooltips
# object browser
# XXX TO DO:
# - for classes/modules, add "open source" to object browser
from idlelib.TreeWidget import TreeItem, TreeNode, ScrolledCanvas
from repr import Repr
# Shared Repr instance used for all item labels; caps the length of
# string (and any other) reprs so huge objects stay readable in the tree.
myrepr = Repr()
myrepr.maxstring = 100
myrepr.maxother = 100
class ObjectTreeItem(TreeItem):
    """Generic tree item wrapping an arbitrary Python object."""
    def __init__(self, labeltext, object, setfunction=None):
        # setfunction, when given, is called with the new value on edits;
        # supplying it is also what makes the item editable.
        self.labeltext = labeltext
        self.object = object
        self.setfunction = setfunction
    def GetLabelText(self):
        return self.labeltext
    def GetText(self):
        # Truncated repr (myrepr caps reprs at 100 chars).
        return myrepr.repr(self.object)
    def GetIconName(self):
        if not self.IsExpandable():
            return "python"
    def IsEditable(self):
        return self.setfunction is not None
    def SetText(self, text):
        try:
            # eval() of user-typed text is acceptable in this interactive
            # debugger UI; do not reuse this on untrusted input.
            value = eval(text)
            self.setfunction(value)
        except Exception:
            # Narrowed from a bare except: still drop any failed edit
            # silently, but no longer trap KeyboardInterrupt/SystemExit.
            pass
        else:
            self.object = value
    def IsExpandable(self):
        # bool(...) instead of the original "not not" double negation.
        return bool(dir(self.object))
    def GetSubList(self):
        """One child item per attribute reachable via dir()/getattr()."""
        keys = dir(self.object)
        sublist = []
        for key in keys:
            try:
                value = getattr(self.object, key)
            except AttributeError:
                # dir() can list names that getattr() refuses; skip them.
                continue
            item = make_objecttreeitem(
                str(key) + " =",
                value,
                # Default args freeze the current key/object (late-binding
                # closures would all see the final loop values).
                lambda value, key=key, object=self.object:
                    setattr(object, key, value))
            sublist.append(item)
        return sublist
class InstanceTreeItem(ObjectTreeItem):
    """Tree item for a class instance: always expandable, __class__ first."""
    def IsExpandable(self):
        return True
    def GetSubList(self):
        class_item = make_objecttreeitem("__class__ =", self.object.__class__)
        return [class_item] + ObjectTreeItem.GetSubList(self)
class ClassTreeItem(ObjectTreeItem):
    """Tree item for a class object: always expandable, bases shown first."""
    def IsExpandable(self):
        return True
    def GetSubList(self):
        bases = self.object.__bases__
        if len(bases) == 1:
            first = make_objecttreeitem("__bases__[0] =", bases[0])
        else:
            first = make_objecttreeitem("__bases__ =", bases)
        return [first] + ObjectTreeItem.GetSubList(self)
class AtomicObjectTreeItem(ObjectTreeItem):
    """Tree item for an atomic value (int, str, ...): never expandable."""
    def IsExpandable(self):
        # Real bool for consistency with the sibling IsExpandable
        # implementations (the original returned the integer 0).
        return False
class SequenceTreeItem(ObjectTreeItem):
    """Tree item for indexable sequences; also the base of DictTreeItem."""
    def IsExpandable(self):
        return len(self.object) > 0
    def keys(self):
        # Positional indices for plain sequences; DictTreeItem overrides
        # this with the mapping's real keys.
        return range(len(self.object))
    def GetSubList(self):
        sublist = []
        for key in self.keys():
            try:
                value = self.object[key]
            except KeyError:
                # Only reachable via DictTreeItem, whose keys may vanish
                # between keys() and the lookup.
                continue
            # Default-argument binding (key=key, object=...) freezes the
            # current loop values; a plain closure would late-bind them.
            def setfunction(value, key=key, object=self.object):
                object[key] = value
            item = make_objecttreeitem("%r:" % (key,), value, setfunction)
            sublist.append(item)
        return sublist
class DictTreeItem(SequenceTreeItem):
    """Tree item for mappings: iterates real keys, sorted when possible."""
    def keys(self):
        # list() copy so sort() works on Python 3 key views as well.
        keys = list(self.object.keys())
        try:
            keys.sort()
        except Exception:
            # Narrowed from a bare except: unorderable/mixed key types
            # simply fall back to the mapping's natural order.
            pass
        return keys
from types import *
# Maps concrete (Python 2) types to the tree-item class that renders them;
# anything not listed falls back to the generic ObjectTreeItem in
# make_objecttreeitem() below.
dispatch = {
    IntType: AtomicObjectTreeItem,
    LongType: AtomicObjectTreeItem,
    FloatType: AtomicObjectTreeItem,
    StringType: AtomicObjectTreeItem,
    TupleType: SequenceTreeItem,
    ListType: SequenceTreeItem,
    DictType: DictTreeItem,
    InstanceType: InstanceTreeItem,
    ClassType: ClassTreeItem,
    }
def make_objecttreeitem(labeltext, object, setfunction=None):
    """Create the ObjectTreeItem subclass appropriate for type(object)."""
    item_class = dispatch.get(type(object), ObjectTreeItem)
    return item_class(labeltext, object, setfunction)
# Test script
def _test():
    # Interactive demo: browse the sys module in a Tk tree widget.
    import sys
    from Tkinter import Tk
    root = Tk()
    root.configure(bd=0, bg="yellow")
    root.focus_set()
    sc = ScrolledCanvas(root, bg="white", highlightthickness=0, takefocus=1)
    sc.frame.pack(expand=1, fill="both")
    item = make_objecttreeitem("sys", sys)
    node = TreeNode(sc.canvas, None, item)
    node.update()
    # Blocks until the window is closed.
    root.mainloop()
# Run the Tkinter demo when executed as a script.
if __name__ == '__main__':
    _test()
| mit |
richardnpaul/FWL-Website | lib/python2.7/site-packages/psycopg2/tests/__init__.py | 55 | 2899 | #!/usr/bin/env python
# psycopg2 test suite
#
# Copyright (C) 2007-2011 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import sys
from testconfig import dsn
from testutils import unittest
import test_async
import test_bugX000
import test_bug_gc
import test_cancel
import test_connection
import test_copy
import test_cursor
import test_dates
import test_extras_dictcursor
import test_green
import test_lobject
import test_module
import test_notify
import test_psycopg2_dbapi20
import test_quote
import test_transaction
import test_types_basic
import test_types_extras
# test_with exercises the 'with' statement, whose syntax only parses on
# Python >= 2.5; on older interpreters it is skipped entirely.
if sys.version_info[:2] >= (2, 5):
    import test_with
else:
    test_with = None
def test_suite():
# If connection to test db fails, bail out early.
import psycopg2
try:
cnn = psycopg2.connect(dsn)
except Exception, e:
print "Failed connection to test db:", e.__class__.__name__, e
print "Please set env vars 'PSYCOPG2_TESTDB*' to valid values."
sys.exit(1)
else:
cnn.close()
suite = unittest.TestSuite()
suite.addTest(test_async.test_suite())
suite.addTest(test_bugX000.test_suite())
suite.addTest(test_bug_gc.test_suite())
suite.addTest(test_cancel.test_suite())
suite.addTest(test_connection.test_suite())
suite.addTest(test_copy.test_suite())
suite.addTest(test_cursor.test_suite())
suite.addTest(test_dates.test_suite())
suite.addTest(test_extras_dictcursor.test_suite())
suite.addTest(test_green.test_suite())
suite.addTest(test_lobject.test_suite())
suite.addTest(test_module.test_suite())
suite.addTest(test_notify.test_suite())
suite.addTest(test_psycopg2_dbapi20.test_suite())
suite.addTest(test_quote.test_suite())
suite.addTest(test_transaction.test_suite())
suite.addTest(test_types_basic.test_suite())
suite.addTest(test_types_extras.test_suite())
if test_with:
suite.addTest(test_with.test_suite())
return suite
# Allow running the whole suite directly: python tests/__init__.py
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| gpl-3.0 |
gwillem/rgkit | rg.py | 3 | 1416 | # users will import rg to be able to use robot game functions
import math
import operator
# Populated by set_settings() before any other function here is used.
settings = None
# constants
# Board centre; derived from settings.board_size in after_settings().
CENTER_POINT = None
def after_settings():
    """Recompute module constants that depend on the current settings."""
    global CENTER_POINT
    global settings
    half = int(settings.board_size / 2)
    CENTER_POINT = (half, half)
def set_settings(s):
    # Install the game settings module-wide, then refresh derived constants.
    global settings
    settings = s
    after_settings()
##############################
def dist(p1, p2):
    """Euclidean distance between points *p1* and *p2*."""
    # def instead of assigned lambdas (PEP 8 E731); callables keep their names.
    return math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
def wdist(p1, p2):
    """Walking (Manhattan) distance between *p1* and *p2*."""
    return abs(p2[0] - p1[0]) + abs(p2[1] - p1[1])
def loc_types(loc):
    """Classify *loc*: ['invalid'] off-board, else 'normal' plus any of
    'spawn' / 'obstacle' that apply."""
    board = settings.board_size
    if not all(0 <= coord < board for coord in loc[:2]):
        return ['invalid']
    types = ['normal']
    if loc in settings.spawn_coords:
        types.append('spawn')
    if loc in settings.obstacles:
        types.append('obstacle')
    return types
def locs_around(loc, filter_out=None):
    """Orthogonal neighbours of *loc*, omitting any whose loc_types()
    intersect *filter_out*."""
    excluded = set(filter_out or [])
    neighbours = []
    for dx, dy in ((0, 1), (1, 0), (0, -1), (-1, 0)):
        candidate = (loc[0] + dx, loc[1] + dy)
        if not excluded & set(loc_types(candidate)):
            neighbours.append(candidate)
    return neighbours
def toward(curr, dest):
    """Return the location one step from *curr* toward *dest*.

    Moves one square along the axis with the larger remaining distance
    (ties go to the x axis); returns *curr* when already at *dest*.
    """
    if curr == dest:
        return curr
    x0, y0 = curr
    x, y = dest
    x_diff, y_diff = x - x0, y - y0
    if abs(x_diff) < abs(y_diff):
        # Floor division keeps the unit step an int under Python 3 too
        # (the original true division would yield a float coordinate).
        return (x0, y0 + y_diff // abs(y_diff))
    return (x0 + x_diff // abs(x_diff), y0)
| unlicense |
WenTr/TrendingApps | googleplayStats.py | 1 | 3436 | # -*- coding: utf-8 -*-
"""
Created on Sat May 23 16:39:32 2015
@author: Wendy Tran
"""
'''
Sources:
http://stackoverflow.com/questions/252703/python-append-vs-extend
http://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html
http://stackoverflow.com/questions/38987/how-can-i-merge-two-python-dictionaries-in-a-single-expression
'''
from collections import Counter
import pymongo
import re
import numpy as np
from textblob import TextBlob
#import json
# Module-level Mongo connection, created at import time (side effect);
# the 'googleplay' collection holds one document per scraped app.
conn = pymongo.MongoClient()
db = conn['trendingapps']
gcollec = db['googleplay']
class GooglePlayStats:
    """Descriptive statistics and sentiment analysis over Google Play app
    reviews stored in the module-level ``googleplay`` Mongo collection.

    The get*() helpers take a dict mapping an app title to its list of
    ratings and summarise the first entry of that dict.
    """
    def __init__(self):
        pass
    def getMean(self, appRatings):
        """Mean of the first app's ratings, rounded to one decimal."""
        for (appName, ratingList) in appRatings.items():
            return round(np.mean(ratingList), 1)
    def getMedian(self, appRatings):
        """Median of the first app's ratings."""
        for (appName, ratingList) in appRatings.items():
            return np.median(ratingList)
    def getMode(self, appRatings):
        """Most common rating of the first app."""
        for (appName, ratingList) in appRatings.items():
            return Counter(ratingList).most_common(1)[0][0]
    def getStDev(self, appRatings):
        """Population standard deviation of the first app's ratings."""
        for (appName, ratingList) in appRatings.items():
            return np.std(ratingList)
    def getSentiAnaly(self, appNum):
        """Per-review sentiment polarity for app *appNum* plus its mean.

        Reads the app's 40 stored reviews from Mongo and scores each
        comment with TextBlob polarity (-1.0 .. 1.0, rounded to 5 places).
        """
        sentianaly = {}
        reviews = {}
        sentiList = []
        for reviewNum in range(0, 40):
            rev = gcollec.find({str(appNum): {'$exists': 1}}, {str(appNum) + '.reviews.review_' + str(reviewNum) + '.comment': 1, '_id': 0})[0]
            comment = rev[str(appNum)]['reviews']['review_' + str(reviewNum)]['comment']
            # TextBlob sentiment is (polarity, subjectivity); keep polarity.
            polar = TextBlob(comment.encode('ascii', 'ignore')).sentiment[0]
            sentianaly['review_' + str(reviewNum)] = round(polar, 5)
            sentiList.append(polar)
        reviews['SentimentalAnalysis'] = sentianaly
        reviews['meanOfPolarity'] = round(np.mean(sentiList), 5)
        return reviews
    def getReviewRatings(self):
        """Aggregate stats + sentiment for apps 1..10.

        Bug fix: the original computed every app's stats from the
        accumulating ``appRatings`` dict, whose first (arbitrary) entry
        the stat helpers summarise -- so from the second app on, the
        numbers belonged to some earlier app.  Stats are now computed on
        the current app's ratings only.
        """
        appRatings = {}
        appStats = {}
        apps = {}
        for num in range(1, 11):
            appList = []
            appDict = db.googleplay.find({str(num): {'$exists': 1}}, {str(num) + '.title': 1, str(num) + '.reviews': 1, '_id': 0})[0]
            title = str(appDict[str(num)]['title'])
            for reviewNum in range(0, 40):
                # The stored rating is free text; keep its first digit.
                rating_text = str(appDict[str(num)]['reviews']['review_' + str(reviewNum)]['rating'])
                appList.append(int(re.findall(r'\d', rating_text)[0]))
            appList.sort()
            appRatings[title] = appList
            current = {title: appList}
            stats = {'mean': self.getMean(current),
                     'median': self.getMedian(current),
                     'mode': self.getMode(current),
                     'sd': self.getStDev(current)}
            entry = {'stats': stats}
            entry.update(self.getSentiAnaly(num))
            appStats[title] = entry
        apps['appNames'] = appStats
        return apps
saeki-masaki/cinder | cinder/volume/drivers/emc/emc_cli_fc.py | 1 | 10101 | # Copyright (c) 2012 - 2015 EMC Corporation, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fibre Channel Driver for EMC VNX array based on CLI.
"""
from oslo_log import log as logging
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vnx_cli
from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
class EMCCLIFCDriver(driver.FibreChannelDriver):
    """EMC FC Driver for VNX using CLI.

    This class is a thin protocol adapter: every operation is delegated
    to the shared emc_vnx_cli helper created in __init__.

    Version history:
        1.0.0 - Initial driver
        2.0.0 - Thick/thin provisioning, robust enhancement
        3.0.0 - Array-based Backend Support, FC Basic Support,
                Target Port Selection for MPIO,
                Initiator Auto Registration,
                Storage Group Auto Deletion,
                Multiple Authentication Type Support,
                Storage-Assisted Volume Migration,
                SP Toggle for HA
        3.0.1 - Security File Support
        4.0.0 - Advance LUN Features (Compression Support,
                Deduplication Support, FAST VP Support,
                FAST Cache Support), Storage-assisted Retype,
                External Volume Management, Read-only Volume,
                FC Auto Zoning
        4.1.0 - Consistency group support
        5.0.0 - Performance enhancement, LUN Number Threshold Support,
                Initiator Auto Deregistration,
                Force Deleting LUN in Storage Groups,
                robust enhancement
        5.1.0 - iSCSI multipath enhancement
        5.2.0 - Pool-aware scheduler support
        5.3.0 - Consistency group modification support
        6.0.0 - Over subscription support
                Create consistency group from cgsnapshot support
                Multiple pools support enhancement
                Manage/unmanage volume revise
    """
    def __init__(self, *args, **kwargs):
        super(EMCCLIFCDriver, self).__init__(*args, **kwargs)
        # All real work is delegated to the protocol-aware CLI helper.
        self.cli = emc_vnx_cli.getEMCVnxCli(
            'FC',
            configuration=self.configuration)
        self.VERSION = self.cli.VERSION
    def check_for_setup_error(self):
        pass
    def create_volume(self, volume):
        """Creates a volume."""
        return self.cli.create_volume(volume)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        return self.cli.create_volume_from_snapshot(volume, snapshot)
    def create_cloned_volume(self, volume, src_vref):
        """Creates a cloned volume."""
        return self.cli.create_cloned_volume(volume, src_vref)
    def extend_volume(self, volume, new_size):
        """Extend a volume."""
        self.cli.extend_volume(volume, new_size)
    def delete_volume(self, volume):
        """Deletes a volume."""
        self.cli.delete_volume(volume)
    def migrate_volume(self, ctxt, volume, host):
        """Migrate volume via EMC migration functionality."""
        return self.cli.migrate_volume(ctxt, volume, host)
    def retype(self, ctxt, volume, new_type, diff, host):
        """Convert the volume to be of the new type."""
        return self.cli.retype(ctxt, volume, new_type, diff, host)
    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        self.cli.create_snapshot(snapshot)
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        self.cli.delete_snapshot(snapshot)
    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        pass
    def create_export(self, context, volume):
        """Driver entry point to get the export info for a new volume."""
        pass
    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume."""
        pass
    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        pass
    # The zonemanager decorator adds the FC zone after a successful attach.
    @zm_utils.AddFCZone
    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info.

        Assign any created volume to a compute node/host so that it can be
        used from that host.

        The driver returns a driver_volume_type of 'fibre_channel'.
        The target_wwn can be a single entry or a list of wwns that
        correspond to the list of remote wwn(s) that will export the volume.
        The initiator_target_map is a map that represents the remote wwn(s)
        and a list of wwns which are visible to the remote wwn(s).

        Example return values:
            {
                'driver_volume_type': 'fibre_channel'
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': '1234567890123',
                    'access_mode': 'rw'
                    'initiator_target_map': {
                        '1122334455667788': ['1234567890123']
                    }
                }
            }
            or
            {
                'driver_volume_type': 'fibre_channel'
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': ['1234567890123', '0987654321321'],
                    'access_mode': 'rw'
                    'initiator_target_map': {
                        '1122334455667788': ['1234567890123',
                                             '0987654321321']
                    }
                }
            }
        """
        conn_info = self.cli.initialize_connection(volume,
                                                   connector)
        LOG.debug("Exit initialize_connection"
                  " - Returning FC connection info: %(conn_info)s.",
                  {'conn_info': conn_info})
        return conn_info
    # The zonemanager decorator removes the FC zone after the detach.
    @zm_utils.RemoveFCZone
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        conn_info = self.cli.terminate_connection(volume, connector)
        LOG.debug("Exit terminate_connection"
                  " - Returning FC connection info: %(conn_info)s.",
                  {'conn_info': conn_info})
        return conn_info
    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self.update_volume_stats()
        return self._stats
    def update_volume_stats(self):
        """Retrieve stats info from volume group."""
        LOG.debug("Updating volume stats.")
        data = self.cli.update_volume_stats()
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or 'EMCCLIFCDriver'
        data['storage_protocol'] = 'FC'
        # Refreshes the cache returned by get_volume_stats().
        self._stats = data
    def manage_existing(self, volume, existing_ref):
        """Manage an existing lun in the array.

        The lun should be in a manageable pool backend, otherwise
        error would return.
        Rename the backend storage object so that it matches the,
        volume['name'] which is how drivers traditionally map between a
        cinder volume and the associated backend storage object.

        manage_existing_ref:{
            'source-id':<lun id in VNX>
        }
        or
        manage_existing_ref:{
            'source-name':<lun name in VNX>
        }
        """
        self.cli.manage_existing(volume, existing_ref)
    def manage_existing_get_size(self, volume, existing_ref):
        """Return size of volume to be managed by manage_existing.
        """
        return self.cli.manage_existing_get_size(volume, existing_ref)
    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        return self.cli.create_consistencygroup(context, group)
    # NOTE(review): unlike create_consistencygroup above, the next three
    # cli helpers are passed ``self`` (the driver) as their first
    # argument -- presumably their signatures expect the driver instance;
    # confirm against emc_vnx_cli before changing.
    def delete_consistencygroup(self, context, group):
        """Deletes a consistency group."""
        return self.cli.delete_consistencygroup(
            self, context, group)
    def create_cgsnapshot(self, context, cgsnapshot):
        """Creates a cgsnapshot."""
        return self.cli.create_cgsnapshot(
            self, context, cgsnapshot)
    def delete_cgsnapshot(self, context, cgsnapshot):
        """Deletes a cgsnapshot."""
        return self.cli.delete_cgsnapshot(self, context, cgsnapshot)
    def get_pool(self, volume):
        """Returns the pool name of a volume."""
        return self.cli.get_pool(volume)
    def update_consistencygroup(self, context, group,
                                add_volumes,
                                remove_volumes):
        """Updates LUNs in consistency group."""
        return self.cli.update_consistencygroup(context, group,
                                                add_volumes,
                                                remove_volumes)
    def unmanage(self, volume):
        """Unmanages a volume."""
        return self.cli.unmanage(volume)
    def create_consistencygroup_from_src(self, context, group, volumes,
                                         cgsnapshot=None, snapshots=None):
        """Creates a consistency group from source."""
        return self.cli.create_consistencygroup_from_src(context,
                                                         group,
                                                         volumes,
                                                         cgsnapshot,
                                                         snapshots)
| apache-2.0 |
EuropecoinEUORG/Europecoin-V3 | qa/rpc-tests/listtransactions.py | 72 | 4671 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Exercise the listtransactions API
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def check_array_result(object_array, to_match, expected):
    """Assert that at least one object matching *to_match* also carries
    every key/value pair in *expected*.

    Raises AssertionError when an expected value differs on a matched
    object, or when no object matches *to_match* at all.
    """
    num_matched = 0
    for item in object_array:
        # Evaluate every to_match key (no short-circuit), mirroring the
        # original's full scan: a missing key raises KeyError either way.
        mismatches = [key for key, value in to_match.items() if item[key] != value]
        if mismatches:
            continue
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched += 1
    if num_matched == 0:
        raise AssertionError("No objects matched %s" % (str(to_match)))
class ListTransactionsTest(BitcoinTestFramework):
    """Exercises listtransactions across simple sends, confirmations,
    self-sends and a multi-destination sendmany."""
    def run_test(self):
        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
        self.sync_all()
        # Unconfirmed: sender sees 'send', receiver sees 'receive'.
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":0})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":0})
        # mine a block, confirmations should change:
        self.nodes[0].generate(1)
        self.sync_all()
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid},
                           {"category":"send","account":"","amount":Decimal("-0.1"),"confirmations":1})
        check_array_result(self.nodes[1].listtransactions(),
                           {"txid":txid},
                           {"category":"receive","account":"","amount":Decimal("0.1"),"confirmations":1})
        # send-to-self: the same txid shows up as both send and receive.
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"send"},
                           {"amount":Decimal("-0.2")})
        check_array_result(self.nodes[0].listtransactions(),
                           {"txid":txid, "category":"receive"},
                           {"amount":Decimal("0.2")})
        # sendmany from node1: twice to self, twice to node2:
        send_to = { self.nodes[0].getnewaddress() : 0.11,
                    self.nodes[1].getnewaddress() : 0.22,
                    self.nodes[0].getaccountaddress("from1") : 0.33,
                    self.nodes[1].getaccountaddress("toself") : 0.44 }
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        # Each output is checked from both the sending and (where it is a
        # wallet of ours) the receiving side, including account labels.
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.11")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.22")},
                           {"txid":txid} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.33")},
                           {"txid":txid} )
        check_array_result(self.nodes[0].listtransactions(),
                           {"category":"receive","amount":Decimal("0.33")},
                           {"txid":txid, "account" : "from1"} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"send","amount":Decimal("-0.44")},
                           {"txid":txid, "account" : ""} )
        check_array_result(self.nodes[1].listtransactions(),
                           {"category":"receive","amount":Decimal("0.44")},
                           {"txid":txid, "account" : "toself"} )
# Standard test-harness entry point.
if __name__ == '__main__':
    ListTransactionsTest().main()
| mit |
pointhi/searx | searx/engines/google_images.py | 1 | 2186 | """
Google (Images)
@website https://www.google.com
@provide-api yes (https://developers.google.com/custom-search/)
@using-api no
@results HTML chunks with JSON inside
@stable no
@parse url, title, img_src
"""
from urllib import urlencode
from json import loads
from lxml import html
# engine dependent config
categories = ['images']
paging = True
safesearch = True
time_range_support = True
number_of_results = 100

# The "ichunk" endpoint answers with HTML fragments plus embedded JSON
# metadata rather than a regular results page.
search_url = ('https://www.google.com/search'
              '?{query}'
              '&asearch=ichunk'
              '&async=_id:rg_s,_pms:s'
              '&tbm=isch'
              '&yv=2'
              '&{search_options}')

# tbs=qdr:<d|w|m> restricts results to the last day/week/month.
time_range_attr = "qdr:{range}"
time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm'}
# do search-request
def request(query, params):
    """Fill in params['url'] for a Google Images 'ichunk' request."""
    search_options = {
        'ijn': params['pageno'] - 1,
        'start': (params['pageno'] - 1) * number_of_results,
    }
    time_range = params['time_range']
    if time_range in time_range_dict:
        search_options['tbs'] = time_range_attr.format(range=time_range_dict[time_range])
    if safesearch and params['safesearch']:
        search_options['safe'] = 'on'
    params['url'] = search_url.format(
        query=urlencode({'q': query}),
        search_options=urlencode(search_options),
    )
    return params
# get response from search-request
def response(resp):
    """Extract image results from Google's chunked HTML/JSON answer."""
    results = []
    g_result = loads(resp.text)
    # The second element of the second chunk holds the result HTML.
    dom = html.fromstring(g_result[1][1])
    # parse results
    for result in dom.xpath('//div[@data-ved]'):
        try:
            metadata = loads(''.join(result.xpath('./div[@class="rg_meta"]/text()')))
        except Exception:
            # Narrowed from a bare except: skip entries whose metadata
            # blob is missing or not valid JSON, without swallowing
            # KeyboardInterrupt/SystemExit.
            continue
        thumbnail_src = metadata['tu']
        # http to https
        thumbnail_src = thumbnail_src.replace("http://", "https://")
        # append result
        results.append({'url': metadata['ru'],
                        'title': metadata['pt'],
                        'content': metadata['s'],
                        'thumbnail_src': thumbnail_src,
                        'img_src': metadata['ou'],
                        'template': 'images.html'})
    # return results
    return results
| agpl-3.0 |
dln/medida | test/gtest-1.6.0/test/gtest_help_test.py | 2968 | 5856 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the --help flag of Google C++ Testing Framework.
SYNOPSIS
gtest_help_test.py --build_dir=BUILD/DIR
# where BUILD/DIR contains the built gtest_help_test_ file.
gtest_help_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import gtest_test_utils
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
IS_WINDOWS = os.name == 'nt'

PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_')

FLAG_PREFIX = '--gtest_'
DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style'
STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to'
UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
# The same flag spelled three wrong ways: single dash, slash, hyphens.
INCORRECT_FLAG_VARIANTS = [re.sub(pattern, repl, LIST_TESTS_FLAG)
                           for pattern, repl in (('^--', '-'),
                                                 ('^--', '/'),
                                                 ('_', '-'))]
INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing'

SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess(
    [PROGRAM_PATH, LIST_TESTS_FLAG]).output

# The help message must match this regex.
_HELP_FLAG_SUFFIXES = [
    r'list_tests.*', r'filter=.*', r'also_run_disabled_tests.*',
    r'repeat=.*', r'shuffle.*', r'random_seed=.*', r'color=.*',
    r'print_time.*', r'output=.*', r'break_on_failure.*',
    r'throw_on_failure.*', r'catch_exceptions=0.*']
HELP_REGEX = re.compile(
    ''.join(FLAG_PREFIX + suffix for suffix in _HELP_FLAG_SUFFIXES),
    re.DOTALL)
def RunWithFlag(flag):
  """Invokes gtest_help_test_ with the given command-line flag.

  Args:
    flag: the command-line flag to pass to gtest_help_test_, or None
        to run the binary with no flag at all.

  Returns:
    The exit code and the text output of the child process, as a tuple.
  """
  command = [PROGRAM_PATH]
  if flag is not None:
    command.append(flag)
  child = gtest_test_utils.Subprocess(command)
  return child.exit_code, child.output
class GTestHelpTest(gtest_test_utils.TestCase):
  """Tests the --help flag and its equivalent forms."""

  def TestHelpFlag(self, flag):
    """Verifies correct behavior when a help flag is specified.

    The right message must be printed and the tests must be
    skipped when the given flag is specified.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # Printing help is not an error, so the binary must exit cleanly.
    # NOTE: assertEquals/assert_ are deprecated unittest aliases; use the
    # canonical assertEqual/assertTrue instead.
    self.assertEqual(0, exit_code)
    self.assertTrue(HELP_REGEX.search(output), output)
    # The stream_result_to flag is advertised only on Linux builds.
    if IS_LINUX:
      self.assertTrue(STREAM_RESULT_TO_FLAG in output, output)
    else:
      self.assertTrue(STREAM_RESULT_TO_FLAG not in output, output)
    # The death-test flag appears only when the binary was built with
    # death-test support, and never on Windows.
    if SUPPORTS_DEATH_TESTS and not IS_WINDOWS:
      self.assertTrue(DEATH_TEST_STYLE_FLAG in output, output)
    else:
      self.assertTrue(DEATH_TEST_STYLE_FLAG not in output, output)

  def TestNonHelpFlag(self, flag):
    """Verifies correct behavior when no help flag is specified.

    Verifies that when no help flag is specified, the tests are run
    and the help message is not printed.

    Args:
      flag: A flag to pass to the binary or None.
    """
    exit_code, output = RunWithFlag(flag)
    # A real test run of this binary is expected to exit non-zero.
    self.assertTrue(exit_code != 0)
    self.assertTrue(not HELP_REGEX.search(output), output)

  def testPrintsHelpWithFullFlag(self):
    self.TestHelpFlag('--help')

  def testPrintsHelpWithShortFlag(self):
    self.TestHelpFlag('-h')

  def testPrintsHelpWithQuestionFlag(self):
    self.TestHelpFlag('-?')

  def testPrintsHelpWithWindowsStyleQuestionFlag(self):
    self.TestHelpFlag('/?')

  def testPrintsHelpWithUnrecognizedGoogleTestFlag(self):
    self.TestHelpFlag(UNKNOWN_FLAG)

  def testPrintsHelpWithIncorrectFlagStyle(self):
    for incorrect_flag in INCORRECT_FLAG_VARIANTS:
      self.TestHelpFlag(incorrect_flag)

  def testRunsTestsWithoutHelpFlag(self):
    """Verifies that when no help flag is specified, the tests are run
    and the help message is not printed."""
    self.TestNonHelpFlag(None)

  def testRunsTestsWithGtestInternalFlag(self):
    """Verifies that the tests are run and no help message is printed when
    a flag starting with Google Test prefix and 'internal_' is supplied."""
    self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING)
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
bob-the-hamster/commandergenius | project/jni/python/src/Lib/encodings/cp1258.py | 593 | 13620 | """ Python Character Mapping Codec cp1258 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1258.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless encoder/decoder for the cp1258 (Windows Vietnamese) charmap."""

    def encode(self,input,errors='strict'):
        # Translate each unicode character to a cp1258 byte via encoding_table.
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        # Translate each byte to unicode via the 256-entry decoding_table.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap codecs keep no state between calls."""

    def encode(self, input, final=False):
        # charmap_encode returns (output, length); [0] keeps only the output.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap codecs keep no state between calls."""

    def decode(self, input, final=False):
        # charmap_decode returns (output, length); [0] keeps only the output.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer: all encoding behaviour is inherited from Codec.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader: all decoding behaviour is inherited from Codec.
    pass
### encodings module API
def getregentry():
    """Return the codecs.CodecInfo record that registers this module as
    the 'cp1258' codec with the encodings package."""
    return codecs.CodecInfo(
        name='cp1258',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u20ac' # 0x80 -> EURO SIGN
u'\ufffe' # 0x81 -> UNDEFINED
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\ufffe' # 0x8A -> UNDEFINED
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
u'\ufffe' # 0x8D -> UNDEFINED
u'\ufffe' # 0x8E -> UNDEFINED
u'\ufffe' # 0x8F -> UNDEFINED
u'\ufffe' # 0x90 -> UNDEFINED
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\u02dc' # 0x98 -> SMALL TILDE
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\ufffe' # 0x9A -> UNDEFINED
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
u'\ufffe' # 0x9D -> UNDEFINED
u'\ufffe' # 0x9E -> UNDEFINED
u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\xa5' # 0xA5 -> YEN SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\xaf' # 0xAF -> MACRON
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\xb4' # 0xB4 -> ACUTE ACCENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\xb8' # 0xB8 -> CEDILLA
u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
u'\xbf' # 0xBF -> INVERTED QUESTION MARK
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u0300' # 0xCC -> COMBINING GRAVE ACCENT
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u0309' # 0xD2 -> COMBINING HOOK ABOVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u01a0' # 0xD5 -> LATIN CAPITAL LETTER O WITH HORN
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u01af' # 0xDD -> LATIN CAPITAL LETTER U WITH HORN
u'\u0303' # 0xDE -> COMBINING TILDE
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0301' # 0xEC -> COMBINING ACUTE ACCENT
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\u0323' # 0xF2 -> COMBINING DOT BELOW
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\u01a1' # 0xF5 -> LATIN SMALL LETTER O WITH HORN
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf7' # 0xF7 -> DIVISION SIGN
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u01b0' # 0xFD -> LATIN SMALL LETTER U WITH HORN
u'\u20ab' # 0xFE -> DONG SIGN
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| lgpl-2.1 |
zmughal/pygments-mirror | tests/test_lexers_other.py | 1 | 2188 | # -*- coding: utf-8 -*-
"""
Tests for other lexers
~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import glob
import os
import unittest
from pygments.lexers import guess_lexer
from pygments.lexers.scripting import RexxLexer
def _exampleFilePath(filename):
    """Returns the path of *filename* inside the tests/examplefiles dir."""
    tests_dir = os.path.dirname(__file__)
    return os.path.join(tests_dir, 'examplefiles', filename)
class AnalyseTextTest(unittest.TestCase):
    """Checks that a lexer's analyse_text() recognises its own example files
    and that guess_lexer() picks that lexer for them."""

    def _testCanRecognizeAndGuessExampleFiles(self, lexer):
        # Walk every example file matching each of the lexer's filename globs.
        assert lexer is not None
        for pattern in lexer.filenames:
            exampleFilesPattern = _exampleFilePath(pattern)
            for exampleFilePath in glob.glob(exampleFilesPattern):
                # Example files are stored as UTF-8; decode explicitly so the
                # test is independent of the platform default encoding.
                with open(exampleFilePath, 'rb') as fp:
                    text = fp.read().decode('utf-8')
                probability = lexer.analyse_text(text)
                self.assertTrue(probability > 0,
                                '%s must recognize %r' % (
                                    lexer.name, exampleFilePath))
                guessedLexer = guess_lexer(text)
                self.assertEqual(guessedLexer.name, lexer.name)

    def testCanRecognizeAndGuessExampleFiles(self):
        # Currently only exercised for the Rexx lexer.
        self._testCanRecognizeAndGuessExampleFiles(RexxLexer)
class RexxLexerTest(unittest.TestCase):
    """Spot-checks RexxLexer.analyse_text() confidence values on small
    Rexx snippets of increasing distinctiveness."""

    def testCanGuessFromText(self):
        # A bare comment gives only the minimal 0.01 confidence.
        self.assertAlmostEqual(0.01,
                               RexxLexer.analyse_text('/* */'))
        # An explicit "Rexx" marker in the opening comment is conclusive.
        self.assertAlmostEqual(1.0,
                               RexxLexer.analyse_text('''/* Rexx */
                say "hello world"'''))
        # A procedure label plus "say" should score well above 0.5.
        val = RexxLexer.analyse_text('/* */\n'
                                     'hello:pRoceduRe\n'
                                     '    say "hello world"')
        self.assertTrue(val > 0.5, val)
        # do/end blocks are weaker evidence but still above 0.2.
        val = RexxLexer.analyse_text('''/* */
                if 1 > 0 then do
                    say "ok"
                end
                else do
                    say "huh?"
                end''')
        self.assertTrue(val > 0.2, val)
        # "parse value" is another characteristic Rexx construct.
        val = RexxLexer.analyse_text('''/* */
                greeting = "hello world!"
                parse value greeting "hello" name "!"
                say name''')
        self.assertTrue(val > 0.2, val)
| bsd-2-clause |
corruptnova/namebench | nb_third_party/dns/tokenizer.py | 246 | 17962 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Tokenize DNS master file format"""
import cStringIO
import sys
import dns.exception
import dns.name
import dns.ttl
_DELIMITERS = {
' ' : True,
'\t' : True,
'\n' : True,
';' : True,
'(' : True,
')' : True,
'"' : True }
_QUOTING_DELIMITERS = { '"' : True }
EOF = 0
EOL = 1
WHITESPACE = 2
IDENTIFIER = 3
QUOTED_STRING = 4
COMMENT = 5
DELIMITER = 6
class UngetBufferFull(dns.exception.DNSException):
    """Raised when an attempt is made to unget a token when the unget
    buffer is full."""
    # The unget buffers (both character and token) hold exactly one item.
    pass
class Token(object):
    """A DNS master file format token.

    @ivar ttype: The token type
    @type ttype: int
    @ivar value: The token value
    @type value: string
    @ivar has_escape: Does the token value contain escapes?
    @type has_escape: bool
    """

    def __init__(self, ttype, value='', has_escape=False):
        """Initialize a token instance.

        @param ttype: The token type
        @type ttype: int
        @param value: The token value
        @type value: string
        @param has_escape: Does the token value contain escapes?
        @type has_escape: bool
        """
        self.ttype = ttype
        self.value = value
        self.has_escape = has_escape

    # Type predicates: ttype is one of the module-level constants
    # (EOF, EOL, WHITESPACE, IDENTIFIER, QUOTED_STRING, COMMENT, DELIMITER).

    def is_eof(self):
        return self.ttype == EOF

    def is_eol(self):
        return self.ttype == EOL

    def is_whitespace(self):
        return self.ttype == WHITESPACE

    def is_identifier(self):
        return self.ttype == IDENTIFIER

    def is_quoted_string(self):
        return self.ttype == QUOTED_STRING

    def is_comment(self):
        return self.ttype == COMMENT

    def is_delimiter(self):
        return self.ttype == DELIMITER

    def is_eol_or_eof(self):
        return (self.ttype == EOL or self.ttype == EOF)

    def __eq__(self, other):
        # Tokens compare on (ttype, value); has_escape does not participate.
        if not isinstance(other, Token):
            return False
        return (self.ttype == other.ttype and
                self.value == other.value)

    def __ne__(self, other):
        if not isinstance(other, Token):
            return True
        return (self.ttype != other.ttype or
                self.value != other.value)

    def __str__(self):
        return '%d "%s"' % (self.ttype, self.value)

    def unescape(self):
        """Return a new Token with backslash escapes resolved.

        Handles both single-character escapes (\\X -> X) and three-digit
        decimal escapes (\\DDD -> chr(DDD)).  Returns self unchanged when
        has_escape is false.
        """
        if not self.has_escape:
            return self
        unescaped = ''
        l = len(self.value)
        i = 0
        while i < l:
            c = self.value[i]
            i += 1
            if c == '\\':
                if i >= l:
                    raise dns.exception.UnexpectedEnd
                c = self.value[i]
                i += 1
                if c.isdigit():
                    # Decimal escape: exactly two more digits must follow.
                    if i >= l:
                        raise dns.exception.UnexpectedEnd
                    c2 = self.value[i]
                    i += 1
                    if i >= l:
                        raise dns.exception.UnexpectedEnd
                    c3 = self.value[i]
                    i += 1
                    if not (c2.isdigit() and c3.isdigit()):
                        raise dns.exception.SyntaxError
                    c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
            unescaped += c
        return Token(self.ttype, unescaped)

    # compatibility for old-style tuple tokens: a Token behaves like the
    # 2-tuple (ttype, value) for len(), iteration and indexing.

    def __len__(self):
        return 2

    def __iter__(self):
        return iter((self.ttype, self.value))

    def __getitem__(self, i):
        if i == 0:
            return self.ttype
        elif i == 1:
            return self.value
        else:
            raise IndexError
class Tokenizer(object):
"""A DNS master file format tokenizer.
A token is a (type, value) tuple, where I{type} is an int, and
I{value} is a string. The valid types are EOF, EOL, WHITESPACE,
IDENTIFIER, QUOTED_STRING, COMMENT, and DELIMITER.
@ivar file: The file to tokenize
@type file: file
@ivar ungotten_char: The most recently ungotten character, or None.
@type ungotten_char: string
@ivar ungotten_token: The most recently ungotten token, or None.
@type ungotten_token: (int, string) token tuple
@ivar multiline: The current multiline level. This value is increased
by one every time a '(' delimiter is read, and decreased by one every time
a ')' delimiter is read.
@type multiline: int
@ivar quoting: This variable is true if the tokenizer is currently
reading a quoted string.
@type quoting: bool
@ivar eof: This variable is true if the tokenizer has encountered EOF.
@type eof: bool
@ivar delimiters: The current delimiter dictionary.
@type delimiters: dict
@ivar line_number: The current line number
@type line_number: int
@ivar filename: A filename that will be returned by the L{where} method.
@type filename: string
"""
def __init__(self, f=sys.stdin, filename=None):
"""Initialize a tokenizer instance.
@param f: The file to tokenize. The default is sys.stdin.
This parameter may also be a string, in which case the tokenizer
will take its input from the contents of the string.
@type f: file or string
@param filename: the name of the filename that the L{where} method
will return.
@type filename: string
"""
if isinstance(f, str):
f = cStringIO.StringIO(f)
if filename is None:
filename = '<string>'
else:
if filename is None:
if f is sys.stdin:
filename = '<stdin>'
else:
filename = '<file>'
self.file = f
self.ungotten_char = None
self.ungotten_token = None
self.multiline = 0
self.quoting = False
self.eof = False
self.delimiters = _DELIMITERS
self.line_number = 1
self.filename = filename
def _get_char(self):
"""Read a character from input.
@rtype: string
"""
if self.ungotten_char is None:
if self.eof:
c = ''
else:
c = self.file.read(1)
if c == '':
self.eof = True
elif c == '\n':
self.line_number += 1
else:
c = self.ungotten_char
self.ungotten_char = None
return c
def where(self):
"""Return the current location in the input.
@rtype: (string, int) tuple. The first item is the filename of
the input, the second is the current line number.
"""
return (self.filename, self.line_number)
def _unget_char(self, c):
"""Unget a character.
The unget buffer for characters is only one character large; it is
an error to try to unget a character when the unget buffer is not
empty.
@param c: the character to unget
@type c: string
@raises UngetBufferFull: there is already an ungotten char
"""
if not self.ungotten_char is None:
raise UngetBufferFull
self.ungotten_char = c
def skip_whitespace(self):
"""Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int
"""
skipped = 0
while True:
c = self._get_char()
if c != ' ' and c != '\t':
if (c != '\n') or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1
def get(self, want_leading = False, want_comment = False):
"""Get the next token.
@param want_leading: If True, return a WHITESPACE token if the
first character read is whitespace. The default is False.
@type want_leading: bool
@param want_comment: If True, return a COMMENT token if the
first token read is a comment. The default is False.
@type want_comment: bool
@rtype: Token object
@raises dns.exception.UnexpectedEnd: input ended prematurely
@raises dns.exception.SyntaxError: input was badly formed
"""
if not self.ungotten_token is None:
token = self.ungotten_token
self.ungotten_token = None
if token.is_whitespace():
if want_leading:
return token
elif token.is_comment():
if want_comment:
return token
else:
return token
skipped = self.skip_whitespace()
if want_leading and skipped > 0:
return Token(WHITESPACE, ' ')
token = ''
ttype = IDENTIFIER
has_escape = False
while True:
c = self._get_char()
if c == '' or c in self.delimiters:
if c == '' and self.quoting:
raise dns.exception.UnexpectedEnd
if token == '' and ttype != QUOTED_STRING:
if c == '(':
self.multiline += 1
self.skip_whitespace()
continue
elif c == ')':
if not self.multiline > 0:
raise dns.exception.SyntaxError
self.multiline -= 1
self.skip_whitespace()
continue
elif c == '"':
if not self.quoting:
self.quoting = True
self.delimiters = _QUOTING_DELIMITERS
ttype = QUOTED_STRING
continue
else:
self.quoting = False
self.delimiters = _DELIMITERS
self.skip_whitespace()
continue
elif c == '\n':
return Token(EOL, '\n')
elif c == ';':
while 1:
c = self._get_char()
if c == '\n' or c == '':
break
token += c
if want_comment:
self._unget_char(c)
return Token(COMMENT, token)
elif c == '':
if self.multiline:
raise dns.exception.SyntaxError('unbalanced parentheses')
return Token(EOF)
elif self.multiline:
self.skip_whitespace()
token = ''
continue
else:
return Token(EOL, '\n')
else:
# This code exists in case we ever want a
# delimiter to be returned. It never produces
# a token currently.
token = c
ttype = DELIMITER
else:
self._unget_char(c)
break
elif self.quoting:
if c == '\\':
c = self._get_char()
if c == '':
raise dns.exception.UnexpectedEnd
if c.isdigit():
c2 = self._get_char()
if c2 == '':
raise dns.exception.UnexpectedEnd
c3 = self._get_char()
if c == '':
raise dns.exception.UnexpectedEnd
if not (c2.isdigit() and c3.isdigit()):
raise dns.exception.SyntaxError
c = chr(int(c) * 100 + int(c2) * 10 + int(c3))
elif c == '\n':
raise dns.exception.SyntaxError('newline in quoted string')
elif c == '\\':
#
# It's an escape. Put it and the next character into
# the token; it will be checked later for goodness.
#
token += c
has_escape = True
c = self._get_char()
if c == '' or c == '\n':
raise dns.exception.UnexpectedEnd
token += c
if token == '' and ttype != QUOTED_STRING:
if self.multiline:
raise dns.exception.SyntaxError('unbalanced parentheses')
ttype = EOF
return Token(ttype, token, has_escape)
def unget(self, token):
"""Unget a token.
The unget buffer for tokens is only one token large; it is
an error to try to unget a token when the unget buffer is not
empty.
@param token: the token to unget
@type token: Token object
@raises UngetBufferFull: there is already an ungotten token
"""
if not self.ungotten_token is None:
raise UngetBufferFull
self.ungotten_token = token
def next(self):
"""Return the next item in an iteration.
@rtype: (int, string)
"""
token = self.get()
if token.is_eof():
raise StopIteration
return token
def __iter__(self):
return self
# Helpers
def get_int(self):
"""Read the next token and interpret it as an integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
if not token.value.isdigit():
raise dns.exception.SyntaxError('expecting an integer')
return int(token.value)
def get_uint8(self):
"""Read the next token and interpret it as an 8-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
value = self.get_int()
if value < 0 or value > 255:
raise dns.exception.SyntaxError('%d is not an unsigned 8-bit integer' % value)
return value
def get_uint16(self):
"""Read the next token and interpret it as a 16-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
value = self.get_int()
if value < 0 or value > 65535:
raise dns.exception.SyntaxError('%d is not an unsigned 16-bit integer' % value)
return value
def get_uint32(self):
"""Read the next token and interpret it as a 32-bit unsigned
integer.
@raises dns.exception.SyntaxError:
@rtype: int
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
if not token.value.isdigit():
raise dns.exception.SyntaxError('expecting an integer')
value = long(token.value)
if value < 0 or value > 4294967296L:
raise dns.exception.SyntaxError('%d is not an unsigned 32-bit integer' % value)
return value
def get_string(self, origin=None):
"""Read the next token and interpret it as a string.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get().unescape()
if not (token.is_identifier() or token.is_quoted_string()):
raise dns.exception.SyntaxError('expecting a string')
return token.value
def get_identifier(self, origin=None):
"""Read the next token and raise an exception if it is not an identifier.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return token.value
def get_name(self, origin=None):
"""Read the next token and interpret it as a DNS name.
@raises dns.exception.SyntaxError:
@rtype: dns.name.Name object"""
token = self.get()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return dns.name.from_text(token.value, origin)
def get_eol(self):
"""Read the next token and raise an exception if it isn't EOL or
EOF.
@raises dns.exception.SyntaxError:
@rtype: string
"""
token = self.get()
if not token.is_eol_or_eof():
raise dns.exception.SyntaxError('expected EOL or EOF, got %d "%s"' % (token.ttype, token.value))
return token.value
def get_ttl(self):
token = self.get().unescape()
if not token.is_identifier():
raise dns.exception.SyntaxError('expecting an identifier')
return dns.ttl.from_text(token.value)
| apache-2.0 |
dushu1203/chromium.src | build/android/gyp/util/build_device.py | 50 | 3139 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" A simple device interface for build steps.
"""
import logging
import os
import re
import sys
from util import build_utils
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
sys.path.append(BUILD_ANDROID_DIR)
from pylib import android_commands
from pylib.device import device_errors
from pylib.device import device_utils
GetAttachedDevices = android_commands.GetAttachedDevices
class BuildDevice(object):
  """Wraps a DeviceUtils instance together with a configuration dict
  produced by GetConfigurationForDevice (device id, build description and
  the cached /data/app listing)."""

  def __init__(self, configuration):
    self.id = configuration['id']
    self.description = configuration['description']
    self.install_metadata = configuration['install_metadata']
    self.device = device_utils.DeviceUtils(self.id)

  def RunShellCommand(self, *args, **kwargs):
    # Straight delegation to DeviceUtils.RunShellCommand.
    return self.device.RunShellCommand(*args, **kwargs)

  def PushChangedFiles(self, *args, **kwargs):
    # Straight delegation to DeviceUtils.PushChangedFiles.
    return self.device.PushChangedFiles(*args, **kwargs)

  def GetSerialNumber(self):
    return self.id

  def Install(self, *args, **kwargs):
    # Installation still goes through the legacy AndroidCommands interface.
    return self.device.old_interface.Install(*args, **kwargs)

  def GetInstallMetadata(self, apk_package):
    """Gets the metadata on the device for the apk_package apk."""
    # Matches lines like:
    # -rw-r--r-- system system 7376582 2013-04-19 16:34 \
    #   org.chromium.chrome.shell.apk
    # -rw-r--r-- system system 7376582 2013-04-19 16:34 \
    #   org.chromium.chrome.shell-1.apk
    apk_matcher = lambda s: re.match('.*%s(-[0-9]*)?.apk$' % apk_package, s)
    matches = filter(apk_matcher, self.install_metadata)
    return matches[0] if matches else None
def GetConfigurationForDevice(device_id):
  """Queries the device with the given serial and builds its configuration.

  Args:
    device_id: Serial number of the device to query.

  Returns:
    A (configuration, is_online, has_root) tuple.  |configuration| is None
    when the device is offline; otherwise it is a dict holding the device
    id, the ro.build.description string and the raw /data/app listing used
    later by BuildDevice.GetInstallMetadata.
  """
  device = device_utils.DeviceUtils(device_id)
  configuration = None
  has_root = False
  is_online = device.IsOnline()
  if is_online:
    cmd = 'ls -l /data/app; getprop ro.build.description'
    cmd_output = device.RunShellCommand(cmd)
    # Without root, listing /data/app prints "Permission denied".
    has_root = not 'Permission denied' in cmd_output[0]
    if not has_root:
      # Disable warning log messages from EnableRoot()
      logging.getLogger().disabled = True
      try:
        device.EnableRoot()
        has_root = True
      except device_errors.CommandFailedError:
        has_root = False
      finally:
        logging.getLogger().disabled = False
      # Re-run the query now that root (and thus /data/app) may be readable.
      cmd_output = device.RunShellCommand(cmd)

    configuration = {
        'id': device_id,
        'description': cmd_output[-1],
        'install_metadata': cmd_output[:-1],
    }
  return configuration, is_online, has_root
def WriteConfigurations(configurations, path):
  """Persists device configurations as JSON at |path|."""
  # Currently we only support installing to the first device.
  build_utils.WriteJson(configurations[:1], path, only_if_changed=True)
def ReadConfigurations(path):
  """Loads the list of device configurations from the JSON file at |path|."""
  return build_utils.ReadJson(path)
def GetBuildDevice(configurations):
  """Returns a BuildDevice for a single-entry configuration list."""
  # WriteConfigurations only ever persists one configuration.
  assert len(configurations) == 1
  return BuildDevice(configurations[0])
def GetBuildDeviceFromPath(path):
  """Returns a BuildDevice for the configuration file at |path|, or None
  if the file contains no configurations.

  Args:
    path: Path to a JSON configuration file written by WriteConfigurations.
  """
  configurations = ReadConfigurations(path)
  if len(configurations) > 0:
    # Reuse the already-parsed list; the original re-read and re-parsed
    # the JSON file a second time here.
    return GetBuildDevice(configurations)
  return None
| bsd-3-clause |
mistio/libcloud | libcloud/common/openstack_identity.py | 4 | 61740 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common / shared code for handling authentication against OpenStack identity
service (Keystone).
"""
import datetime
from libcloud.utils.py3 import httplib
from libcloud.utils.iso8601 import parse_date
from libcloud.common.base import (ConnectionUserAndKey, Response,
CertificateConnection)
from libcloud.compute.types import (LibcloudError, InvalidCredsError,
MalformedResponseError)
try:
import simplejson as json
except ImportError:
import json # type: ignore
AUTH_API_VERSION = '1.1'
# Auth versions which contain token expiration information.
AUTH_VERSIONS_WITH_EXPIRES = [
'1.1',
'2.0',
'2.0_apikey',
'2.0_password',
'2.0_voms',
'3.0',
'3.x_password',
'3.x_oidc_access_token'
]
# How many seconds to subtract from the auth token expiration time before
# testing if the token is still valid.
# The time is subtracted to account for the HTTP request latency and prevent
# user from getting "InvalidCredsError" if token is about to expire.
AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5
__all__ = [
'OpenStackIdentityVersion',
'OpenStackIdentityDomain',
'OpenStackIdentityProject',
'OpenStackIdentityUser',
'OpenStackIdentityRole',
'OpenStackServiceCatalog',
'OpenStackServiceCatalogEntry',
'OpenStackServiceCatalogEntryEndpoint',
'OpenStackIdentityEndpointType',
'OpenStackIdentityConnection',
'OpenStackIdentity_1_0_Connection',
'OpenStackIdentity_1_1_Connection',
'OpenStackIdentity_2_0_Connection',
'OpenStackIdentity_2_0_Connection_VOMS',
'OpenStackIdentity_3_0_Connection',
'OpenStackIdentity_3_0_Connection_OIDC_access_token',
'get_class_for_auth_version'
]
class OpenStackIdentityEndpointType(object):
    """
    Enumeration of the endpoint types exposed by the identity service.
    """
    INTERNAL = 'internal'
    EXTERNAL = 'external'
    ADMIN = 'admin'
class OpenStackIdentityTokenScope(object):
    """
    Enumeration of the scopes an auth token can be requested with.
    """
    PROJECT = 'project'
    DOMAIN = 'domain'
    UNSCOPED = 'unscoped'
class OpenStackIdentityVersion(object):
    """A single API version advertised by the identity service."""

    def __init__(self, version, status, updated, url):
        self.version = version
        self.status = status
        self.updated = updated
        self.url = url

    def __repr__(self):
        template = ('<OpenStackIdentityVersion version=%s, status=%s, '
                    'updated=%s, url=%s>')
        return template % (self.version, self.status, self.updated, self.url)
class OpenStackIdentityDomain(object):
    """A Keystone domain (a collection of projects and users)."""

    def __init__(self, id, name, enabled):
        self.id = id
        self.name = name
        self.enabled = enabled

    def __repr__(self):
        template = '<OpenStackIdentityDomain id=%s, name=%s, enabled=%s>'
        return template % (self.id, self.name, self.enabled)
class OpenStackIdentityProject(object):
    """A Keystone project (a.k.a. tenant in v2 terminology)."""

    def __init__(self, id, name, description, enabled, domain_id=None):
        self.id = id
        self.name = name
        self.description = description
        self.enabled = enabled
        # Only populated by catalogs / APIs which expose the owning domain.
        self.domain_id = domain_id

    def __repr__(self):
        template = ('<OpenStackIdentityProject id=%s, domain_id=%s, name=%s, '
                    'enabled=%s>')
        return template % (self.id, self.domain_id, self.name, self.enabled)
class OpenStackIdentityRole(object):
    """A role which can be granted to a user on a project or domain."""

    def __init__(self, id, name, description, enabled):
        self.id = id
        self.name = name
        self.description = description
        self.enabled = enabled

    def __repr__(self):
        template = ('<OpenStackIdentityRole id=%s, name=%s, description=%s, '
                    'enabled=%s>')
        return template % (self.id, self.name, self.description, self.enabled)
class OpenStackIdentityUser(object):
    """A user account managed by the identity service."""

    def __init__(self, id, domain_id, name, email, description, enabled):
        self.id = id
        self.domain_id = domain_id
        self.name = name
        self.email = email
        self.description = description
        self.enabled = enabled

    def __repr__(self):
        template = ('<OpenStackIdentityUser id=%s, domain_id=%s, name=%s, '
                    'email=%s, enabled=%s>')
        return template % (self.id, self.domain_id, self.name, self.email,
                           self.enabled)
class OpenStackServiceCatalog(object):
    """
    http://docs.openstack.org/api/openstack-identity-service/2.0/content/

    This class should be instantiated with the contents of the
    'serviceCatalog' in the auth response. This will do the work of figuring
    out which services actually exist in the catalog as well as split them up
    by type, name, and region if available
    """

    _auth_version = None
    _service_catalog = None

    def __init__(self, service_catalog, auth_version=AUTH_API_VERSION):
        """
        :param service_catalog: Raw catalog structure from the auth response.
            Its shape depends on the auth version (dict for 1.x, list of
            dicts for 2.0 / 3.x).

        :param auth_version: Auth version string the catalog was produced by.
        :type auth_version: ``str``
        """
        self._auth_version = auth_version

        # Check this way because there are a couple of different 2.0_*
        # auth types.
        if '3.x' in self._auth_version:
            entries = self._parse_service_catalog_auth_v3(
                service_catalog=service_catalog)
        elif '2.0' in self._auth_version:
            entries = self._parse_service_catalog_auth_v2(
                service_catalog=service_catalog)
        elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version):
            entries = self._parse_service_catalog_auth_v1(
                service_catalog=service_catalog)
        else:
            raise LibcloudError('auth version "%s" not supported'
                                % (self._auth_version))

        # Force consistent ordering by sorting the entries
        entries = sorted(entries,
                         key=lambda x: x.service_type + (x.service_name or ''))
        self._entries = entries  # stores all the service catalog entries

    def get_entries(self):
        """
        Return all the entries for this service catalog.

        :rtype: ``list`` of :class:`.OpenStackServiceCatalogEntry`
        """
        return self._entries

    def get_catalog(self):
        """
        Deprecated in the favor of ``get_entries`` method.
        """
        return self.get_entries()

    def get_public_urls(self, service_type=None, name=None):
        """
        Retrieve all the available public (external) URLs for the provided
        service type and name.

        :param service_type: Optional service type to filter on.
        :type service_type: ``str``

        :param name: Optional service name to filter on.
        :type name: ``str``

        :rtype: ``list`` of ``str``
        """
        endpoints = self.get_endpoints(service_type=service_type,
                                       name=name)

        result = []
        for endpoint in endpoints:
            endpoint_type = endpoint.endpoint_type
            if endpoint_type == OpenStackIdentityEndpointType.EXTERNAL:
                result.append(endpoint.url)

        return result

    def get_endpoints(self, service_type=None, name=None):
        """
        Retrieve all the endpoints for the provided service type and name.

        :rtype: ``list`` of :class:`.OpenStackServiceCatalogEntryEndpoint`
        """
        endpoints = []

        for entry in self._entries:
            # Note: "if XXX and YYY != XXX" comparison is used to support
            # partial lookups.
            # This allows user to pass in only one argument to the method (only
            # service_type or name), both of them or neither.
            if service_type and entry.service_type != service_type:
                continue

            if name and entry.service_name != name:
                continue

            for endpoint in entry.endpoints:
                endpoints.append(endpoint)

        return endpoints

    def get_endpoint(self, service_type=None, name=None, region=None,
                     endpoint_type=OpenStackIdentityEndpointType.EXTERNAL):
        """
        Retrieve a single endpoint using the provided criteria.

        Note: If no or more than one matching endpoint is found, an exception
        is thrown.

        :raises ValueError: If more than one endpoint matches.
        :raises LibcloudError: If no endpoint matches.
        """
        endpoints = []

        for entry in self._entries:
            if service_type and entry.service_type != service_type:
                continue

            if name and entry.service_name != name:
                continue

            for endpoint in entry.endpoints:
                if region and endpoint.region != region:
                    continue

                if endpoint_type and endpoint.endpoint_type != endpoint_type:
                    continue

                endpoints.append(endpoint)

        if len(endpoints) == 1:
            return endpoints[0]
        elif len(endpoints) > 1:
            raise ValueError('Found more than 1 matching endpoint')
        else:
            raise LibcloudError('Could not find specified endpoint')

    def get_regions(self, service_type=None):
        """
        Retrieve a list of all the available regions.

        :param service_type: If specified, only return regions for this
                             service type.
        :type service_type: ``str``

        :rtype: ``list`` of ``str``
        """
        regions = set()

        for entry in self._entries:
            if service_type and entry.service_type != service_type:
                continue

            for endpoint in entry.endpoints:
                if endpoint.region:
                    regions.add(endpoint.region)

        return sorted(list(regions))

    def get_service_types(self, region=None):
        """
        Retrieve all the available service types.

        :param region: Optional region to retrieve service types for.
        :type region: ``str``

        :rtype: ``list`` of ``str``
        """
        service_types = set()

        for entry in self._entries:
            include = True

            # Only include an entry when every one of its endpoints is in the
            # requested region.
            for endpoint in entry.endpoints:
                if region and endpoint.region != region:
                    include = False
                    break

            if include:
                service_types.add(entry.service_type)

        return sorted(list(service_types))

    def get_service_names(self, service_type=None, region=None):
        """
        Retrieve list of service names that match service type and region.

        :type service_type: ``str``
        :type region: ``str``

        :rtype: ``list`` of ``str``

        :raises ValueError: For non-2.0 auth versions.
        """
        names = set()

        # NOTE(review): only 2.0 catalogs are supported here even though the
        # v3 parser also extracts service names - confirm whether this
        # restriction is intentional.
        if '2.0' not in self._auth_version:
            raise ValueError('Unsupported version: %s' % (self._auth_version))

        for entry in self._entries:
            if service_type and entry.service_type != service_type:
                continue

            include = True
            for endpoint in entry.endpoints:
                if region and endpoint.region != region:
                    include = False
                    break

            if include and entry.service_name:
                names.add(entry.service_name)

        return sorted(list(names))

    def _parse_service_catalog_auth_v1(self, service_catalog):
        # v1.x catalogs are a mapping of service name -> list of endpoint
        # dicts with 'publicURL' / 'internalURL' keys.
        entries = []

        for service, endpoints in service_catalog.items():

            entry_endpoints = []
            for endpoint in endpoints:
                region = endpoint.get('region', None)

                public_url = endpoint.get('publicURL', None)
                private_url = endpoint.get('internalURL', None)

                if public_url:
                    entry_endpoint = OpenStackServiceCatalogEntryEndpoint(
                        region=region, url=public_url,
                        endpoint_type=OpenStackIdentityEndpointType.EXTERNAL)
                    entry_endpoints.append(entry_endpoint)

                if private_url:
                    entry_endpoint = OpenStackServiceCatalogEntryEndpoint(
                        region=region, url=private_url,
                        endpoint_type=OpenStackIdentityEndpointType.INTERNAL)
                    entry_endpoints.append(entry_endpoint)

            entry = OpenStackServiceCatalogEntry(service_type=service,
                                                 endpoints=entry_endpoints)
            entries.append(entry)

        return entries

    def _parse_service_catalog_auth_v2(self, service_catalog):
        # v2.0 catalogs are a list of service dicts, each with 'type',
        # optional 'name' and a list of endpoint dicts.
        entries = []

        for service in service_catalog:
            service_type = service['type']
            service_name = service.get('name', None)

            entry_endpoints = []
            for endpoint in service.get('endpoints', []):
                region = endpoint.get('region', None)

                public_url = endpoint.get('publicURL', None)
                private_url = endpoint.get('internalURL', None)

                if public_url:
                    entry_endpoint = OpenStackServiceCatalogEntryEndpoint(
                        region=region, url=public_url,
                        endpoint_type=OpenStackIdentityEndpointType.EXTERNAL)
                    entry_endpoints.append(entry_endpoint)

                if private_url:
                    entry_endpoint = OpenStackServiceCatalogEntryEndpoint(
                        region=region, url=private_url,
                        endpoint_type=OpenStackIdentityEndpointType.INTERNAL)
                    entry_endpoints.append(entry_endpoint)

            entry = OpenStackServiceCatalogEntry(service_type=service_type,
                                                 endpoints=entry_endpoints,
                                                 service_name=service_name)
            entries.append(entry)

        return entries

    def _parse_service_catalog_auth_v3(self, service_catalog):
        # v3.x catalogs carry one endpoint dict per interface; the
        # 'interface' value maps onto our endpoint type enum.
        entries = []

        for item in service_catalog:
            service_type = item['type']
            service_name = item.get('name', None)

            entry_endpoints = []
            for endpoint in item['endpoints']:
                region = endpoint.get('region', None)
                url = endpoint['url']
                endpoint_type = endpoint['interface']

                if endpoint_type == 'internal':
                    endpoint_type = OpenStackIdentityEndpointType.INTERNAL
                elif endpoint_type == 'public':
                    endpoint_type = OpenStackIdentityEndpointType.EXTERNAL
                elif endpoint_type == 'admin':
                    endpoint_type = OpenStackIdentityEndpointType.ADMIN

                entry_endpoint = OpenStackServiceCatalogEntryEndpoint(
                    region=region, url=url, endpoint_type=endpoint_type)
                entry_endpoints.append(entry_endpoint)

            entry = OpenStackServiceCatalogEntry(service_type=service_type,
                                                 service_name=service_name,
                                                 endpoints=entry_endpoints)
            entries.append(entry)

        return entries
class OpenStackServiceCatalogEntry(object):
    """A single service in the catalog with its associated endpoints."""

    def __init__(self, service_type, endpoints=None, service_name=None):
        """
        :param service_type: Service type.
        :type service_type: ``str``

        :param endpoints: Endpoints belonging to this entry.
        :type endpoints: ``list``

        :param service_name: Optional service name.
        :type service_name: ``str``
        """
        self.service_type = service_type
        self.service_name = service_name
        # Keep endpoint ordering deterministic across auth responses.
        self.endpoints = sorted(endpoints or [], key=lambda e: e.url or '')

    def __eq__(self, other):
        same_type = self.service_type == other.service_type
        same_name = self.service_name == other.service_name
        return same_type and same_name and self.endpoints == other.endpoints

    def __ne__(self, other):
        return not self.__eq__(other=other)

    def __repr__(self):
        return ('<OpenStackServiceCatalogEntry service_type=%s, '
                'service_name=%s, endpoints=%s' %
                (self.service_type, self.service_name, repr(self.endpoints)))
class OpenStackServiceCatalogEntryEndpoint(object):
    """A single endpoint (region + URL + interface type) of a service."""

    VALID_ENDPOINT_TYPES = [
        OpenStackIdentityEndpointType.INTERNAL,
        OpenStackIdentityEndpointType.EXTERNAL,
        OpenStackIdentityEndpointType.ADMIN,
    ]

    def __init__(self, region, url, endpoint_type='external'):
        """
        :param region: Endpoint region.
        :type region: ``str``

        :param url: Endpoint URL.
        :type url: ``str``

        :param endpoint_type: Endpoint type (external / internal / admin).
        :type endpoint_type: ``str``
        """
        if endpoint_type not in self.VALID_ENDPOINT_TYPES:
            raise ValueError('Invalid type: %s' % (endpoint_type))

        # TODO: Normalize / lowercase all the region names
        self.region = region
        self.url = url
        self.endpoint_type = endpoint_type

    def __eq__(self, other):
        same_region = self.region == other.region
        same_url = self.url == other.url
        return same_region and same_url and \
            self.endpoint_type == other.endpoint_type

    def __ne__(self, other):
        return not self.__eq__(other=other)

    def __repr__(self):
        return ('<OpenStackServiceCatalogEntryEndpoint region=%s, url=%s, '
                'type=%s' % (self.region, self.url, self.endpoint_type))
class OpenStackAuthResponse(Response):
    """Response class used by the identity connections."""

    def success(self):
        # 401 and 500 are accepted here on purpose so the auth logic can
        # inspect the body / headers instead of the connection raising.
        valid_statuses = [httplib.OK, httplib.CREATED,
                          httplib.ACCEPTED, httplib.NO_CONTENT,
                          httplib.MULTIPLE_CHOICES,
                          httplib.UNAUTHORIZED,
                          httplib.INTERNAL_SERVER_ERROR]
        return self.status in valid_statuses

    def parse_body(self):
        if not self.body:
            return None

        if 'content-type' in self.headers:
            key = 'content-type'
        elif 'Content-Type' in self.headers:
            key = 'Content-Type'
        else:
            raise LibcloudError('Missing content-type header',
                                driver=OpenStackIdentityConnection)

        content_type = self.headers[key]
        # Drop any parameters, e.g. "application/json; charset=UTF-8".
        content_type = content_type.split(';')[0]

        if content_type == 'application/json':
            try:
                return json.loads(self.body)
            except Exception:
                raise MalformedResponseError(
                    'Failed to parse JSON',
                    body=self.body,
                    driver=OpenStackIdentityConnection)

        # Anything else (including text/plain) is returned verbatim.
        return self.body
class OpenStackIdentityConnection(ConnectionUserAndKey):
    """
    Base identity connection class which contains common / shared logic.

    Note: This class shouldn't be instantiated directly.
    """
    responseCls = OpenStackAuthResponse
    timeout = None
    # Overridden by subclasses, e.g. '1.0', '2.0', '3.0'.
    auth_version = None  # type: str

    def __init__(self, auth_url, user_id, key, tenant_name=None,
                 tenant_domain_id='default', domain_name='Default',
                 token_scope=OpenStackIdentityTokenScope.PROJECT,
                 timeout=None, proxy_url=None, parent_conn=None):
        """
        :param auth_url: URL of the identity (auth) endpoint.
        :type auth_url: ``str``

        :param user_id: Username / id used for authentication.
        :type user_id: ``str``

        :param key: Password / API key used for authentication.
        :type key: ``str``

        :param tenant_name: Optional tenant (project) name.
        :type tenant_name: ``str``

        :param tenant_domain_id: Domain id of the tenant.
            NOTE(review): accepted here but only persisted by the v3
            subclass - confirm whether that is intentional.
        :type tenant_domain_id: ``str``

        :param domain_name: Domain name.
        :type domain_name: ``str``

        :param token_scope: Scope to request tokens with (project / domain /
                            unscoped).
        :type token_scope: ``str``

        :param parent_conn: Optional parent connection whose connection class
                            and driver are reused.
        """
        super(OpenStackIdentityConnection, self).__init__(user_id=user_id,
                                                          key=key,
                                                          url=auth_url,
                                                          timeout=timeout,
                                                          proxy_url=proxy_url)
        self.parent_conn = parent_conn

        # enable tests to use the same mock connection classes.
        if parent_conn:
            self.conn_class = parent_conn.conn_class
            self.driver = parent_conn.driver
        else:
            self.driver = None

        self.auth_url = auth_url
        self.tenant_name = tenant_name
        self.domain_name = domain_name
        self.token_scope = token_scope
        self.timeout = timeout

        # Cached auth state populated by authenticate() in the subclasses.
        self.urls = {}
        self.auth_token = None
        self.auth_token_expires = None
        self.auth_user_info = None

    def authenticated_request(self, action, params=None, data=None,
                              headers=None, method='GET', raw=False):
        """
        Perform an authenticated request against the identity API.

        :raises ValueError: If no auth token has been obtained yet.
        """
        if not self.auth_token:
            raise ValueError('Not to be authenticated to perform this request')

        headers = headers or {}
        headers['X-Auth-Token'] = self.auth_token

        return self.request(action=action, params=params, data=data,
                            headers=headers, method=method, raw=raw)

    def morph_action_hook(self, action):
        # When auth_url itself contains a path component, that path always
        # takes precedence over the action passed in.
        (_, _, _, request_path) = self._tuple_from_url(self.auth_url)

        if request_path == '':
            # No path is provided in the auth_url, use action passed to this
            # method.
            return action

        return request_path

    def add_default_headers(self, headers):
        # All identity requests send and expect JSON.
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json; charset=UTF-8'
        return headers

    def is_token_valid(self):
        """
        Return True if the current auth token is already cached and hasn't
        expired yet.

        :return: ``True`` if the token is still valid, ``False`` otherwise.
        :rtype: ``bool``
        """
        if not self.auth_token:
            return False

        if not self.auth_token_expires:
            return False

        # Subtract a small grace period to account for request latency - see
        # AUTH_TOKEN_EXPIRES_GRACE_SECONDS.
        expires = self.auth_token_expires - \
            datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS)

        time_tuple_expires = expires.utctimetuple()
        time_tuple_now = datetime.datetime.utcnow().utctimetuple()

        if time_tuple_now < time_tuple_expires:
            return True

        return False

    def authenticate(self, force=False):
        """
        Authenticate against the identity API.

        :param force: Forcefully update the token even if it's already cached
                      and still valid.
        :type force: ``bool``
        """
        raise NotImplementedError('authenticate not implemented')

    def list_supported_versions(self):
        """
        Retrieve a list of all the identity versions which are supported by
        this installation.

        :rtype: ``list`` of :class:`.OpenStackIdentityVersion`
        """
        response = self.request('/', method='GET')
        result = self._to_versions(data=response.object['versions']['values'])
        result = sorted(result, key=lambda x: x.version)
        return result

    def _to_versions(self, data):
        # Convert a list of raw version dicts into objects.
        result = []
        for item in data:
            version = self._to_version(data=item)
            result.append(version)

        return result

    def _to_version(self, data):
        # 'updated' and 'links' are optional / best-effort.
        try:
            updated = parse_date(data['updated'])
        except Exception:
            updated = None

        try:
            url = data['links'][0]['href']
        except IndexError:
            url = None

        version = OpenStackIdentityVersion(version=data['id'],
                                           status=data['status'],
                                           updated=updated,
                                           url=url)
        return version

    def _is_authentication_needed(self, force=False):
        """
        Determine if the authentication is needed or if the existing token (if
        any exists) is still valid.
        """
        if force:
            return True

        # Token reuse is only possible for auth versions which report an
        # expiration time.
        if self.auth_version not in AUTH_VERSIONS_WITH_EXPIRES:
            return True

        if self.is_token_valid():
            return False

        return True

    def _to_projects(self, data):
        # Convert a list of raw project dicts into objects.
        result = []
        for item in data:
            project = self._to_project(data=item)
            result.append(project)

        return result

    def _to_project(self, data):
        project = OpenStackIdentityProject(id=data['id'],
                                           name=data['name'],
                                           description=data['description'],
                                           enabled=data['enabled'],
                                           domain_id=data.get('domain_id',
                                                              None))
        return project
class OpenStackIdentity_1_0_Connection(OpenStackIdentityConnection):
    """
    Connection class for Keystone API v1.0.
    """
    responseCls = OpenStackAuthResponse
    name = 'OpenStack Identity API v1.0'
    auth_version = '1.0'

    def authenticate(self, force=False):
        # v1.0 auth is header-based: credentials go out as request headers
        # and the token plus service URLs come back in the response headers.
        if not self._is_authentication_needed(force=force):
            return self

        headers = {
            'X-Auth-User': self.user_id,
            'X-Auth-Key': self.key,
        }

        resp = self.request('/v1.0', headers=headers, method='GET')

        if resp.status == httplib.UNAUTHORIZED:
            # HTTP UNAUTHORIZED (401): auth failed
            raise InvalidCredsError()
        elif resp.status not in [httplib.NO_CONTENT, httplib.OK]:
            body = 'code: %s body:%s headers:%s' % (resp.status,
                                                    resp.body,
                                                    resp.headers)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
        else:
            headers = resp.headers
            # emulate the auth 1.1 URL list
            self.urls = {}
            self.urls['cloudServers'] = \
                [{'publicURL': headers.get('x-server-management-url', None)}]
            self.urls['cloudFilesCDN'] = \
                [{'publicURL': headers.get('x-cdn-management-url', None)}]
            self.urls['cloudFiles'] = \
                [{'publicURL': headers.get('x-storage-url', None)}]
            self.auth_token = headers.get('x-auth-token', None)
            self.auth_user_info = None

            if not self.auth_token:
                raise MalformedResponseError('Missing X-Auth-Token in'
                                             ' response headers')

        return self
class OpenStackIdentity_1_1_Connection(OpenStackIdentityConnection):
    """
    Connection class for Keystone API v1.1.
    """
    responseCls = OpenStackAuthResponse
    name = 'OpenStack Identity API v1.1'
    auth_version = '1.1'

    def authenticate(self, force=False):
        # POST username / key and cache the returned token, its expiration
        # time and the service catalog on success.
        if not self._is_authentication_needed(force=force):
            return self

        reqbody = json.dumps({'credentials': {'username': self.user_id,
                                              'key': self.key}})
        resp = self.request('/v1.1/auth', data=reqbody, headers={},
                            method='POST')

        if resp.status == httplib.UNAUTHORIZED:
            # HTTP UNAUTHORIZED (401): auth failed
            raise InvalidCredsError()
        elif resp.status != httplib.OK:
            body = 'code: %s body:%s' % (resp.status, resp.body)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
        else:
            try:
                body = json.loads(resp.body)
            except Exception as e:
                raise MalformedResponseError('Failed to parse JSON', e)

            try:
                expires = body['auth']['token']['expires']

                self.auth_token = body['auth']['token']['id']
                self.auth_token_expires = parse_date(expires)
                self.urls = body['auth']['serviceCatalog']
                self.auth_user_info = None
            except KeyError as e:
                raise MalformedResponseError('Auth JSON response is \
                                             missing required elements', e)

        return self
class OpenStackIdentity_2_0_Connection(OpenStackIdentityConnection):
    """
    Connection class for Keystone API v2.0.
    """
    responseCls = OpenStackAuthResponse
    # Fix: this previously read "v1.0", copy-pasted from the 1.0 class.
    name = 'OpenStack Identity API v2.0'
    auth_version = '2.0'

    def authenticate(self, auth_type='api_key', force=False):
        """
        Authenticate against the v2.0 API.

        :param auth_type: Either 'api_key' (RAX-KSKEY extension) or
                          'password' (core Keystone authentication).
        :type auth_type: ``str``

        :param force: Re-authenticate even if a valid token is cached.
        :type force: ``bool``

        :raises ValueError: On an unknown ``auth_type``.
        """
        if not self._is_authentication_needed(force=force):
            return self

        if auth_type == 'api_key':
            return self._authenticate_2_0_with_api_key()
        elif auth_type == 'password':
            return self._authenticate_2_0_with_password()
        else:
            raise ValueError('Invalid value for auth_type argument')

    def _authenticate_2_0_with_api_key(self):
        # API Key based authentication uses the RAX-KSKEY extension.
        # http://s.apache.org/oAi
        data = {'auth':
                {'RAX-KSKEY:apiKeyCredentials':
                 {'username': self.user_id, 'apiKey': self.key}}}
        if self.tenant_name:
            data['auth']['tenantName'] = self.tenant_name
        reqbody = json.dumps(data)
        return self._authenticate_2_0_with_body(reqbody)

    def _authenticate_2_0_with_password(self):
        # Password based authentication is the only 'core' authentication
        # method in Keystone at this time.
        # 'keystone' - http://s.apache.org/e8h
        data = {'auth':
                {'passwordCredentials':
                 {'username': self.user_id, 'password': self.key}}}
        if self.tenant_name:
            data['auth']['tenantName'] = self.tenant_name
        reqbody = json.dumps(data)
        return self._authenticate_2_0_with_body(reqbody)

    def _authenticate_2_0_with_body(self, reqbody):
        # POST the credential payload and cache token, expiration, catalog
        # and user info on success.
        resp = self.request('/v2.0/tokens', data=reqbody,
                            headers={'Content-Type': 'application/json'},
                            method='POST')

        if resp.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()
        elif resp.status not in [httplib.OK,
                                 httplib.NON_AUTHORITATIVE_INFORMATION]:
            body = 'code: %s body: %s' % (resp.status, resp.body)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
        else:
            body = resp.object

            try:
                access = body['access']
                expires = access['token']['expires']

                self.auth_token = access['token']['id']
                self.auth_token_expires = parse_date(expires)
                self.urls = access['serviceCatalog']
                self.auth_user_info = access.get('user', {})
            except KeyError as e:
                # Fix: the message previously embedded a line continuation's
                # leading whitespace inside the string literal.
                raise MalformedResponseError('Auth JSON response is '
                                             'missing required elements', e)

        return self

    def list_projects(self):
        """
        List the projects (tenants) visible to the authenticated user.

        :rtype: ``list`` of :class:`.OpenStackIdentityProject`
        """
        response = self.authenticated_request('/v2.0/tenants', method='GET')
        result = self._to_projects(data=response.object['tenants'])
        return result

    def list_tenants(self):
        """Deprecated alias for :meth:`list_projects`."""
        return self.list_projects()
class OpenStackIdentity_3_0_Connection(OpenStackIdentityConnection):
    """
    Connection class for Keystone API v3.x.
    """
    responseCls = OpenStackAuthResponse
    name = 'OpenStack Identity API v3.x'
    auth_version = '3.0'

    # Token scopes this connection knows how to request.
    VALID_TOKEN_SCOPES = [
        OpenStackIdentityTokenScope.PROJECT,
        OpenStackIdentityTokenScope.DOMAIN,
        OpenStackIdentityTokenScope.UNSCOPED
    ]
    def __init__(self, auth_url, user_id, key, tenant_name=None,
                 domain_name='Default', tenant_domain_id='default',
                 token_scope=OpenStackIdentityTokenScope.PROJECT,
                 timeout=None, proxy_url=None, parent_conn=None):
        """
        :param tenant_name: Name of the project this user belongs to. Note:
                            When token_scope is set to project, this argument
                            controls to which project to scope the token to.
        :type tenant_name: ``str``

        :param domain_name: Domain the user belongs to. Note: When token_scope
                            is set to domain, this argument controls to which
                            domain to scope the token to.
        :type domain_name: ``str``

        :param token_scope: Whether to scope a token to a "project", a
                            "domain" or "unscoped"
        :type token_scope: ``str``

        :raises ValueError: When token_scope is invalid, or when the
                            arguments required by the chosen scope are
                            missing.
        """
        super(OpenStackIdentity_3_0_Connection,
              self).__init__(auth_url=auth_url,
                             user_id=user_id,
                             key=key,
                             tenant_name=tenant_name,
                             domain_name=domain_name,
                             token_scope=token_scope,
                             timeout=timeout,
                             proxy_url=proxy_url,
                             parent_conn=parent_conn)
        # Validate that the arguments required by the requested token scope
        # have actually been provided.
        if self.token_scope not in self.VALID_TOKEN_SCOPES:
            raise ValueError('Invalid value for "token_scope" argument: %s' %
                             (self.token_scope))

        if (self.token_scope == OpenStackIdentityTokenScope.PROJECT and
                (not self.tenant_name or not self.domain_name)):
            raise ValueError('Must provide tenant_name and domain_name '
                             'argument')
        elif (self.token_scope == OpenStackIdentityTokenScope.DOMAIN and
                not self.domain_name):
            raise ValueError('Must provide domain_name argument')

        # Roles of the authenticated user, populated by authenticate().
        self.auth_user_roles = None
        self.tenant_domain_id = tenant_domain_id
def authenticate(self, force=False):
"""
Perform authentication.
"""
if not self._is_authentication_needed(force=force):
return self
data = {
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'domain': {
'name': self.domain_name
},
'name': self.user_id,
'password': self.key
}
}
}
}
}
if self.token_scope == OpenStackIdentityTokenScope.PROJECT:
# Scope token to project (tenant)
data['auth']['scope'] = {
'project': {
'domain': {
'id': self.tenant_domain_id
},
'name': self.tenant_name
}
}
elif self.token_scope == OpenStackIdentityTokenScope.DOMAIN:
# Scope token to domain
data['auth']['scope'] = {
'domain': {
'name': self.domain_name
}
}
elif self.token_scope == OpenStackIdentityTokenScope.UNSCOPED:
pass
else:
raise ValueError('Token needs to be scoped either to project or '
'a domain')
data = json.dumps(data)
response = self.request('/v3/auth/tokens', data=data,
headers={'Content-Type': 'application/json'},
method='POST')
if response.status == httplib.UNAUTHORIZED:
# Invalid credentials
raise InvalidCredsError()
elif response.status in [httplib.OK, httplib.CREATED]:
headers = response.headers
try:
body = json.loads(response.body)
except Exception as e:
raise MalformedResponseError('Failed to parse JSON', e)
try:
roles = self._to_roles(body['token']['roles'])
except Exception:
roles = []
try:
expires = body['token']['expires_at']
self.auth_token = headers['x-subject-token']
self.auth_token_expires = parse_date(expires)
# Note: catalog is not returned for unscoped tokens
self.urls = body['token'].get('catalog', None)
self.auth_user_info = body['token'].get('user', None)
self.auth_user_roles = roles
except KeyError as e:
raise MalformedResponseError('Auth JSON response is \
missing required elements', e)
body = 'code: %s body:%s' % (response.status, response.body)
elif response.status == 300:
# ambiguous version request
raise LibcloudError(
'Auth request returned ambiguous version error, try'
'using the version specific URL to connect,'
' e.g. identity/v3/auth/tokens')
else:
body = 'code: %s body:%s' % (response.status, response.body)
raise MalformedResponseError('Malformed response', body=body,
driver=self.driver)
return self
def list_domains(self):
"""
List the available domains.
:rtype: ``list`` of :class:`OpenStackIdentityDomain`
"""
response = self.authenticated_request('/v3/domains', method='GET')
result = self._to_domains(data=response.object['domains'])
return result
def list_projects(self):
"""
List the available projects.
Note: To perform this action, user you are currently authenticated with
needs to be an admin.
:rtype: ``list`` of :class:`OpenStackIdentityProject`
"""
response = self.authenticated_request('/v3/projects', method='GET')
result = self._to_projects(data=response.object['projects'])
return result
def list_users(self):
"""
List the available users.
:rtype: ``list`` of :class:`.OpenStackIdentityUser`
"""
response = self.authenticated_request('/v3/users', method='GET')
result = self._to_users(data=response.object['users'])
return result
def list_roles(self):
"""
List the available roles.
:rtype: ``list`` of :class:`.OpenStackIdentityRole`
"""
response = self.authenticated_request('/v3/roles', method='GET')
result = self._to_roles(data=response.object['roles'])
return result
def get_domain(self, domain_id):
"""
Retrieve information about a single domain.
:param domain_id: ID of domain to retrieve information for.
:type domain_id: ``str``
:rtype: :class:`.OpenStackIdentityDomain`
"""
response = self.authenticated_request('/v3/domains/%s' % (domain_id),
method='GET')
result = self._to_domain(data=response.object['domain'])
return result
def get_user(self, user_id):
"""
Get a user account by ID.
:param user_id: User's id.
:type name: ``str``
:return: Located user.
:rtype: :class:`.OpenStackIdentityUser`
"""
response = self.authenticated_request('/v3/users/%s' % user_id)
user = self._to_user(data=response.object['user'])
return user
def list_user_projects(self, user):
"""
Retrieve all the projects user belongs to.
:rtype: ``list`` of :class:`.OpenStackIdentityProject`
"""
path = '/v3/users/%s/projects' % (user.id)
response = self.authenticated_request(path, method='GET')
result = self._to_projects(data=response.object['projects'])
return result
def list_user_domain_roles(self, domain, user):
"""
Retrieve all the roles for a particular user on a domain.
:rtype: ``list`` of :class:`.OpenStackIdentityRole`
"""
# TODO: Also add "get users roles" and "get assginements" which are
# available in 3.1 and 3.3
path = '/v3/domains/%s/users/%s/roles' % (domain.id, user.id)
response = self.authenticated_request(path, method='GET')
result = self._to_roles(data=response.object['roles'])
return result
def grant_domain_role_to_user(self, domain, role, user):
"""
Grant domain role to a user.
Note: This function appears to be idempotent.
:param domain: Domain to grant the role to.
:type domain: :class:`.OpenStackIdentityDomain`
:param role: Role to grant.
:type role: :class:`.OpenStackIdentityRole`
:param user: User to grant the role to.
:type user: :class:`.OpenStackIdentityUser`
:return: ``True`` on success.
:rtype: ``bool``
"""
path = ('/v3/domains/%s/users/%s/roles/%s' %
(domain.id, user.id, role.id))
response = self.authenticated_request(path, method='PUT')
return response.status == httplib.NO_CONTENT
def revoke_domain_role_from_user(self, domain, user, role):
"""
Revoke domain role from a user.
:param domain: Domain to revoke the role from.
:type domain: :class:`.OpenStackIdentityDomain`
:param role: Role to revoke.
:type role: :class:`.OpenStackIdentityRole`
:param user: User to revoke the role from.
:type user: :class:`.OpenStackIdentityUser`
:return: ``True`` on success.
:rtype: ``bool``
"""
path = ('/v3/domains/%s/users/%s/roles/%s' %
(domain.id, user.id, role.id))
response = self.authenticated_request(path, method='DELETE')
return response.status == httplib.NO_CONTENT
def grant_project_role_to_user(self, project, role, user):
"""
Grant project role to a user.
Note: This function appears to be idempotent.
:param project: Project to grant the role to.
:type project: :class:`.OpenStackIdentityDomain`
:param role: Role to grant.
:type role: :class:`.OpenStackIdentityRole`
:param user: User to grant the role to.
:type user: :class:`.OpenStackIdentityUser`
:return: ``True`` on success.
:rtype: ``bool``
"""
path = ('/v3/projects/%s/users/%s/roles/%s' %
(project.id, user.id, role.id))
response = self.authenticated_request(path, method='PUT')
return response.status == httplib.NO_CONTENT
def revoke_project_role_from_user(self, project, role, user):
"""
Revoke project role from a user.
:param project: Project to revoke the role from.
:type project: :class:`.OpenStackIdentityDomain`
:param role: Role to revoke.
:type role: :class:`.OpenStackIdentityRole`
:param user: User to revoke the role from.
:type user: :class:`.OpenStackIdentityUser`
:return: ``True`` on success.
:rtype: ``bool``
"""
path = ('/v3/projects/%s/users/%s/roles/%s' %
(project.id, user.id, role.id))
response = self.authenticated_request(path, method='DELETE')
return response.status == httplib.NO_CONTENT
def create_user(self, email, password, name, description=None,
                domain_id=None, default_project_id=None, enabled=True):
    """
    Create a new user account.

    :param email: User's mail address.
    :type email: ``str``

    :param password: User's password.
    :type password: ``str``

    :param name: User's name.
    :type name: ``str``

    :param description: Optional description.
    :type description: ``str``

    :param domain_id: ID of the domain to add the user to (optional).
    :type domain_id: ``str``

    :param default_project_id: ID of the default user project (optional).
    :type default_project_id: ``str``

    :param enabled: True to enable user after creation.
    :type enabled: ``bool``

    :return: Created user.
    :rtype: :class:`.OpenStackIdentityUser`
    """
    payload = {
        'email': email,
        'password': password,
        'name': name,
        'enabled': enabled
    }

    # Optional attributes are only included when they are provided
    # (truthy); absent keys are left unset server-side.
    optional_attributes = (('description', description),
                           ('domain_id', domain_id),
                           ('default_project_id', default_project_id))
    for attribute, value in optional_attributes:
        if value:
            payload[attribute] = value

    body = json.dumps({'user': payload})
    response = self.authenticated_request('/v3/users', data=body,
                                          method='POST')
    return self._to_user(data=response.object['user'])
def enable_user(self, user):
    """
    Enable a user account.

    Note: This operation appears to be idempotent.

    :param user: User to enable.
    :type user: :class:`.OpenStackIdentityUser`

    :return: User account which has been enabled.
    :rtype: :class:`.OpenStackIdentityUser`
    """
    # PATCH only the ``enabled`` flag; all other attributes are untouched.
    patch_body = json.dumps({'user': {'enabled': True}})
    response = self.authenticated_request('/v3/users/%s' % (user.id),
                                          data=patch_body,
                                          method='PATCH')
    return self._to_user(data=response.object['user'])
def disable_user(self, user):
    """
    Disable a user account.

    Note: This operation appears to be idempotent.

    :param user: User to disable.
    :type user: :class:`.OpenStackIdentityUser`

    :return: User account which has been disabled.
    :rtype: :class:`.OpenStackIdentityUser`
    """
    # PATCH only the ``enabled`` flag; all other attributes are untouched.
    patch_body = json.dumps({'user': {'enabled': False}})
    response = self.authenticated_request('/v3/users/%s' % (user.id),
                                          data=patch_body,
                                          method='PATCH')
    return self._to_user(data=response.object['user'])
def _to_domains(self, data):
    # Convert a list of API response dicts into domain objects.
    return [self._to_domain(data=item) for item in data]
def _to_domain(self, data):
    # Build a single domain object from an API response dict.
    # ``id``, ``name`` and ``enabled`` are required keys in the payload.
    return OpenStackIdentityDomain(id=data['id'],
                                   name=data['name'],
                                   enabled=data['enabled'])
def _to_users(self, data):
    # Convert a list of API response dicts into user objects.
    return [self._to_user(data=item) for item in data]
def _to_user(self, data):
    # Build a single user object from an API response dict.
    # ``email``, ``description`` and ``enabled`` may be absent from the
    # payload, so they are fetched defensively with .get().
    attributes = {
        'id': data['id'],
        'domain_id': data['domain_id'],
        'name': data['name'],
        'email': data.get('email'),
        'description': data.get('description', None),
        'enabled': data.get('enabled'),
    }
    return OpenStackIdentityUser(**attributes)
def _to_roles(self, data):
    # Convert a list of API response dicts into role objects.
    return [self._to_role(data=item) for item in data]
def _to_role(self, data):
    # Build a single role object from an API response dict.
    # ``description`` and ``enabled`` may be absent from the payload.
    attributes = {
        'id': data['id'],
        'name': data['name'],
        'description': data.get('description', None),
        'enabled': data.get('enabled', True),
    }
    return OpenStackIdentityRole(**attributes)
class OpenStackIdentity_3_0_Connection_OIDC_access_token(
        OpenStackIdentity_3_0_Connection):
    """
    Connection class for Keystone API v3.x. using OpenID Connect tokens

    The OIDC token must be set in the self.key attribute.
    The identity provider name required to get the full path
    must be set in the self.user_id attribute.
    The protocol name required to get the full path
    must be set in the self.tenant_name attribute.
    The self.domain_name attribute can be used either to select the
    domain name in case of domain scoped token or to select the project
    name in case of project scoped token
    """
    responseCls = OpenStackAuthResponse
    name = 'OpenStack Identity API v3.x with OIDC support'
    auth_version = '3.0'

    def authenticate(self, force=False):
        """
        Perform authentication.

        Exchanges the OIDC access token for an unscoped Keystone token,
        then (depending on ``self.token_scope``) re-scopes it to a project
        or a domain via ``POST /v3/auth/tokens``.
        """
        if not self._is_authentication_needed(force=force):
            return self

        # First leg: trade the OIDC access token for an unscoped token.
        subject_token = self._get_unscoped_token_from_oidc_token()

        data = {
            'auth': {
                'identity': {
                    'methods': ['token'],
                    'token': {
                        'id': subject_token
                    }
                }
            }
        }

        if self.token_scope == OpenStackIdentityTokenScope.PROJECT:
            # Scope token to project (tenant)
            project_id = self._get_project_id(token=subject_token)
            data['auth']['scope'] = {
                'project': {
                    'id': project_id
                }
            }
        elif self.token_scope == OpenStackIdentityTokenScope.DOMAIN:
            # Scope token to domain
            data['auth']['scope'] = {
                'domain': {
                    'name': self.domain_name
                }
            }
        elif self.token_scope == OpenStackIdentityTokenScope.UNSCOPED:
            # Keep the token unscoped; no 'scope' entry is sent.
            pass
        else:
            raise ValueError('Token needs to be scoped either to project or '
                             'a domain')

        data = json.dumps(data)
        response = self.request('/v3/auth/tokens', data=data,
                                headers={'Content-Type': 'application/json'},
                                method='POST')

        if response.status == httplib.UNAUTHORIZED:
            # Invalid credentials
            raise InvalidCredsError()
        elif response.status in [httplib.OK, httplib.CREATED]:
            headers = response.headers

            try:
                body = json.loads(response.body)
            except Exception as e:
                raise MalformedResponseError('Failed to parse JSON', e)

            # Roles may be missing from the response (e.g. unscoped token);
            # fall back to an empty list instead of failing.
            try:
                roles = self._to_roles(body['token']['roles'])
            except Exception:
                roles = []

            try:
                expires = body['token']['expires_at']

                # The actual token is delivered in a response header, not
                # in the JSON body.
                self.auth_token = headers['x-subject-token']
                self.auth_token_expires = parse_date(expires)
                # Note: catalog is not returned for unscoped tokens
                self.urls = body['token'].get('catalog', None)
                self.auth_user_info = body['token'].get('user', None)
                self.auth_user_roles = roles
            except KeyError as e:
                raise MalformedResponseError('Auth JSON response is \
missing required elements', e)
            # NOTE(review): this assignment appears to be dead code --
            # ``body`` is not used again on the success path.
            body = 'code: %s body:%s' % (response.status, response.body)
        else:
            body = 'code: %s body:%s' % (response.status, response.body)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)

        return self

    def _get_unscoped_token_from_oidc_token(self):
        """
        Get unscoped token from OIDC access token

        Calls the OS-FEDERATION auth endpoint for the configured identity
        provider (``self.user_id``) and protocol (``self.tenant_name``),
        presenting the OIDC token (``self.key``) as a Bearer credential.
        The resulting Keystone token arrives in the ``x-subject-token``
        response header.
        """
        path = ('/v3/OS-FEDERATION/identity_providers/%s/protocols/%s/auth' %
                (self.user_id, self.tenant_name))
        response = self.request(path,
                                headers={'Content-Type': 'application/json',
                                         'Authorization': 'Bearer %s' %
                                         self.key},
                                method='GET')

        if response.status == httplib.UNAUTHORIZED:
            # Invalid credentials
            raise InvalidCredsError()
        elif response.status in [httplib.OK, httplib.CREATED]:
            if 'x-subject-token' in response.headers:
                return response.headers['x-subject-token']
            else:
                raise MalformedResponseError('No x-subject-token returned',
                                             driver=self.driver)
        else:
            raise MalformedResponseError('Malformed response',
                                         driver=self.driver,
                                         body=response.body)

    def _get_project_id(self, token):
        """
        Get the first project ID accessible with the specified access token

        If ``self.domain_name`` names a specific (non-'Default') project,
        the project whose name or id matches it is returned instead of the
        first one.
        """
        # Try new path first (from ver 1.1)
        path = '/v3/auth/projects'
        response = self.request(path,
                                headers={'Content-Type': 'application/json',
                                         'X-Auth-Token': token},
                                method='GET')
        if response.status not in [httplib.UNAUTHORIZED, httplib.OK,
                                   httplib.CREATED]:
            # In case of error try old one
            path = '/v3/OS-FEDERATION/projects'
            response = self.request(path,
                                    headers={'Content-Type':
                                             'application/json',
                                             'X-Auth-Token': token},
                                    method='GET')

        if response.status == httplib.UNAUTHORIZED:
            # Invalid credentials
            raise InvalidCredsError()
        elif response.status in [httplib.OK, httplib.CREATED]:
            try:
                body = json.loads(response.body)
                # We use domain_name in both cases of the scoped tokens
                # as we have used tenant as the protocol
                if self.domain_name and self.domain_name != 'Default':
                    for project in body['projects']:
                        if self.domain_name in [project['name'],
                                                project['id']]:
                            return project['id']
                    raise ValueError('Project %s not found' %
                                     (self.domain_name))
                else:
                    return body['projects'][0]['id']
            except ValueError as e:
                # Re-raise the "project not found" error unchanged so it is
                # not swallowed by the broad handler below.
                raise e
            except Exception as e:
                raise MalformedResponseError('Failed to parse JSON', e)
        else:
            raise MalformedResponseError('Malformed response',
                                         driver=self.driver,
                                         body=response.body)
class OpenStackIdentity_2_0_Connection_VOMS(OpenStackIdentityConnection,
                                            CertificateConnection):
    """
    Connection class for Keystone API v2.0. with VOMS proxy support

    In this case the key parameter will be the path of the VOMS proxy file.
    """
    responseCls = OpenStackAuthResponse
    name = 'OpenStack Identity API v2.0 VOMS support'
    auth_version = '2.0'

    def __init__(self, auth_url, user_id, key, tenant_name=None,
                 domain_name='Default',
                 token_scope=OpenStackIdentityTokenScope.PROJECT,
                 timeout=None, proxy_url=None, parent_conn=None):
        # Only CertificateConnection.__init__ is invoked here;
        # OpenStackIdentityConnection's attributes are assigned manually
        # below.  ``key`` is the path of the VOMS proxy certificate file.
        CertificateConnection.__init__(self, cert_file=key,
                                       url=auth_url,
                                       proxy_url=proxy_url,
                                       timeout=timeout)

        self.parent_conn = parent_conn

        # enable tests to use the same mock connection classes.
        if parent_conn:
            self.conn_class = parent_conn.conn_class
            self.driver = parent_conn.driver
        else:
            self.driver = None

        self.auth_url = auth_url
        self.tenant_name = tenant_name
        self.domain_name = domain_name
        self.token_scope = token_scope
        self.timeout = timeout
        self.proxy_url = proxy_url

        # State populated by authenticate().
        self.urls = {}
        self.auth_token = None
        self.auth_token_expires = None
        self.auth_user_info = None

    def authenticate(self, force=False):
        """
        Perform VOMS authentication, auto-discovering the tenant name from
        an unscoped token when none was supplied.
        """
        if not self._is_authentication_needed(force=force):
            return self

        tenant = self.tenant_name
        if not tenant:
            # if the tenant name is not specified look for it
            token = self._get_unscoped_token()
            tenant = self._get_tenant_name(token)

        data = {'auth': {'voms': True, 'tenantName': tenant}}
        reqbody = json.dumps(data)
        return self._authenticate_2_0_with_body(reqbody)

    def _get_unscoped_token(self):
        """
        Get unscoped token from VOMS proxy
        """
        data = {'auth': {'voms': True}}
        reqbody = json.dumps(data)
        response = self.request('/v2.0/tokens', data=reqbody,
                                headers={'Content-Type': 'application/json'},
                                method='POST')

        if response.status == httplib.UNAUTHORIZED:
            # Invalid credentials
            raise InvalidCredsError()
        elif response.status in [httplib.OK, httplib.CREATED]:
            try:
                body = json.loads(response.body)
                return body['access']['token']['id']
            except Exception as e:
                raise MalformedResponseError('Failed to parse JSON', e)
        else:
            raise MalformedResponseError('Malformed response',
                                         driver=self.driver,
                                         body=response.body)

    def _get_tenant_name(self, token):
        """
        Get the first available tenant name (usually there are only one)
        """
        headers = {'Accept': 'application/json',
                   'Content-Type': 'application/json',
                   'X-Auth-Token': token}
        response = self.request('/v2.0/tenants', headers=headers, method='GET')

        if response.status == httplib.UNAUTHORIZED:
            # Invalid credentials
            raise InvalidCredsError()
        elif response.status in [httplib.OK, httplib.CREATED]:
            try:
                body = json.loads(response.body)
                return body["tenants"][0]["name"]
            except Exception as e:
                raise MalformedResponseError('Failed to parse JSON', e)
        else:
            raise MalformedResponseError('Malformed response',
                                         driver=self.driver,
                                         body=response.body)

    def _authenticate_2_0_with_body(self, reqbody):
        # Send the scoped auth request and cache token, expiry, service
        # catalog and user info on success.
        resp = self.request('/v2.0/tokens', data=reqbody,
                            headers={'Content-Type': 'application/json'},
                            method='POST')

        if resp.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()
        elif resp.status not in [httplib.OK,
                                 httplib.NON_AUTHORITATIVE_INFORMATION]:
            body = 'code: %s body: %s' % (resp.status, resp.body)
            raise MalformedResponseError('Malformed response', body=body,
                                         driver=self.driver)
        else:
            body = resp.object

            try:
                access = body['access']
                expires = access['token']['expires']

                self.auth_token = access['token']['id']
                self.auth_token_expires = parse_date(expires)
                self.urls = access['serviceCatalog']
                self.auth_user_info = access.get('user', {})
            except KeyError as e:
                raise MalformedResponseError('Auth JSON response is \
missing required elements', e)

        return self
def get_class_for_auth_version(auth_version):
    """
    Retrieve class for the provided auth version.

    Raises :class:`LibcloudError` for an unsupported version string.
    """
    # Dispatch table mapping every supported auth version string to its
    # connection class.  All three "2.0" flavours share the same class.
    version_to_class = {
        '1.0': OpenStackIdentity_1_0_Connection,
        '1.1': OpenStackIdentity_1_1_Connection,
        '2.0': OpenStackIdentity_2_0_Connection,
        '2.0_apikey': OpenStackIdentity_2_0_Connection,
        '2.0_password': OpenStackIdentity_2_0_Connection,
        '2.0_voms': OpenStackIdentity_2_0_Connection_VOMS,
        '3.x_password': OpenStackIdentity_3_0_Connection,
        '3.x_oidc_access_token': OpenStackIdentity_3_0_Connection_OIDC_access_token,
    }

    try:
        return version_to_class[auth_version]
    except KeyError:
        raise LibcloudError('Unsupported Auth Version requested: %s' %
                            (auth_version))
| apache-2.0 |
ecell/ecell3 | ecell/pyecell/ecell/__init__.py | 2 | 1106 | #::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
| lgpl-3.0 |
bnq4ever/gypgoogle | pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular
    # dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly
    # linked list.  The circular doubly linked list starts and ends with a
    # sentinel element.  The sentinel element never gets deleted (this
    # simplifies the algorithm).  Each link is stored as a list of length
    # three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.
        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            self.__root
        except AttributeError:
            # First initialization: create the sentinel node and the
            # key -> link map.  (Guarded so that __init__ called twice, e.g.
            # by pickle, does not discard the existing order.)
            self.__root = root = []  # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the
        # linked list, and the inherited dictionary is updated with the new
        # key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor
        # nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Walk the linked list forwards, yielding keys in insertion order.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Walk the linked list backwards.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # Break the reference cycles between links so garbage collection
            # is not needed to reclaim them.
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.
        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # Unlink the node at the tail of the list (most recent key).
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # Unlink the node at the head of the list (oldest key).
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    # Suppress 'OrderedDict.update: Method has no argument':
    # pylint: disable=E0211
    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:          for k in E: od[k] = E[k]
        If E has a .keys() method, does:        for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:  for k, v in E: od[k] = v
        In either case, this is followed by:    for k, v in F.items(): od[k] = v
        '''
        # ``self`` is taken from *args so a positional argument literally
        # named 'self' in kwds cannot collide with it.
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update  # let subclasses override update without breaking __init__

    # Sentinel distinguishing "no default given" from an explicit None.
    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.
        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # NOTE: the mutable default is intentional here -- it is a shared
        # recursion guard keyed on (object id, thread id) so self-referencing
        # dicts print '...' instead of recursing forever.
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # Drop the private bookkeeping attributes; they are rebuilt by
        # __init__ when the pickle is loaded.
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).
        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.
        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --
    # (KeysView/ValuesView/ItemsView come from the optional _abcoll import
    # at the top of this file.)

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
| bsd-3-clause |
jepegit/cellpy | cellpy/parameters/prms.py | 1 | 6381 | """cellpy parameters"""
import os
from pathlib import Path
import sys
import box
# Historical ``Parameter`` class, kept for reference:
# class Parameter(object):
#     """class for storing parameters"""
#     def __init__(self, name, prm_dict):
#         self.name = name
#         for key in prm_dict:
#             setattr(self, key, prm_dict[key])
#
#     def __repr__(self):
#         return "<cellpy_prms: %s>" % self.__dict__

# locations etc for reading custom parameters
script_dir = os.path.abspath(os.path.dirname(__file__))  # directory of this module
cur_dir = os.path.abspath(os.path.dirname(sys.argv[0]))  # directory of the running script
user_dir = os.path.expanduser("~")  # user's home directory

# search_path = dict()
# search_path["curdir"] = cur_dir
# search_path["filedir"] = script_dir
# search_path["userdir"] = user_dir
#
# search_order = ["curdir", "filedir", "userdir"]
# default_name = "_cellpy_prms_default.ini"
# prm_default = os.path.join(script_dir, default_name)
# prm_filename = prm_default

# --------------------------
# Paths
# --------------------------
# Input/output directories.  All default to the directory of the running
# script; they are normally overridden by a user configuration file.
Paths = {
    "outdatadir": cur_dir,
    "rawdatadir": cur_dir,
    "cellpydatadir": cur_dir,
    "db_path": cur_dir,
    "filelogdir": cur_dir,
    "examplesdir": cur_dir,
    "notebookdir": cur_dir,
    "batchfiledir": cur_dir,
    "db_filename": "cellpy_db.xlsx",
}
# box.Box wraps the dict so values can be read as attributes
# (e.g. ``prms.Paths.outdatadir``).
Paths = box.Box(Paths)

# --------------------------
# FileNames
# --------------------------
FileNames = {"file_name_format": "YYYYMMDD_[NAME]EEE_CC_TT_RR"}
FileNames = box.Box(FileNames)

# --------------------------
# Reader
# --------------------------
# Options controlling how raw data files are read and processed.
Reader = {
    "diagnostics": False,
    "filestatuschecker": "size",
    "force_step_table_creation": True,
    "force_all": False,  # not used yet - should be used when saving
    "sep": ";",
    "cycle_mode": "anode",  # used in cellreader (593)
    "sorted_data": True,  # finding step-types assumes sorted data
    "load_only_summary": False,
    "select_minimal": False,
    "limit_loaded_cycles": None,
    "ensure_step_table": False,
    "daniel_number": 5,
    "voltage_interpolation_step": 0.01,
    "time_interpolation_step": 10.0,
    "capacity_interpolation_step": 2.0,
    "use_cellpy_stat_file": False,
    "raw_datadir": None,
    "cellpy_datadir": None,
    "auto_dirs": True,  # search in prm-file for res and hdf5 dirs in loadcell
}
Reader = box.Box(Reader)

# --------------------------
# DataSet
# --------------------------
DataSet = {
    "nom_cap": 3579
}  # mAh/g (used for finding c-rates) [should be moved to Materials]
DataSet = box.Box(DataSet)

# --------------------------
# Db
# --------------------------
# Settings for the (Excel based) cell database reader.
Db = {
    "db_type": "simple_excel_reader",
    "db_table_name": "db_table",
    "db_header_row": 0,
    "db_unit_row": 1,
    "db_data_start_row": 2,
    "db_search_start_row": 2,
    "db_search_end_row": -1,
}
Db = box.Box(Db)

# -----------------------------
# New Excel Reader
#   attribute = (header, dtype)
# -----------------------------
# Maps logical column names to (spreadsheet header, declared dtype) pairs.
DbCols = {
    "id": ("id", "int"),
    "exists": ("exists", "bol"),
    "batch": ("batch", "str"),
    "sub_batch_01": ("b01", "str"),
    "sub_batch_02": ("b02", "str"),
    "sub_batch_03": ("b03", "str"),
    "sub_batch_04": ("b04", "str"),
    "sub_batch_05": ("b05", "str"),
    "sub_batch_06": ("b06", "str"),
    "sub_batch_07": ("b07", "str"),
    "project": ("project", "str"),
    "label": ("label", "str"),
    "group": ("group", "int"),
    "selected": ("selected", "bol"),
    "cell_name": ("cell", "str"),
    "cell_type": ("cell_type", "cat"),
    "experiment_type": ("experiment_type", "cat"),
    "active_material": ("mass_active_material", "float"),
    "total_material": ("mass_total", "float"),
    "loading": ("loading_active_material", "float"),
    "nom_cap": ("nominal_capacity", "float"),
    "file_name_indicator": ("file_name_indicator", "str"),
    "instrument": ("instrument", "str"),
    "raw_file_names": ("raw_file_names", "list"),
    "cellpy_file_name": ("cellpy_file_name", "str"),
    "comment_slurry": ("comment_slurry", "str"),
    "comment_cell": ("comment_cell", "str"),
    "comment_general": ("comment_general", "str"),
    "freeze": ("freeze", "bol"),
}
DbCols = box.Box(DbCols)

# --------------------------
# Instruments
# --------------------------
Instruments = {"tester": "arbin", "custom_instrument_definitions_file": None}
Instruments = box.Box(Instruments)

# Pre-defined instruments:
Arbin = {
    "max_res_filesize": 150_000_000,
    "chunk_size": None,
    "max_chunks": None,
    "use_subprocess": False,
    "detect_subprocess_need": False,
    "sub_process_path": None,
    "office_version": "64bit",
    "SQL_server": r"localhost\SQLEXPRESS",
}

# Register pre-defined instruments:
Instruments["Arbin"] = Arbin

# --------------------------
# Materials
# --------------------------
Materials = {"cell_class": "Li-Ion", "default_material": "silicon", "default_mass": 1.0}
Materials = box.Box(Materials)

# --------------------------
# Batch-options
# --------------------------
# Defaults for the batch-processing / plotting utilities.
Batch = {
    "template": "standard",
    "fig_extension": "png",
    "backend": "bokeh",
    "notebook": True,
    "dpi": 300,
    "markersize": 4,
    "symbol_label": "simple",
    "color_style_label": "seaborn-deep",
    "figure_type": "unlimited",
    "summary_plot_width": 900,
    "summary_plot_height": 800,
    "summary_plot_height_fractions": [0.2, 0.5, 0.3],
}
Batch = box.Box(Batch)

# --------------------------
# Other non-config
# --------------------------
# Leading-underscore names below are internal and are not written to the
# user configuration file.
_variable_that_is_not_saved_to_config = "Hei"
_prm_default_name = ".cellpy_prms_default.conf"
_prm_globtxt = ".cellpy_prms*.conf"
_odbcs = ["pyodbc", "ado", "pypyodbc"]
_odbc = "pyodbc"
_search_for_odbc_driver = True
_allow_multi_test_file = False
_use_filename_cache = True
# Path to the bundled mdb-export tool, resolved relative to this file.
_sub_process_path = Path(__file__) / "../../../bin/mdbtools-win/mdb-export"
_sub_process_path = _sub_process_path.resolve()
_sort_if_subprocess = True

# Keys / node names used inside the cellpy HDF5 file layout.
_cellpyfile_root = "CellpyData"
_cellpyfile_raw = "/raw"
_cellpyfile_step = "/steps"
_cellpyfile_summary = "/summary"
_cellpyfile_fid = "/fid"

_cellpyfile_complevel = 1
_cellpyfile_complib = None  # currently defaults to "zlib"
_cellpyfile_raw_format = "table"
_cellpyfile_summary_format = "table"
_cellpyfile_stepdata_format = "table"
_cellpyfile_infotable_format = "fixed"
_cellpyfile_fidtable_format = "fixed"

# used as global variables
_globals_status = ""
_globals_errors = []
_globals_message = []

# used during development for testing new features
_res_chunk = 0
| mit |
maartenq/ansible | lib/ansible/modules/network/avi/avi_errorpageprofile.py | 20 | 4469 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Standard Ansible module metadata; consumed by ansible-doc and the plugin
# loader rather than by the module code itself.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_errorpageprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of ErrorPageProfile Avi RESTful Object
description:
- This module is used to configure ErrorPageProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.5"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
app_name:
description:
- Name of the virtual service which generated the error page.
- Field introduced in 17.2.4.
- Default value when not specified in API or module is interpreted by Avi Controller as VS Name.
company_name:
description:
- Name of the company to show in error page.
- Field introduced in 17.2.4.
- Default value when not specified in API or module is interpreted by Avi Controller as Avi Networks.
error_pages:
description:
- Defined error pages for http status codes.
- Field introduced in 17.2.4.
host_name:
description:
- Fully qualified domain name for which the error page is generated.
- Field introduced in 17.2.4.
- Default value when not specified in API or module is interpreted by Avi Controller as Host Header.
name:
description:
- Field introduced in 17.2.4.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.2.4.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Field introduced in 17.2.4.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create ErrorPageProfile object
avi_errorpageprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_errorpageprofile
"""
RETURN = '''
obj:
description: ErrorPageProfile (api/errorpageprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
    """Entry point of the Ansible module.

    Declares the module's argument specification, instantiates the
    AnsibleModule and hands control to the generic Avi API helper which
    performs the actual create/update/delete call for the
    ``errorpageprofile`` object.
    """
    # One entry per option documented in DOCUMENTATION above.
    field_specs = dict(
        state=dict(default='present', choices=['absent', 'present']),
        avi_api_update_method=dict(default='put', choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        app_name=dict(type='str',),
        company_name=dict(type='str',),
        error_pages=dict(type='list',),
        host_name=dict(type='str',),
        name=dict(type='str', required=True),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Merge in the connection options (controller, username, password, ...)
    # shared by all Avi modules.
    field_specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=field_specs,
                           supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'errorpageprofile', set([]))


if __name__ == '__main__':
    main()
| gpl-3.0 |
nirmeshk/oh-mainline | vendor/packages/twisted/twisted/test/test_log.py | 18 | 20663 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.log}.
"""
import os, sys, time, logging, warnings
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python import log, failure
class FakeWarning(Warning):
    """
    A unique L{Warning} subclass used by tests for interactions of
    L{twisted.python.log} with the L{warnings} module.
    """
    # Intentionally empty: only the distinct type matters to the tests.
class LogTest(unittest.TestCase):
    """
    Tests for the global log publisher exposed by L{twisted.python.log}.
    """
    def setUp(self):
        # Capture every event published during the test; the cleanup keeps
        # the global publisher pristine for other tests.
        self.catcher = []
        self.observer = self.catcher.append
        log.addObserver(self.observer)
        self.addCleanup(log.removeObserver, self.observer)

    def testObservation(self):
        """
        A registered observer receives each logged event, including any
        extra keyword arguments and an added C{'time'} key.
        """
        catcher = self.catcher
        log.msg("test", testShouldCatch=True)
        i = catcher.pop()
        self.assertEquals(i["message"][0], "test")
        self.assertEquals(i["testShouldCatch"], True)
        # dict.has_key() is deprecated (and removed in Python 3); use the
        # ``in`` operator via assertIn instead.
        self.assertIn("time", i)
        self.assertEquals(len(catcher), 0)

    def testContext(self):
        """
        Context values set via L{log.callWithContext} appear in logged
        events, with inner contexts overriding outer ones.
        """
        catcher = self.catcher
        log.callWithContext({"subsystem": "not the default",
                             "subsubsystem": "a",
                             "other": "c"},
                            log.callWithContext,
                            {"subsubsystem": "b"}, log.msg, "foo", other="d")
        i = catcher.pop()
        self.assertEquals(i['subsubsystem'], 'b')
        self.assertEquals(i['subsystem'], 'not the default')
        self.assertEquals(i['other'], 'd')
        self.assertEquals(i['message'][0], 'foo')

    def testErrors(self):
        """
        L{log.err} accepts strings, exceptions and failures, and marks the
        resulting events as errors.
        """
        for e, ig in [("hello world", "hello world"),
                      (KeyError(), KeyError),
                      (failure.Failure(RuntimeError()), RuntimeError)]:
            log.err(e)
            i = self.catcher.pop()
            self.assertEquals(i['isError'], 1)
            self.flushLoggedErrors(ig)

    def testErrorsWithWhy(self):
        """
        The optional second argument to L{log.err} is recorded in the
        event's C{'why'} key.
        """
        for e, ig in [("hello world", "hello world"),
                      (KeyError(), KeyError),
                      (failure.Failure(RuntimeError()), RuntimeError)]:
            log.err(e, 'foobar')
            i = self.catcher.pop()
            self.assertEquals(i['isError'], 1)
            self.assertEquals(i['why'], 'foobar')
            self.flushLoggedErrors(ig)

    def test_erroneousErrors(self):
        """
        Exceptions raised by log observers are logged but the observer which
        raised the exception remains registered with the publisher.  These
        exceptions do not prevent the event from being sent to other observers
        registered with the publisher.
        """
        L1 = []
        L2 = []
        def broken(events):
            1 / 0

        for observer in [L1.append, broken, L2.append]:
            log.addObserver(observer)
            self.addCleanup(log.removeObserver, observer)

        for i in xrange(3):
            # Reset the lists for simpler comparison.
            L1[:] = []
            L2[:] = []

            # Send out the event which will break one of the observers.
            log.msg("Howdy, y'all.")

            # The broken observer should have caused this to be logged. There
            # is a slight bug with LogPublisher - when it logs an error from an
            # observer, it uses the global "err", which is not necessarily
            # associated with it, but which may be associated with a different
            # LogPublisher! See #3307.
            excs = self.flushLoggedErrors(ZeroDivisionError)
            self.assertEqual(len(excs), 1)

            # Both other observers should have seen the message.
            self.assertEquals(len(L1), 2)
            self.assertEquals(len(L2), 2)

            # The order is slightly wrong here. The first event should be
            # delivered to all observers; then, errors should be delivered.
            self.assertEquals(L1[1]['message'], ("Howdy, y'all.",))
            self.assertEquals(L2[0]['message'], ("Howdy, y'all.",))

    def test_showwarning(self):
        """
        L{twisted.python.log.showwarning} emits the warning as a message
        to the Twisted logging system.
        """
        publisher = log.LogPublisher()
        publisher.addObserver(self.observer)

        publisher.showwarning(
            FakeWarning("unique warning message"), FakeWarning,
            "warning-filename.py", 27)
        event = self.catcher.pop()
        self.assertEqual(
            event['format'] % event,
            'warning-filename.py:27: twisted.test.test_log.FakeWarning: '
            'unique warning message')
        self.assertEqual(self.catcher, [])

        # Python 2.6 requires that any function used to override the
        # warnings.showwarning API accept a "line" parameter or a
        # deprecation warning is emitted.
        publisher.showwarning(
            FakeWarning("unique warning message"), FakeWarning,
            "warning-filename.py", 27, line=object())
        event = self.catcher.pop()
        self.assertEqual(
            event['format'] % event,
            'warning-filename.py:27: twisted.test.test_log.FakeWarning: '
            'unique warning message')
        self.assertEqual(self.catcher, [])

    def test_warningToFile(self):
        """
        L{twisted.python.log.showwarning} passes warnings with an explicit file
        target on to the underlying Python warning system.
        """
        message = "another unique message"
        category = FakeWarning
        filename = "warning-filename.py"
        lineno = 31

        output = StringIO()
        log.showwarning(message, category, filename, lineno, file=output)

        self.assertEqual(
            output.getvalue(),
            warnings.formatwarning(message, category, filename, lineno))

        # In Python 2.6, warnings.showwarning accepts a "line" argument which
        # gives the source line the warning message is to include.
        if sys.version_info >= (2, 6):
            line = "hello world"
            output = StringIO()
            log.showwarning(message, category, filename, lineno, file=output,
                            line=line)

            self.assertEqual(
                output.getvalue(),
                warnings.formatwarning(message, category, filename, lineno,
                                       line))
class FakeFile(list):
    """
    A minimal file-like object that records each chunk written to it as a
    list element, so tests can inspect exactly what an observer emitted.
    """
    def write(self, data):
        """
        Record C{data}.  The parameter was renamed from C{bytes} to avoid
        shadowing the builtin of that name.
        """
        self.append(data)

    def flush(self):
        """
        No-op; present only so this object satisfies the file protocol.
        """
        pass
class EvilStr:
    """
    An object whose C{__str__} raises C{ZeroDivisionError}, used to
    exercise the logging system's handling of unformattable objects.
    """
    def __str__(self):
        1/0
class EvilRepr:
    """
    An object with a working C{__str__} but a C{__repr__} that raises
    C{ZeroDivisionError}.
    """
    def __str__(self):
        return "Happy Evil Repr"

    def __repr__(self):
        1/0
class EvilReprStr(EvilStr, EvilRepr):
    """
    An object whose C{__str__} (from L{EvilStr}) and C{__repr__} (from
    L{EvilRepr}) are both broken.
    """
    pass
class LogPublisherTestCaseMixin:
    """
    Shared fixture: a fresh L{log.LogPublisher} feeding a
    L{log.FileLogObserver} whose output is captured in a L{FakeFile}.
    """
    def setUp(self):
        """
        Add a log observer which records log events in C{self.out}. Also,
        make sure the default string encoding is ASCII so that
        L{testSingleUnicode} can test the behavior of logging unencodable
        unicode messages.
        """
        self.out = FakeFile()
        self.lp = log.LogPublisher()
        self.flo = log.FileLogObserver(self.out)
        self.lp.addObserver(self.flo.emit)

        try:
            str(u'\N{VULGAR FRACTION ONE HALF}')
        except UnicodeEncodeError:
            # This is the behavior we want - don't change anything.
            self._origEncoding = None
        else:
            # NOTE: reload(sys) resurrects sys.setdefaultencoding, which
            # site.py normally deletes.  This is a Python 2-only trick.
            reload(sys)
            self._origEncoding = sys.getdefaultencoding()
            sys.setdefaultencoding('ascii')

    def tearDown(self):
        """
        Verify that everything written to the fake file C{self.out} was a
        C{str}. Also, restore the default string encoding to its previous
        setting, if it was modified by L{setUp}.
        """
        for chunk in self.out:
            self.failUnless(isinstance(chunk, str), "%r was not a string" % (chunk,))

        if self._origEncoding is not None:
            sys.setdefaultencoding(self._origEncoding)
            # Re-hide the API, as site.py does at startup.
            del sys.setdefaultencoding
class LogPublisherTestCase(LogPublisherTestCaseMixin, unittest.TestCase):
    """
    Tests for basic L{log.LogPublisher.msg} behavior via the file observer.
    """
    def testSingleString(self):
        """
        A single string message produces exactly one write to the output.
        """
        self.lp.msg("Hello, world.")
        self.assertEquals(len(self.out), 1)

    def testMultipleString(self):
        # Test some stupid behavior that will be deprecated real soon.
        # If you are reading this and trying to learn how the logging
        # system works, *do not use this feature*.
        self.lp.msg("Hello, ", "world.")
        self.assertEquals(len(self.out), 1)

    def testSingleUnicode(self):
        """
        An unencodable unicode message is still logged, as an error record
        describing the encoding failure rather than a crash.
        """
        self.lp.msg(u"Hello, \N{VULGAR FRACTION ONE HALF} world.")
        self.assertEquals(len(self.out), 1)
        self.assertIn('with str error', self.out[0])
        self.assertIn('UnicodeEncodeError', self.out[0])
class FileObserverTestCase(LogPublisherTestCaseMixin, unittest.TestCase):
    """
    Tests for L{log.FileLogObserver}: timezone handling, timestamp
    formatting, and resilience against pathologically unformattable events.
    """
    def test_getTimezoneOffset(self):
        """
        Attempt to verify that L{FileLogObserver.getTimezoneOffset} returns
        correct values for the current C{TZ} environment setting. Do this
        by setting C{TZ} to various well-known values and asserting that the
        reported offset is correct.
        """
        localDaylightTuple = (2006, 6, 30, 0, 0, 0, 4, 181, 1)
        utcDaylightTimestamp = time.mktime(localDaylightTuple)
        localStandardTuple = (2007, 1, 31, 0, 0, 0, 2, 31, 0)
        utcStandardTimestamp = time.mktime(localStandardTuple)

        originalTimezone = os.environ.get('TZ', None)
        try:
            # Test something west of UTC
            os.environ['TZ'] = 'America/New_York'
            time.tzset()
            self.assertEqual(
                self.flo.getTimezoneOffset(utcDaylightTimestamp),
                14400)
            self.assertEqual(
                self.flo.getTimezoneOffset(utcStandardTimestamp),
                18000)

            # Test something east of UTC
            os.environ['TZ'] = 'Europe/Berlin'
            time.tzset()
            self.assertEqual(
                self.flo.getTimezoneOffset(utcDaylightTimestamp),
                -7200)
            self.assertEqual(
                self.flo.getTimezoneOffset(utcStandardTimestamp),
                -3600)

            # Test a timezone that doesn't have DST
            os.environ['TZ'] = 'Africa/Johannesburg'
            time.tzset()
            self.assertEqual(
                self.flo.getTimezoneOffset(utcDaylightTimestamp),
                -7200)
            self.assertEqual(
                self.flo.getTimezoneOffset(utcStandardTimestamp),
                -7200)
        finally:
            # Always restore the process's original TZ setting.
            if originalTimezone is None:
                del os.environ['TZ']
            else:
                os.environ['TZ'] = originalTimezone
            time.tzset()
    # time.tzset is POSIX-only; skip the TZ test where it is unavailable.
    if getattr(time, 'tzset', None) is None:
        test_getTimezoneOffset.skip = (
            "Platform cannot change timezone, cannot verify correct offsets "
            "in well-known timezones.")

    def test_timeFormatting(self):
        """
        Test the method of L{FileLogObserver} which turns a timestamp into a
        human-readable string.
        """
        # There is no function in the time module which converts a UTC time
        # tuple to a timestamp.
        when = time.mktime((2001, 2, 3, 4, 5, 6, 7, 8, 0)) - time.timezone

        # Pretend to be in US/Eastern for a moment
        self.flo.getTimezoneOffset = lambda when: 18000
        self.assertEquals(self.flo.formatTime(when), '2001-02-02 23:05:06-0500')

        # Okay now we're in Eastern Europe somewhere
        self.flo.getTimezoneOffset = lambda when: -3600
        self.assertEquals(self.flo.formatTime(when), '2001-02-03 05:05:06+0100')

        # And off in the Pacific or someplace like that
        self.flo.getTimezoneOffset = lambda when: -39600
        self.assertEquals(self.flo.formatTime(when), '2001-02-03 15:05:06+1100')

        # One of those weird places with a half-hour offset timezone
        self.flo.getTimezoneOffset = lambda when: 5400
        self.assertEquals(self.flo.formatTime(when), '2001-02-03 02:35:06-0130')

        # Half-hour offset in the other direction
        self.flo.getTimezoneOffset = lambda when: -5400
        self.assertEquals(self.flo.formatTime(when), '2001-02-03 05:35:06+0130')

        # Test an offset which is between 0 and 60 minutes to make sure the
        # sign comes out properly in that case.
        self.flo.getTimezoneOffset = lambda when: 1800
        self.assertEquals(self.flo.formatTime(when), '2001-02-03 03:35:06-0030')

        # Test an offset between 0 and 60 minutes in the other direction.
        self.flo.getTimezoneOffset = lambda when: -1800
        self.assertEquals(self.flo.formatTime(when), '2001-02-03 04:35:06+0030')

        # If a strftime-format string is present on the logger, it should
        # use that instead. Note we don't assert anything about day, hour
        # or minute because we cannot easily control what time.strftime()
        # thinks the local timezone is.
        self.flo.timeFormat = '%Y %m'
        self.assertEquals(self.flo.formatTime(when), '2001 02')

    def test_loggingAnObjectWithBroken__str__(self):
        # A broken __str__ must not break the observer itself.
        self.lp.msg(EvilStr())
        self.assertEquals(len(self.out), 1)
        # Logging system shouldn't need to crap itself for this trivial case
        self.assertNotIn('UNFORMATTABLE', self.out[0])

    def test_formattingAnObjectWithBroken__str__(self):
        self.lp.msg(format='%(blat)s', blat=EvilStr())
        self.assertEquals(len(self.out), 1)
        self.assertIn('Invalid format string or unformattable object', self.out[0])

    def test_brokenSystem__str__(self):
        self.lp.msg('huh', system=EvilStr())
        self.assertEquals(len(self.out), 1)
        self.assertIn('Invalid format string or unformattable object', self.out[0])

    def test_formattingAnObjectWithBroken__repr__Indirect(self):
        self.lp.msg(format='%(blat)s', blat=[EvilRepr()])
        self.assertEquals(len(self.out), 1)
        self.assertIn('UNFORMATTABLE OBJECT', self.out[0])

    # NOTE(review): "Broker" below looks like a typo for "Broken"; left
    # unrenamed to preserve the historical test identifier.
    def test_systemWithBroker__repr__Indirect(self):
        self.lp.msg('huh', system=[EvilRepr()])
        self.assertEquals(len(self.out), 1)
        self.assertIn('UNFORMATTABLE OBJECT', self.out[0])

    def test_simpleBrokenFormat(self):
        self.lp.msg(format='hooj %s %s', blat=1)
        self.assertEquals(len(self.out), 1)
        self.assertIn('Invalid format string or unformattable object', self.out[0])

    def test_ridiculousFormat(self):
        self.lp.msg(format=42, blat=1)
        self.assertEquals(len(self.out), 1)
        self.assertIn('Invalid format string or unformattable object', self.out[0])

    def test_evilFormat__repr__And__str__(self):
        self.lp.msg(format=EvilReprStr(), blat=1)
        self.assertEquals(len(self.out), 1)
        self.assertIn('PATHOLOGICAL', self.out[0])

    def test_strangeEventDict(self):
        """
        This kind of eventDict used to fail silently, so test it does.
        """
        self.lp.msg(message='', isError=False)
        self.assertEquals(len(self.out), 0)

    def test_startLoggingTwice(self):
        """
        There are some obscure error conditions that can occur when logging is
        started twice. See http://twistedmatrix.com/trac/ticket/3289 for more
        information.
        """
        # The bug is particular to the way that the t.p.log 'global' function
        # handle stdout. If we use our own stream, the error doesn't occur. If
        # we use our own LogPublisher, the error doesn't occur.
        sys.stdout = StringIO()
        self.addCleanup(setattr, sys, 'stdout', sys.__stdout__)

        def showError(eventDict):
            if eventDict['isError']:
                sys.__stdout__.write(eventDict['failure'].getTraceback())

        log.addObserver(showError)
        self.addCleanup(log.removeObserver, showError)
        observer = log.startLogging(sys.stdout)
        self.addCleanup(observer.stop)
        # At this point, we expect that sys.stdout is a StdioOnnaStick object.
        self.assertIsInstance(sys.stdout, log.StdioOnnaStick)
        fakeStdout = sys.stdout
        observer = log.startLogging(sys.stdout)
        self.assertIdentical(sys.stdout, fakeStdout)
class PythonLoggingObserverTestCase(unittest.TestCase):
    """
    Test the bridge with python logging module.
    """
    def setUp(self):
        # Route the stdlib root logger into an in-memory buffer and bridge
        # Twisted events into stdlib logging via PythonLoggingObserver.
        self.out = StringIO()

        rootLogger = logging.getLogger("")
        self.originalLevel = rootLogger.getEffectiveLevel()
        rootLogger.setLevel(logging.DEBUG)
        self.hdlr = logging.StreamHandler(self.out)
        fmt = logging.Formatter(logging.BASIC_FORMAT)
        self.hdlr.setFormatter(fmt)
        rootLogger.addHandler(self.hdlr)

        self.lp = log.LogPublisher()
        self.obs = log.PythonLoggingObserver()
        self.lp.addObserver(self.obs.emit)

    def tearDown(self):
        # Restore the root logger exactly as it was before setUp.
        rootLogger = logging.getLogger("")
        rootLogger.removeHandler(self.hdlr)
        rootLogger.setLevel(self.originalLevel)
        logging.shutdown()

    def test_singleString(self):
        """
        Test simple output, and default log level.
        """
        self.lp.msg("Hello, world.")
        self.assertIn("Hello, world.", self.out.getvalue())
        self.assertIn("INFO", self.out.getvalue())

    def test_errorString(self):
        """
        Test error output.
        """
        self.lp.msg(failure=failure.Failure(ValueError("That is bad.")), isError=True)
        self.assertIn("ERROR", self.out.getvalue())

    def test_formatString(self):
        """
        Test logging with a format.
        """
        self.lp.msg(format="%(bar)s oo %(foo)s", bar="Hello", foo="world")
        self.assertIn("Hello oo world", self.out.getvalue())

    def test_customLevel(self):
        """
        Test the logLevel keyword for customizing level used.
        """
        self.lp.msg("Spam egg.", logLevel=logging.DEBUG)
        self.assertIn("Spam egg.", self.out.getvalue())
        self.assertIn("DEBUG", self.out.getvalue())
        # NOTE: reset() exists on cStringIO objects only (Python 2); it
        # rewinds the write position so later writes overwrite the buffer.
        self.out.reset()
        self.lp.msg("Foo bar.", logLevel=logging.WARNING)
        self.assertIn("Foo bar.", self.out.getvalue())
        self.assertIn("WARNING", self.out.getvalue())

    def test_strangeEventDict(self):
        """
        Verify that an event dictionary which is not an error and has an empty
        message isn't recorded.
        """
        self.lp.msg(message='', isError=False)
        self.assertEquals(self.out.getvalue(), '')
class PythonLoggingIntegrationTestCase(unittest.TestCase):
    """
    Test integration of python logging bridge.
    """
    def test_startStopObserver(self):
        """
        Test that start and stop methods of the observer actually register
        and unregister to the log system.
        """
        # Temporarily replace the module-level registration functions so we
        # can observe exactly what start()/stop() do.
        oldAddObserver = log.addObserver
        oldRemoveObserver = log.removeObserver
        l = []
        try:
            log.addObserver = l.append
            log.removeObserver = l.remove
            obs = log.PythonLoggingObserver()
            obs.start()
            self.assertEquals(l[0], obs.emit)
            obs.stop()
            self.assertEquals(len(l), 0)
        finally:
            # Undo the monkeypatching no matter what.
            log.addObserver = oldAddObserver
            log.removeObserver = oldRemoveObserver

    def test_inheritance(self):
        """
        Test that we can inherit L{log.PythonLoggingObserver} and use super:
        that's basically a validation that L{log.PythonLoggingObserver} is
        new-style class.
        """
        class MyObserver(log.PythonLoggingObserver):
            def emit(self, eventDict):
                super(MyObserver, self).emit(eventDict)
        obs = MyObserver()
        l = []
        oldEmit = log.PythonLoggingObserver.emit
        try:
            log.PythonLoggingObserver.emit = l.append
            obs.emit('foo')
            self.assertEquals(len(l), 1)
        finally:
            log.PythonLoggingObserver.emit = oldEmit
class DefaultObserverTestCase(unittest.TestCase):
    """
    Test the default observer.
    """
    def test_failureLogger(self):
        """
        The reason argument passed to log.err() appears in the report
        generated by DefaultObserver.
        """
        # Local import: the pure-Python StringIO, distinct from the
        # module-level cStringIO import.
        from StringIO import StringIO

        obs = log.DefaultObserver()
        obs.stderr = StringIO()
        obs.start()

        reason = "The reason."
        log.err(Exception(), reason)
        errors = self.flushLoggedErrors()

        self.assertSubstring(reason, obs.stderr.getvalue())
        self.assertEquals(len(errors), 1)

        obs.stop()
| agpl-3.0 |
ubermichael/coppulpln | docs/source/conf.py | 1 | 9453 | # -*- coding: utf-8 -*-
#
# COPPUL PLN Staging Server documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 1 15:55:03 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'COPPUL PLN Staging Server'
copyright = u'2016, Michael Joyce'
author = u'Michael Joyce'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'COPPULPLNStagingServerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'COPPULPLNStagingServer.tex', u'COPPUL PLN Staging Server Documentation',
u'Michael Joyce', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'coppulplnstagingserver', u'COPPUL PLN Staging Server Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'COPPULPLNStagingServer', u'COPPUL PLN Staging Server Documentation',
     author, 'COPPULPLNStagingServer',
     # Fill in the sphinx-quickstart placeholder with a real description.
     'Documentation for the COPPUL PLN staging server.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
jonadiazz/spamFilterApp | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.py | 920 | 8469 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import minidom, Node
import weakref
from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
    """
    Build (and return, via ``locals()``) a tree-builder module bound to the
    given DOM implementation (e.g. ``xml.dom.minidom``).
    """
    Dom = DomImplementation

    class AttrList(object):
        """Dict-like view over a DOM element's attributes."""
        def __init__(self, element):
            self.element = element

        def __iter__(self):
            return list(self.element.attributes.items()).__iter__()

        def __setitem__(self, name, value):
            self.element.setAttribute(name, value)

        def __len__(self):
            return len(list(self.element.attributes.items()))

        def items(self):
            return [(item[0], item[1]) for item in
                    list(self.element.attributes.items())]

        def keys(self):
            return list(self.element.attributes.keys())

        def __getitem__(self, name):
            return self.element.getAttribute(name)

        def __contains__(self, name):
            # Namespaced (tuple) lookups are not supported by this view.
            if isinstance(name, tuple):
                raise NotImplementedError
            else:
                return self.element.hasAttribute(name)

    class NodeBuilder(_base.Node):
        """Adapter wrapping a DOM node in the treebuilder Node interface."""
        def __init__(self, element):
            _base.Node.__init__(self, element.nodeName)
            self.element = element

        namespace = property(lambda self: hasattr(self.element, "namespaceURI")
                             and self.element.namespaceURI or None)

        def appendChild(self, node):
            node.parent = self
            self.element.appendChild(node.element)

        def insertText(self, data, insertBefore=None):
            text = self.element.ownerDocument.createTextNode(data)
            if insertBefore:
                self.element.insertBefore(text, insertBefore.element)
            else:
                self.element.appendChild(text)

        def insertBefore(self, node, refNode):
            self.element.insertBefore(node.element, refNode.element)
            node.parent = self

        def removeChild(self, node):
            if node.element.parentNode == self.element:
                self.element.removeChild(node.element)
            node.parent = None

        def reparentChildren(self, newParent):
            # Move every DOM child across, then clear our own bookkeeping.
            while self.element.hasChildNodes():
                child = self.element.firstChild
                self.element.removeChild(child)
                newParent.element.appendChild(child)
            self.childNodes = []

        def getAttributes(self):
            return AttrList(self.element)

        def setAttributes(self, attributes):
            if attributes:
                for name, value in list(attributes.items()):
                    if isinstance(name, tuple):
                        # (prefix, localName, namespace) triple.
                        if name[0] is not None:
                            qualifiedName = (name[0] + ":" + name[1])
                        else:
                            qualifiedName = name[1]
                        self.element.setAttributeNS(name[2], qualifiedName,
                                                    value)
                    else:
                        self.element.setAttribute(
                            name, value)
        attributes = property(getAttributes, setAttributes)

        def cloneNode(self):
            # Shallow clone: attributes only, no children.
            return NodeBuilder(self.element.cloneNode(False))

        def hasContent(self):
            return self.element.hasChildNodes()

        def getNameTuple(self):
            if self.namespace is None:
                return namespaces["html"], self.name
            else:
                return self.namespace, self.name

        nameTuple = property(getNameTuple)

    class TreeBuilder(_base.TreeBuilder):
        def documentClass(self):
            self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
            # A weak proxy avoids a reference cycle between the builder and
            # the document it exposes.
            return weakref.proxy(self)

        def insertDoctype(self, token):
            name = token["name"]
            publicId = token["publicId"]
            systemId = token["systemId"]

            domimpl = Dom.getDOMImplementation()
            doctype = domimpl.createDocumentType(name, publicId, systemId)
            self.document.appendChild(NodeBuilder(doctype))
            if Dom == minidom:
                # minidom quirk: the doctype must be told its owner document.
                doctype.ownerDocument = self.dom

        def elementClass(self, name, namespace=None):
            if namespace is None and self.defaultNamespace is None:
                node = self.dom.createElement(name)
            else:
                node = self.dom.createElementNS(namespace, name)

            return NodeBuilder(node)

        def commentClass(self, data):
            return NodeBuilder(self.dom.createComment(data))

        def fragmentClass(self):
            return NodeBuilder(self.dom.createDocumentFragment())

        def appendChild(self, node):
            self.dom.appendChild(node.element)

        def testSerializer(self, element):
            return testSerializer(element)

        def getDocument(self):
            return self.dom

        def getFragment(self):
            return _base.TreeBuilder.getFragment(self).element

        def insertText(self, data, parent=None):
            data = data
            if parent != self:
                _base.TreeBuilder.insertText(self, data, parent)
            else:
                # HACK: allow text nodes as children of the document node
                if hasattr(self.dom, '_child_node_types'):
                    if Node.TEXT_NODE not in self.dom._child_node_types:
                        self.dom._child_node_types = list(self.dom._child_node_types)
                        self.dom._child_node_types.append(Node.TEXT_NODE)
                self.dom.appendChild(self.dom.createTextNode(data))

        implementation = DomImplementation
        name = None

    def testSerializer(element):
        """Render the tree in the html5lib-tests expected-output format."""
        element.normalize()
        rv = []

        def serializeElement(element, indent=0):
            if element.nodeType == Node.DOCUMENT_TYPE_NODE:
                if element.name:
                    if element.publicId or element.systemId:
                        publicId = element.publicId or ""
                        systemId = element.systemId or ""
                        rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
                                  (' ' * indent, element.name, publicId, systemId))
                    else:
                        rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
                else:
                    rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
            elif element.nodeType == Node.DOCUMENT_NODE:
                rv.append("#document")
            elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
                rv.append("#document-fragment")
            elif element.nodeType == Node.COMMENT_NODE:
                rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
            elif element.nodeType == Node.TEXT_NODE:
                rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
            else:
                if (hasattr(element, "namespaceURI") and
                        element.namespaceURI is not None):
                    name = "%s %s" % (constants.prefixes[element.namespaceURI],
                                      element.nodeName)
                else:
                    name = element.nodeName
                rv.append("|%s<%s>" % (' ' * indent, name))
                if element.hasAttributes():
                    attributes = []
                    for i in range(len(element.attributes)):
                        attr = element.attributes.item(i)
                        name = attr.nodeName
                        value = attr.value
                        ns = attr.namespaceURI
                        if ns:
                            name = "%s %s" % (constants.prefixes[ns], attr.localName)
                        else:
                            name = attr.nodeName
                        attributes.append((name, value))
                    # Sorted for deterministic expected-output comparison.
                    for name, value in sorted(attributes):
                        rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
            indent += 2
            for child in element.childNodes:
                serializeElement(child, indent)
        serializeElement(element, 0)

        return "\n".join(rv)

    return locals()
# Public factory: getDomModule(DomImplementation) returns (and caches) a
# module-like object assembled from getDomBuilder's locals().
getDomModule = moduleFactoryFactory(getDomBuilder)
| unlicense |
rane-hs/fabric-py3 | fabric/task_utils.py | 1 | 2825 | from fabric.utils import abort, indent
from fabric import state
import collections
# For attribute tomfoolery
class _Dict(dict):
    """
    A dict subclass that, unlike dict itself, can carry arbitrary
    attributes (e.g. a ``default`` task attached by the task loader).
    """
    pass
def _crawl(name, mapping):
"""
``name`` of ``'a.b.c'`` => ``mapping['a']['b']['c']``
"""
key, _, rest = name.partition('.')
value = mapping[key]
if not rest:
return value
return _crawl(rest, value)
def crawl(name, mapping):
    """Resolve dotted task ``name`` within ``mapping``.

    Returns the resolved object, the ``default`` task of a task-module
    mapping, or ``None`` when the name cannot be resolved (or the module
    has no usable default task).
    """
    try:
        found = _crawl(name, mapping)
    except (KeyError, TypeError):
        return None
    # Handle default tasks: a _Dict marks a task module; a truthy 'default'
    # attribute names its default task, otherwise the bare module is an
    # invalid target.
    if isinstance(found, _Dict):
        candidate = getattr(found, 'default', None)
        found = candidate if candidate else None
    return found
def merge(hosts, roles, exclude, roledefs):
    """
    Merge given host and role lists into one list of deduped hosts.

    :param hosts: host string or list of host strings.
    :param roles: role names, each of which must exist in ``roledefs``.
    :param exclude: hosts to omit — only applied when deduping is enabled
        (``state.env.dedupe_hosts``).
    :param roledefs: mapping of role name -> host list, ``{'hosts': [...]}``
        dict, or a callable returning a host list.
    :returns: ordered list of cleaned host strings.
    """
    # Abort if any roles don't exist
    bad_roles = [x for x in roles if x not in roledefs]
    if bad_roles:
        abort("The following specified roles do not exist:\n%s" % (
            indent(bad_roles)
        ))

    # Coerce strings to one-item lists
    if isinstance(hosts, str):
        hosts = [hosts]

    # Look up roles, turn into flat list of hosts
    role_hosts = []
    for role in roles:
        value = roledefs[role]
        # Handle dict style roledefs
        if isinstance(value, dict):
            value = value['hosts']
        # Handle "lazy" roles (callables). Bug fix: this previously used
        # isinstance(value, collections.Callable), which breaks on
        # Python >= 3.10 where the ABC aliases were removed from the
        # collections top level; the builtin callable() is equivalent.
        if callable(value):
            value = value()
        role_hosts += value

    # Strip whitespace from host strings.
    cleaned_hosts = [x.strip() for x in list(hosts) + list(role_hosts)]
    # Return deduped combo of hosts and role_hosts, preserving order within
    # them (vs using set(), which may lose ordering) and skipping hosts to be
    # excluded.
    # But only if the user hasn't indicated they want this behavior disabled.
    all_hosts = cleaned_hosts
    if state.env.dedupe_hosts:
        deduped_hosts = []
        for host in cleaned_hosts:
            if host not in deduped_hosts and host not in exclude:
                deduped_hosts.append(host)
        all_hosts = deduped_hosts
    return all_hosts
def parse_kwargs(kwargs):
    """Split Fabric routing arguments out of a task's keyword arguments.

    Recognizes ``host``/``hosts``, ``role``/``roles`` (singular forms are
    wrapped in a one-item list) and ``exclude_hosts``; everything else is
    passed through untouched.

    Returns a 4-tuple ``(task_kwargs, hosts, roles, exclude_hosts)``.
    """
    task_kwargs = {}
    hosts = []
    roles = []
    exclude_hosts = []
    for name, val in kwargs.items():
        if name == 'host':
            hosts = [val]
            continue
        if name == 'hosts':
            hosts = val
            continue
        if name == 'role':
            roles = [val]
            continue
        if name == 'roles':
            roles = val
            continue
        if name == 'exclude_hosts':
            exclude_hosts = val
            continue
        task_kwargs[name] = val
    return task_kwargs, hosts, roles, exclude_hosts
| bsd-2-clause |
jckhang/gensim | gensim/test/test_corpora_dictionary.py | 46 | 9161 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Unit tests for the `corpora.Dictionary` class.
"""
from collections import Mapping
import logging
import tempfile
import unittest
import os
import os.path
import scipy
import gensim
from gensim.corpora import Dictionary
from six import PY3
from six.moves import zip
# sample data files are located in the same folder
module_path = os.path.dirname(__file__)
def get_tmpfile(suffix):
    """Return the path of a file named ``suffix`` in the system temp dir."""
    tmp_dir = tempfile.gettempdir()
    return os.path.join(tmp_dir, suffix)
class TestDictionary(unittest.TestCase):
    """Unit tests for :class:`gensim.corpora.Dictionary`."""

    def setUp(self):
        # Nine tiny pre-tokenized documents shared by most tests below.
        self.texts = [
            ['human', 'interface', 'computer'],
            ['survey', 'user', 'computer', 'system', 'response', 'time'],
            ['eps', 'user', 'interface', 'system'],
            ['system', 'human', 'system', 'eps'],
            ['user', 'response', 'time'],
            ['trees'],
            ['graph', 'trees'],
            ['graph', 'minors', 'trees'],
            ['graph', 'minors', 'survey']]

    def testDocFreqOneDoc(self):
        """Each distinct token of a single document gets document frequency 1."""
        texts = [['human', 'interface', 'computer']]
        d = Dictionary(texts)
        expected = {0: 1, 1: 1, 2: 1}
        self.assertEqual(d.dfs, expected)

    def testDocFreqAndToken2IdForSeveralDocsWithOneWord(self):
        """dfs counts documents, not occurrences; token2id keeps one entry."""
        # two docs
        texts = [['human'], ['human']]
        d = Dictionary(texts)
        expected = {0: 2}
        self.assertEqual(d.dfs, expected)
        # only one token (human) should exist
        expected = {'human': 0}
        self.assertEqual(d.token2id, expected)

        # three docs
        texts = [['human'], ['human'], ['human']]
        d = Dictionary(texts)
        expected = {0: 3}
        self.assertEqual(d.dfs, expected)
        # only one token (human) should exist
        expected = {'human': 0}
        self.assertEqual(d.token2id, expected)

        # four docs
        texts = [['human'], ['human'], ['human'], ['human']]
        d = Dictionary(texts)
        expected = {0: 4}
        self.assertEqual(d.dfs, expected)
        # only one token (human) should exist
        expected = {'human': 0}
        self.assertEqual(d.token2id, expected)

    def testDocFreqForOneDocWithSeveralWord(self):
        """Several distinct words in one document each get frequency 1."""
        # two words
        texts = [['human', 'cat']]
        d = Dictionary(texts)
        expected = {0: 1, 1: 1}
        self.assertEqual(d.dfs, expected)

        # three words
        texts = [['human', 'cat', 'minors']]
        d = Dictionary(texts)
        expected = {0: 1, 1: 1, 2: 1}
        self.assertEqual(d.dfs, expected)

    def testBuild(self):
        """Building from the fixture yields the expected ids and frequencies."""
        d = Dictionary(self.texts)
        # Since we don't specify the order in which dictionaries are built,
        # we cannot reliably test for the mapping; only the keys and values.
        expected_keys = list(range(12))
        expected_values = [2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3]
        self.assertEqual(sorted(d.dfs.keys()), expected_keys)
        self.assertEqual(sorted(d.dfs.values()), expected_values)

        expected_keys = sorted(['computer', 'eps', 'graph', 'human',
                                'interface', 'minors', 'response', 'survey',
                                'system', 'time', 'trees', 'user'])
        expected_values = list(range(12))
        self.assertEqual(sorted(d.token2id.keys()), expected_keys)
        self.assertEqual(sorted(d.token2id.values()), expected_values)

    def testMerge(self):
        """merge_with() of two partial dictionaries covers the full vocabulary."""
        d = Dictionary(self.texts)
        f = Dictionary(self.texts[:3])
        g = Dictionary(self.texts[3:])
        f.merge_with(g)
        self.assertEqual(sorted(d.token2id.keys()), sorted(f.token2id.keys()))

    def testFilter(self):
        """filter_extremes() keeps only the keep_n most frequent tokens."""
        d = Dictionary(self.texts)
        d.filter_extremes(no_below=2, no_above=1.0, keep_n=4)
        expected = {0: 3, 1: 3, 2: 3, 3: 3}
        self.assertEqual(d.dfs, expected)

    def testFilterTokens(self):
        """filter_tokens() removes ids; re-adding the word assigns a new id."""
        self.maxDiff = 10000
        d = Dictionary(self.texts)

        removed_word = d[0]
        d.filter_tokens([0])

        expected = {'computer': 0, 'eps': 8, 'graph': 10, 'human': 1,
                    'interface': 2, 'minors': 11, 'response': 3, 'survey': 4,
                    'system': 5, 'time': 6, 'trees': 9, 'user': 7}
        del expected[removed_word]
        self.assertEqual(sorted(d.token2id.keys()), sorted(expected.keys()))

        expected[removed_word] = len(expected)
        d.add_documents([[removed_word]])
        self.assertEqual(sorted(d.token2id.keys()), sorted(expected.keys()))

    def test_doc2bow(self):
        """doc2bow() accepts unicode tokens and rejects a bare string."""
        d = Dictionary([["žluťoučký"], ["žluťoučký"]])

        # pass a utf8 string
        self.assertEqual(d.doc2bow(["žluťoučký"]), [(0, 1)])

        # doc2bow must raise a TypeError if passed a string instead of array of strings by accident
        self.assertRaises(TypeError, d.doc2bow, "žluťoučký")

        # unicode must be converted to utf8
        self.assertEqual(d.doc2bow([u'\u017elu\u0165ou\u010dk\xfd']), [(0, 1)])

    def test_saveAsText_and_loadFromText(self):
        """`Dictionary` can be saved as textfile and loaded again from textfile. """
        tmpf = get_tmpfile('dict_test.txt')
        # Exercise both on-disk orderings supported by save_as_text.
        for sort_by_word in [True, False]:
            d = Dictionary(self.texts)
            d.save_as_text(tmpf, sort_by_word=sort_by_word)
            self.assertTrue(os.path.exists(tmpf))

            d_loaded = Dictionary.load_from_text(tmpf)
            self.assertNotEqual(d_loaded, None)
            self.assertEqual(d_loaded.token2id, d.token2id)

    def test_from_corpus(self):
        """build `Dictionary` from an existing corpus"""
        documents = ["Human machine interface for lab abc computer applications",
                     "A survey of user opinion of computer system response time",
                     "The EPS user interface management system",
                     "System and human system engineering testing of EPS",
                     "Relation of user perceived response time to error measurement",
                     "The generation of random binary unordered trees",
                     "The intersection graph of paths in trees",
                     "Graph minors IV Widths of trees and well quasi ordering",
                     "Graph minors A survey"]
        stoplist = set('for a of the and to in'.split())
        texts = [[word for word in document.lower().split() if word not in stoplist]
                 for document in documents]

        # remove words that appear only once
        all_tokens = sum(texts, [])
        tokens_once = set(word for word in set(all_tokens) if all_tokens.count(word) == 1)
        texts = [[word for word in text if word not in tokens_once]
                 for text in texts]

        dictionary = Dictionary(texts)
        corpus = [dictionary.doc2bow(text) for text in texts]

        # Create dictionary from corpus without a token map
        dictionary_from_corpus = Dictionary.from_corpus(corpus)

        dict_token2id_vals = sorted(dictionary.token2id.values())
        dict_from_corpus_vals = sorted(dictionary_from_corpus.token2id.values())
        self.assertEqual(dict_token2id_vals, dict_from_corpus_vals)
        self.assertEqual(dictionary.dfs, dictionary_from_corpus.dfs)
        self.assertEqual(dictionary.num_docs, dictionary_from_corpus.num_docs)
        self.assertEqual(dictionary.num_pos, dictionary_from_corpus.num_pos)
        self.assertEqual(dictionary.num_nnz, dictionary_from_corpus.num_nnz)

        # Create dictionary from corpus with an id=>token map
        dictionary_from_corpus_2 = Dictionary.from_corpus(corpus, id2word=dictionary)

        self.assertEqual(dictionary.token2id, dictionary_from_corpus_2.token2id)
        self.assertEqual(dictionary.dfs, dictionary_from_corpus_2.dfs)
        self.assertEqual(dictionary.num_docs, dictionary_from_corpus_2.num_docs)
        self.assertEqual(dictionary.num_pos, dictionary_from_corpus_2.num_pos)
        self.assertEqual(dictionary.num_nnz, dictionary_from_corpus_2.num_nnz)

        # Ensure Sparse2Corpus is compatible with from_corpus
        bow = gensim.matutils.Sparse2Corpus(scipy.sparse.rand(10, 100))
        dictionary = Dictionary.from_corpus(bow)
        self.assertEqual(dictionary.num_docs, 100)

    def test_dict_interface(self):
        """Test Python 2 dict-like interface in both Python 2 and 3."""
        d = Dictionary(self.texts)

        self.assertTrue(isinstance(d, Mapping))

        self.assertEqual(list(zip(d.keys(), d.values())), list(d.items()))

        # Even in Py3, we want the iter* members.
        self.assertEqual(list(d.items()), list(d.iteritems()))
        self.assertEqual(list(d.keys()), list(d.iterkeys()))
        self.assertEqual(list(d.values()), list(d.itervalues()))

        # XXX Do we want list results from the dict members in Py3 too?
        if not PY3:
            self.assertTrue(isinstance(d.items(), list))
            self.assertTrue(isinstance(d.keys(), list))
            self.assertTrue(isinstance(d.values(), list))
#endclass TestDictionary
if __name__ == '__main__':
    # Keep logging quiet (warnings and above) while the test suite runs.
    logging.basicConfig(level=logging.WARNING)
    unittest.main()
| gpl-3.0 |
MounirMesselmeni/django | tests/custom_columns/tests.py | 30 | 4108 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from django.utils import six
from .models import Article, Author
class CustomColumnsTests(TestCase):
    """ORM behavior for models whose fields map onto custom DB column names.

    Queries must use the *model field* names (e.g. ``first_name``); the raw
    column names (e.g. ``firstname``) must not leak through the API.
    """

    def setUp(self):
        # Two authors, one article authored by both, primary author a1.
        self.a1 = Author.objects.create(first_name="John", last_name="Smith")
        self.a2 = Author.objects.create(first_name="Peter", last_name="Jones")
        self.authors = [self.a1, self.a2]
        self.article = Article.objects.create(headline="Django lets you build Web apps easily", primary_author=self.a1)
        self.article.authors.set(self.authors)

    def test_query_all_available_authors(self):
        self.assertQuerysetEqual(
            Author.objects.all(), [
                "Peter Jones", "John Smith",
            ],
            six.text_type
        )

    def test_get_first_name(self):
        self.assertEqual(
            Author.objects.get(first_name__exact="John"),
            self.a1,
        )

    def test_filter_first_name(self):
        self.assertQuerysetEqual(
            Author.objects.filter(first_name__exact="John"), [
                "John Smith",
            ],
            six.text_type
        )

    def test_field_error(self):
        # Filtering on the raw column name must raise FieldError.
        self.assertRaises(
            FieldError,
            lambda: Author.objects.filter(firstname__exact="John")
        )

    def test_attribute_error(self):
        # Column names are not exposed as instance attributes either.
        with self.assertRaises(AttributeError):
            self.a1.firstname

        with self.assertRaises(AttributeError):
            self.a1.last

    def test_get_all_authors_for_an_article(self):
        self.assertQuerysetEqual(
            self.article.authors.all(), [
                "Peter Jones",
                "John Smith",
            ],
            six.text_type
        )

    def test_get_all_articles_for_an_author(self):
        self.assertQuerysetEqual(
            self.a1.article_set.all(), [
                "Django lets you build Web apps easily",
            ],
            lambda a: a.headline
        )

    def test_get_author_m2m_relation(self):
        self.assertQuerysetEqual(
            self.article.authors.filter(last_name='Jones'), [
                "Peter Jones"
            ],
            six.text_type
        )

    def test_author_querying(self):
        self.assertQuerysetEqual(
            Author.objects.all().order_by('last_name'),
            ['<Author: Peter Jones>', '<Author: John Smith>']
        )

    def test_author_filtering(self):
        self.assertQuerysetEqual(
            Author.objects.filter(first_name__exact='John'),
            ['<Author: John Smith>']
        )

    def test_author_get(self):
        self.assertEqual(self.a1, Author.objects.get(first_name__exact='John'))

    def test_filter_on_nonexistent_field(self):
        # The error message enumerates the *model* field names as choices.
        self.assertRaisesMessage(
            FieldError,
            "Cannot resolve keyword 'firstname' into field. Choices are: "
            "Author_ID, article, first_name, last_name, primary_set",
            Author.objects.filter,
            firstname__exact='John'
        )

    def test_author_get_attributes(self):
        a = Author.objects.get(last_name__exact='Smith')
        self.assertEqual('John', a.first_name)
        self.assertEqual('Smith', a.last_name)
        self.assertRaisesMessage(
            AttributeError,
            "'Author' object has no attribute 'firstname'",
            getattr,
            a, 'firstname'
        )
        self.assertRaisesMessage(
            AttributeError,
            "'Author' object has no attribute 'last'",
            getattr,
            a, 'last'
        )

    def test_m2m_table(self):
        # M2M traversal works in both directions through the custom join table.
        self.assertQuerysetEqual(
            self.article.authors.all().order_by('last_name'),
            ['<Author: Peter Jones>', '<Author: John Smith>']
        )
        self.assertQuerysetEqual(
            self.a1.article_set.all(),
            ['<Article: Django lets you build Web apps easily>']
        )
        self.assertQuerysetEqual(
            self.article.authors.filter(last_name='Jones'),
            ['<Author: Peter Jones>']
        )
| bsd-3-clause |
Xeralux/tensorflow | tensorflow/examples/how_tos/reading_data/convert_to_records.py | 55 | 3282 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MNIST data to TFRecords file format with Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
FLAGS = None
def _int64_feature(value):
  """Wrap a single int in a tf.train.Feature holding an Int64List."""
  int64_list = tf.train.Int64List(value=[value])
  return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
  """Wrap a single bytes value in a tf.train.Feature holding a BytesList."""
  byte_list = tf.train.BytesList(value=[value])
  return tf.train.Feature(bytes_list=byte_list)
def convert_to(data_set, name):
  """Converts a dataset to tfrecords.

  Args:
    data_set: object exposing `images`, `labels` and `num_examples`
      (e.g. an MNIST split; images indexed as [example, row, col, depth] —
      a 4-D array, per the shape accesses below).
    name: basename of the output file; written to FLAGS.directory as
      `<name>.tfrecords`.

  Raises:
    ValueError: if images.shape[0] disagrees with num_examples.
  """
  images = data_set.images
  labels = data_set.labels
  num_examples = data_set.num_examples

  if images.shape[0] != num_examples:
    raise ValueError('Images size %d does not match label size %d.' %
                     (images.shape[0], num_examples))
  rows = images.shape[1]
  cols = images.shape[2]
  depth = images.shape[3]

  filename = os.path.join(FLAGS.directory, name + '.tfrecords')
  print('Writing', filename)
  with tf.python_io.TFRecordWriter(filename) as writer:
    for index in range(num_examples):
      # One Example per image: raw pixel bytes plus shape and label.
      image_raw = images[index].tostring()
      example = tf.train.Example(
          features=tf.train.Features(
              feature={
                  'height': _int64_feature(rows),
                  'width': _int64_feature(cols),
                  'depth': _int64_feature(depth),
                  'label': _int64_feature(int(labels[index])),
                  'image_raw': _bytes_feature(image_raw)
              }))
      writer.write(example.SerializeToString())
def main(unused_argv):
  """Read the MNIST splits and write each one as a .tfrecords file."""
  # Get the data.
  data_sets = mnist.read_data_sets(FLAGS.directory,
                                   dtype=tf.uint8,
                                   reshape=False,
                                   validation_size=FLAGS.validation_size)

  # Convert to Examples and write the result to TFRecords.
  convert_to(data_sets.train, 'train')
  convert_to(data_sets.validation, 'validation')
  convert_to(data_sets.test, 'test')
if __name__ == '__main__':
  # Command-line entry point: parse known flags here, forward the rest
  # (plus argv[0]) to tf.app.run so TF can handle its own flags.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--directory',
      type=str,
      default='/tmp/data',
      help='Directory to download data files and write the converted result'
  )
  parser.add_argument(
      '--validation_size',
      type=int,
      default=5000,
      help="""\
      Number of examples to separate from the training data for the validation
      set.\
      """
  )
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
RonnyPfannschmidt/pip | src/pip/_vendor/urllib3/fields.py | 288 | 5943 | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
    """
    Guess the "Content-Type" of a file.

    :param filename:
        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
    :param default:
        If no "Content-Type" can be guessed, default to `default`.
    """
    if not filename:
        return default
    guessed, _ = mimetypes.guess_type(filename)
    return guessed or default
def format_header_param(name, value):
    """
    Helper function to format and quote a single header parameter.

    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. This follows RFC 2231, as
    suggested by RFC 2388 Section 4.4.

    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as a unicode string.
    """
    # Fast path: a plain ASCII value without quotes/backslashes/newlines can
    # be emitted as a simple quoted string.
    needs_encoding = any(ch in value for ch in '"\\\r\n')
    if not needs_encoding:
        candidate = '%s="%s"' % (name, value)
        try:
            candidate.encode('ascii')
        except (UnicodeEncodeError, UnicodeDecodeError):
            pass
        else:
            return candidate
    # Fallback: RFC 2231 extended notation (name*=utf-8''...).
    if not six.PY3 and isinstance(value, six.text_type):  # Python 2:
        value = value.encode('utf-8')
    value = email.utils.encode_rfc2231(value, 'utf-8')
    return '%s*=%s' % (name, value)
class RequestField(object):
    """
    A data container for request body parameters.

    :param name:
        The name of this request field.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field.
    :param headers:
        An optional dict-like object of headers to initially use for the field.
    """
    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        self.headers = {}
        if headers:
            # Copy so later mutation of this field's headers never affects
            # the caller's mapping.
            self.headers = dict(headers)

    @classmethod
    def from_tuples(cls, fieldname, value):
        """
        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.

        Supports constructing :class:`~urllib3.fields.RequestField` from
        parameter of key/value strings AND key/filetuple. A filetuple is a
        (filename, data, MIME type) tuple where the MIME type is optional.
        For example::

            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
            'realfile': ('barfile.txt', open('realfile').read()),
            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
            'nonamefile': 'contents of nonamefile field',

        Field names and filenames must be unicode.
        """
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                # Two-tuple (filename, data): infer the type from the name.
                filename, data = value
                content_type = guess_content_type(filename)
        else:
            # Bare value: a plain (non-file) form field.
            filename = None
            content_type = None
            data = value

        request_param = cls(fieldname, data, filename=filename)
        request_param.make_multipart(content_type=content_type)

        return request_param

    def _render_part(self, name, value):
        """
        Overridable helper function to format a single header parameter.

        :param name:
            The name of the parameter, a string expected to be ASCII only.
        :param value:
            The value of the parameter, provided as a unicode string.
        """
        return format_header_param(name, value)

    def _render_parts(self, header_parts):
        """
        Helper function to format and quote a single header.

        Useful for single headers that are composed of multiple items. E.g.,
        'Content-Disposition' fields.

        :param header_parts:
            A sequence of (k, v) typles or a :class:`dict` of (k, v) to format
            as `k1="v1"; k2="v2"; ...`.
        """
        parts = []
        iterable = header_parts
        if isinstance(header_parts, dict):
            iterable = header_parts.items()

        # Parameters whose value is None are skipped entirely.
        for name, value in iterable:
            if value is not None:
                parts.append(self._render_part(name, value))

        return '; '.join(parts)

    def render_headers(self):
        """
        Renders the headers for this request field.
        """
        lines = []

        # Emit the well-known headers first, in this fixed order...
        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
        for sort_key in sort_keys:
            if self.headers.get(sort_key, False):
                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))

        # ...then any remaining (truthy) custom headers.
        for header_name, header_value in self.headers.items():
            if header_name not in sort_keys:
                if header_value:
                    lines.append('%s: %s' % (header_name, header_value))

        lines.append('\r\n')
        return '\r\n'.join(lines)

    def make_multipart(self, content_disposition=None, content_type=None,
                       content_location=None):
        """
        Makes this request field into a multipart request field.

        This method overrides "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter.

        :param content_type:
            The 'Content-Type' of the request body.
        :param content_location:
            The 'Content-Location' of the request body.
        """
        self.headers['Content-Disposition'] = content_disposition or 'form-data'
        # Joining with a leading '' produces '; name="..."; filename="..."'.
        self.headers['Content-Disposition'] += '; '.join([
            '', self._render_parts(
                (('name', self._name), ('filename', self._filename))
            )
        ])
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
| mit |
nurey/disclosed | app2/search/tests.py | 1 | 2896 | # -*- coding: utf-8 -*-
from google.appengine.api import apiproxy_stub_map
from google.appengine.ext import db
from django.core.urlresolvers import resolve
from django.http import HttpRequest, QueryDict
from ragendja.testutils import ModelTestCase
from search.core import SearchIndexProperty
import base64
class Indexed(db.Model):
    """Datastore model exercising both SearchIndexProperty variants:
    a normal/prefix index over two fields, and a relation index over
    ``value`` that also integrates ``one`` and ``check`` for filtering."""
    # Test normal and prefix index
    one = db.StringProperty()
    two = db.StringProperty()
    one_two_index = SearchIndexProperty(('one', 'two'))

    check = db.BooleanProperty()
    # Test relation index
    value = db.StringProperty()
    value_index = SearchIndexProperty('value', integrate=('one', 'check'))
def run_tasks():
    """Synchronously drain the 'default' GAE taskqueue stub.

    Each queued task URL is resolved to its Django view, which is invoked
    with the task's (base64-decoded) POST body; the task is then deleted.
    """
    stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
    tasks = stub.GetTasks('default')
    for task in tasks:
        view, args, kwargs = resolve(task['url'])
        request = HttpRequest()
        # Task bodies are stored base64-encoded; rebuild the POST QueryDict.
        request.POST = QueryDict(base64.b64decode(task['body']))
        view(request)
        stub.DeleteTask('default', task['name'])
class TestIndexed(ModelTestCase):
    """Integration tests for SearchIndexProperty against the GAE stubs."""
    model = Indexed.value_index._relation_index_model

    def setUp(self):
        # Index maintenance happens via queued tasks: start from an empty
        # queue, create the fixture entities, then drain the queue so the
        # indexes are up to date before each test runs.
        apiproxy_stub_map.apiproxy.GetStub('taskqueue').FlushQueue('default')

        for i in range(3):
            Indexed(one=u'OneOne%d' % i).put()

        for i in range(3):
            Indexed(one=u'one%d' % i, two='two%d' % i).put()

        for i in range(3):
            Indexed(one=(None, u'ÜÄÖ-+!#><|', 'blub')[i],
                    check=bool(i%2), value=u'value%d test-word' % i).put()

        run_tasks()

    def test_setup(self):
        """Index contents created by setUp are searchable and filterable."""
        self.assertEqual(len(Indexed.one_two_index.search('one2')), 1)
        self.assertEqual(len(Indexed.one_two_index.search('two')), 0)
        self.assertEqual(len(Indexed.one_two_index.search('two1')), 1)

        self.assertEqual(len(Indexed.value_index.search('word')), 3)
        self.assertEqual(len(Indexed.value_index.search('test-word')), 3)

        self.assertEqual(len(Indexed.value_index.search('value0',
            filters=('check =', False))), 1)
        self.assertEqual(len(Indexed.value_index.search('value1',
            filters=('check =', True, 'one =', u'ÜÄÖ-+!#><|'))), 1)
        self.assertEqual(len(Indexed.value_index.search('value2',
            filters=('check =', False, 'one =', 'blub'))), 1)

    def test_change(self):
        """Index entries follow entity updates and deletions after run_tasks."""
        value = Indexed.value_index.search('value0').get()
        value.value = 'value1 test-word'
        value.put()

        value.one = 'shidori'
        value.value = 'value3 rasengan/shidori'
        value.put()

        run_tasks()

        self.assertEqual(len(Indexed.value_index.search('rasengan')), 1)
        self.assertEqual(len(Indexed.value_index.search('value3')), 1)

        value = Indexed.value_index.search('value3').get()
        value.delete()

        run_tasks()

        self.assertEqual(len(Indexed.value_index.search('value3')), 0)
| mit |
cdgallahue/atomic-turbine | web/lib/python2.7/site-packages/wheel/signatures/__init__.py | 565 | 3779 | """
Create and verify jws-js format Ed25519 signatures.
"""
__all__ = [ 'sign', 'verify' ]
import json
from ..util import urlsafe_b64decode, urlsafe_b64encode, native, binary
ed25519ll = None
ALG = "Ed25519"
def get_ed25519ll():
    """Lazy import-and-test of ed25519 module.

    Prefers the fast C ``ed25519ll`` package and falls back to the bundled
    pure-Python implementation; runs the module self-test once on first load.
    """
    global ed25519ll
    if ed25519ll:
        return ed25519ll
    try:
        import ed25519ll  # fast (thousands / s)
    except (ImportError, OSError):  # pragma nocover
        from . import ed25519py as ed25519ll  # pure Python (hundreds / s)
    test()
    return ed25519ll
def sign(payload, keypair):
    """Return a JWS-JS format signature given a JSON-serializable payload and
    an Ed25519 keypair."""
    get_ed25519ll()
    # Header advertises the algorithm and embeds the verifying key as a JWK.
    header = {
        "alg": ALG,
        "jwk": {
            "kty": ALG, # alg -> kty in jwk-08.
            "vk": native(urlsafe_b64encode(keypair.vk))
        }
    }
    # sort_keys=True gives canonical JSON so signatures are reproducible.
    encoded_header = urlsafe_b64encode(binary(json.dumps(header, sort_keys=True)))
    encoded_payload = urlsafe_b64encode(binary(json.dumps(payload, sort_keys=True)))
    secured_input = b".".join((encoded_header, encoded_payload))
    sig_msg = ed25519ll.crypto_sign(secured_input, keypair.sk)
    # crypto_sign returns signature||message; keep only the signature prefix.
    signature = sig_msg[:ed25519ll.SIGNATUREBYTES]
    encoded_signature = urlsafe_b64encode(signature)

    return {"recipients":
            [{"header":native(encoded_header),
              "signature":native(encoded_signature)}],
            "payload": native(encoded_payload)}
def assertTrue(condition, message=""):
    """Raise ``ValueError(message)`` unless ``condition`` is truthy."""
    if condition:
        return
    raise ValueError(message)
def verify(jwsjs):
    """Return (decoded headers, payload) if all signatures in jwsjs are
    consistent, else raise ValueError.

    Caller must decide whether the keys are actually trusted."""
    get_ed25519ll()
    # XXX forbid duplicate keys in JSON input using object_pairs_hook (2.7+)
    recipients = jwsjs["recipients"]
    encoded_payload = binary(jwsjs["payload"])
    headers = []
    for recipient in recipients:
        # Each recipient must contain exactly 'header' and 'signature'.
        assertTrue(len(recipient) == 2, "Unknown recipient key {0}".format(recipient))
        h = binary(recipient["header"])
        s = binary(recipient["signature"])
        header = json.loads(native(urlsafe_b64decode(h)))
        assertTrue(header["alg"] == ALG,
                   "Unexpected algorithm {0}".format(header["alg"]))
        if "alg" in header["jwk"] and not "kty" in header["jwk"]:
            header["jwk"]["kty"] = header["jwk"]["alg"] # b/w for JWK < -08
        assertTrue(header["jwk"]["kty"] == ALG, # true for Ed25519
                   "Unexpected key type {0}".format(header["jwk"]["kty"]))
        vk = urlsafe_b64decode(binary(header["jwk"]["vk"]))
        secured_input = b".".join((h, encoded_payload))
        sig = urlsafe_b64decode(s)
        # Reassemble signature||message and let the crypto library verify it;
        # on success it returns the original signed message.
        sig_msg = sig+secured_input
        verified_input = native(ed25519ll.crypto_sign_open(sig_msg, vk))
        verified_header, verified_payload = verified_input.split('.')
        verified_header = binary(verified_header)
        decoded_header = native(urlsafe_b64decode(verified_header))
        headers.append(json.loads(decoded_header))

    verified_payload = binary(verified_payload)

    # only return header, payload that have passed through the crypto library.
    payload = json.loads(native(urlsafe_b64decode(verified_payload)))

    return headers, payload
def test():
    """Self-test: a sign/verify round-trip must succeed, and verification
    of a tampered payload must raise ValueError."""
    kp = ed25519ll.crypto_sign_keypair()
    payload = {'test': 'onstartup'}
    jwsjs = json.loads(json.dumps(sign(payload, kp)))
    verify(jwsjs)
    # Corrupt the payload; verify() must now reject it.
    jwsjs['payload'] += 'x'
    try:
        verify(jwsjs)
    except ValueError:
        pass
    else: # pragma no cover
        raise RuntimeError("No error from bad wheel.signatures payload.")
| mit |
PredictiveScienceLab/GPy | GPy/likelihoods/gamma.py | 15 | 5833 | # Copyright (c) 2012 - 2014, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy import stats,special
import scipy as sp
from ..core.parameterization import Param
from . import link_functions
from .likelihood import Likelihood
class Gamma(Likelihood):
    """
    Gamma likelihood

    .. math::
        p(y_{i}|\\lambda(f_{i})) = \\frac{\\beta^{\\alpha_{i}}}{\\Gamma(\\alpha_{i})}y_{i}^{\\alpha_{i}-1}e^{-\\beta y_{i}}\\\\
        \\alpha_{i} = \\beta y_{i}

    """
    def __init__(self,gp_link=None,beta=1.):
        if gp_link is None:
            gp_link = link_functions.Log()

        super(Gamma, self).__init__(gp_link, 'Gamma')

        self.beta = Param('beta', beta)
        self.link_parameter(self.beta)
        self.beta.fix()#TODO: gradients!

    def pdf_link(self, link_f, y, Y_metadata=None):
        """
        Likelihood function given link(f)

        .. math::
            p(y_{i}|\\lambda(f_{i})) = \\frac{\\beta^{\\alpha_{i}}}{\\Gamma(\\alpha_{i})}y_{i}^{\\alpha_{i}-1}e^{-\\beta y_{i}}\\\\
            \\alpha_{i} = \\beta y_{i}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in gamma distribution
        :returns: likelihood evaluated for this point
        :rtype: float
        """
        assert np.atleast_1d(link_f).shape == np.atleast_1d(y).shape
        #return stats.gamma.pdf(obs,a = self.gp_link.transf(gp)/self.variance,scale=self.variance)
        alpha = link_f*self.beta
        objective = (y**(alpha - 1.) * np.exp(-self.beta*y) * self.beta**alpha)/ special.gamma(alpha)
        # Product of per-point densities, computed in log space for stability.
        return np.exp(np.sum(np.log(objective)))

    def logpdf_link(self, link_f, y, Y_metadata=None):
        """
        Log Likelihood Function given link(f)

        .. math::
            \\ln p(y_{i}|\lambda(f_{i})) = \\alpha_{i}\\log \\beta - \\log \\Gamma(\\alpha_{i}) + (\\alpha_{i} - 1)\\log y_{i} - \\beta y_{i}\\\\
            \\alpha_{i} = \\beta y_{i}

        :param link_f: latent variables (link(f))
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in gamma distribution
        :returns: likelihood evaluated for this point
        :rtype: float
        """
        #alpha = self.gp_link.transf(gp)*self.beta
        #return (1. - alpha)*np.log(obs) + self.beta*obs - alpha * np.log(self.beta) + np.log(special.gamma(alpha))
        alpha = link_f*self.beta
        # Bug fix: use gammaln(alpha) instead of log(gamma(alpha)) —
        # special.gamma overflows to inf for alpha >~ 171, which previously
        # turned the log-likelihood into -inf. gammaln is mathematically
        # identical (alpha > 0 here, so gamma(alpha) > 0) and stable.
        log_objective = alpha*np.log(self.beta) - special.gammaln(alpha) + (alpha - 1)*np.log(y) - self.beta*y
        return log_objective

    def dlogpdf_dlink(self, link_f, y, Y_metadata=None):
        """
        Gradient of the log likelihood function at y, given link(f) w.r.t link(f)

        .. math::
            \\frac{d \\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)} = \\beta (\\log \\beta y_{i}) - \\Psi(\\alpha_{i})\\beta\\\\
            \\alpha_{i} = \\beta y_{i}

        :param link_f: latent variables (f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in gamma distribution
        :returns: gradient of likelihood evaluated at points
        :rtype: Nx1 array
        """
        grad = self.beta*np.log(self.beta*y) - special.psi(self.beta*link_f)*self.beta
        #old
        #return -self.gp_link.dtransf_df(gp)*self.beta*np.log(obs) + special.psi(self.gp_link.transf(gp)*self.beta) * self.gp_link.dtransf_df(gp)*self.beta
        return grad

    def d2logpdf_dlink2(self, link_f, y, Y_metadata=None):
        """
        Hessian at y, given link(f), w.r.t link(f)
        i.e. second derivative logpdf at y given link(f_i) and link(f_j)  w.r.t link(f_i) and link(f_j)
        The hessian will be 0 unless i == j

        .. math::
            \\frac{d^{2} \\ln p(y_{i}|\lambda(f_{i}))}{d^{2}\\lambda(f)} = -\\beta^{2}\\frac{d\\Psi(\\alpha_{i})}{d\\alpha_{i}}\\\\
            \\alpha_{i} = \\beta y_{i}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in gamma distribution
        :returns: Diagonal of hessian matrix (second derivative of likelihood evaluated at points f)
        :rtype: Nx1 array

        .. Note::
            Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases
            (the distribution for y_i depends only on link(f_i) not on link(f_(j!=i))
        """
        # polygamma(1, .) is the trigamma function d(psi)/d(alpha).
        hess = -special.polygamma(1, self.beta*link_f)*(self.beta**2)
        #old
        #return -self.gp_link.d2transf_df2(gp)*self.beta*np.log(obs) + special.polygamma(1,self.gp_link.transf(gp)*self.beta)*(self.gp_link.dtransf_df(gp)*self.beta)**2 + special.psi(self.gp_link.transf(gp)*self.beta)*self.gp_link.d2transf_df2(gp)*self.beta
        return hess

    def d3logpdf_dlink3(self, link_f, y, Y_metadata=None):
        """
        Third order derivative log-likelihood function at y given link(f) w.r.t link(f)

        .. math::
            \\frac{d^{3} \\ln p(y_{i}|\lambda(f_{i}))}{d^{3}\\lambda(f)} = -\\beta^{3}\\frac{d^{2}\\Psi(\\alpha_{i})}{d\\alpha_{i}}\\\\
            \\alpha_{i} = \\beta y_{i}

        :param link_f: latent variables link(f)
        :type link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata which is not used in gamma distribution
        :returns: third derivative of likelihood evaluated at points f
        :rtype: Nx1 array
        """
        # polygamma(2, .) is the second derivative of psi (tetragamma).
        d3lik_dlink3 = -special.polygamma(2, self.beta*link_f)*(self.beta**3)
        return d3lik_dlink3
| bsd-3-clause |
ajaniv/django-core-utils | django_core_utils/forms.py | 1 | 4362 | """
.. module:: django_core_utils.forms
:synopsis: django_core_utils form utilities.
django_core_utils form utilities.
"""
from __future__ import absolute_import
from django.core import validators
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth.models import Group, User
from python_core_utils.core import dict_merge
from . import models
from . import text
from . import fields
class VersionedModelAdminForm(forms.ModelForm):
    """Versioned model admin form class.
    """
    class Meta:
        """Meta class declaration."""
        model = models.VersionedModel
        labels = text.versioned_model_labels
        help_texts = text.versioned_model_help_texts
        fields = '__all__'

    @classmethod
    def labels(clasz):
        """Return the field-label mapping declared on Meta."""
        return clasz.Meta.labels

    @classmethod
    def help_texts(clasz):
        """Return the field help-text mapping declared on Meta."""
        # Bug fix: previously returned ``clasz.help_texts`` — i.e. this very
        # classmethod object, not the configured texts.  Mirror labels() by
        # reading the attribute from Meta instead.
        return clasz.Meta.help_texts

    def _update_group(self, group, set_name, field_name, commit):
        """
        Utility method for updating many-to-many model field.

        Allows grouping of elements to manage which elements
        are in the group.  When ``commit`` is False, the assignment is
        deferred by wrapping ``save_m2m`` (standard Django admin flow).
        """
        if commit:
            setattr(group, set_name,
                    self.cleaned_data[field_name])
        else:
            old_save_m2m = self.save_m2m

            def new_save_m2m():
                old_save_m2m()
                setattr(group, set_name, self.cleaned_data[field_name])
            self.save_m2m = new_save_m2m
        return group
class BasedNamedModelAdminForm(VersionedModelAdminForm):
    """Base named model admin form class.

    Extends VersionedModelAdminForm with named-model labels/help texts and
    a compact textarea widget for the ``description`` field.
    """
    class Meta(VersionedModelAdminForm.Meta):
        """Meta class declaration."""
        fields = '__all__'
        # Keep the description editor small in the admin change form.
        widgets = {
            'description': forms.Textarea(
                attrs={'rows': 3, 'cols': 40})
        }
        # Combine the inherited label/help-text maps with the named-model
        # specific ones (precedence per python_core_utils.dict_merge).
        labels = dict_merge(
            VersionedModelAdminForm.Meta.labels,
            text.named_model_labels)
        help_texts = dict_merge(
            VersionedModelAdminForm.Meta.help_texts,
            text.named_model_help_texts)
class NamedModelAdminForm(BasedNamedModelAdminForm):
    """Named model admin form class.
    """
    class Meta(BasedNamedModelAdminForm.Meta):
        """Meta class declaration."""
        # Bind the shared named-model form configuration to NamedModel.
        model = models.NamedModel
class OptionalNamedModelAdminForm(BasedNamedModelAdminForm):
    """Optional named model admin form class.
    """
    class Meta(BasedNamedModelAdminForm.Meta):
        """Meta class declaration."""
        # Same configuration as NamedModelAdminForm, bound to the model
        # variant whose name fields are optional.
        model = models.OptionalNamedModel
class GroupAdminForm(forms.ModelForm):
    """
    Admin form with editable user list.

    Exposes the reverse side of the User<->Group many-to-many relation as
    a filtered multi-select so group membership can be edited from the
    Group admin page.
    """
    users = forms.ModelMultipleChoiceField(
        queryset=User.objects.all(),
        widget=FilteredSelectMultiple('Users', False),
        required=False)
    class Meta:
        """Meta class declaration."""
        model = Group
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        # Seed the 'users' field from the group's current membership when
        # editing an existing instance.
        instance = kwargs.get('instance', None)
        if instance is not None:
            initial = kwargs.get('initial', {})
            initial['users'] = instance.user_set.all()
            kwargs['initial'] = initial
        super(GroupAdminForm, self).__init__(*args, **kwargs)
    def save(self, commit=True):
        """Save the group and synchronize its user membership."""
        group = super(GroupAdminForm, self).save(commit=commit)
        if commit:
            group.user_set = self.cleaned_data['users']
        else:
            # commit=False: defer the m2m assignment by wrapping the
            # save_m2m hook Django attaches for deferred saves, preserving
            # the original hook's work before applying ours.
            old_save_m2m = self.save_m2m
            def new_save_m2m():
                old_save_m2m()
                group.user_set = self.cleaned_data['users']
            self.save_m2m = new_save_m2m
        return group
class PrioritizedModelAdminForm(VersionedModelAdminForm):
    """Prioritized model admin form class.

    VersionedModelAdminForm bound to ``models.PrioritizedModel`` with the
    prioritized-model labels and help texts merged in.
    """
    class Meta(VersionedModelAdminForm.Meta):
        """Meta class declaration."""
        # Bug fix: was ``models = models.PrioritizedModel`` (typo), which
        # created a stray ``Meta.models`` attribute and left ``Meta.model``
        # inherited as models.VersionedModel.
        model = models.PrioritizedModel
        labels = dict_merge(
            VersionedModelAdminForm.Meta.labels,
            text.prioritized_model_labels)
        help_texts = dict_merge(
            VersionedModelAdminForm.Meta.help_texts,
            text.prioritized_model_help_texts)
        fields = '__all__'
class InstantMessagingField(forms.URLField):
    """Instant messaging forms field class."""
    # Restrict accepted URLs to the instant-messaging schemes declared in
    # the local ``fields`` module.
    default_validators = [validators.URLValidator(schemes=fields.im_schemes)]
| mit |
apache/incubator-airflow | airflow/providers/amazon/aws/log/s3_task_handler.py | 6 | 8006 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from cached_property import cached_property
from airflow.configuration import conf
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.logging_mixin import LoggingMixin
class S3TaskHandler(FileTaskHandler, LoggingMixin):
    """
    S3TaskHandler is a python log handler that handles and reads
    task instance logs. It extends airflow FileTaskHandler and
    uploads to and reads from S3 remote storage.
    """
    def __init__(self, base_log_folder: str, s3_log_folder: str, filename_template: str):
        super().__init__(base_log_folder, filename_template)
        # Remote (S3) base path mirroring the local base_log_folder layout.
        self.remote_base = s3_log_folder
        # Path of the current task's log file relative to both bases;
        # filled in by set_context().
        self.log_relative_path = ''
        self._hook = None
        # Guards against a second upload when close() is called again,
        # e.g. from logging.shutdown().
        self.closed = False
        self.upload_on_close = True
    @cached_property
    def hook(self):
        """Returns S3Hook."""
        remote_conn_id = conf.get('logging', 'REMOTE_LOG_CONN_ID')
        try:
            from airflow.providers.amazon.aws.hooks.s3 import S3Hook
            return S3Hook(remote_conn_id)
        except Exception:  # pylint: disable=broad-except
            # NOTE(review): on failure this logs and implicitly returns None,
            # so later hook usage raises AttributeError -- confirm this is
            # the intended degradation.
            self.log.exception(
                'Could not create an S3Hook with connection id "%s". '
                'Please make sure that airflow[aws] is installed and '
                'the S3 connection exists.',
                remote_conn_id,
            )
    def set_context(self, ti):
        """Configure the handler for the given task instance."""
        super().set_context(ti)
        # Local location and remote location is needed to open and
        # upload local log file to S3 remote storage.
        self.log_relative_path = self._render_filename(ti, ti.try_number)
        # ti.raw indicates the raw subprocess run; only the wrapping run
        # uploads the log on close.
        self.upload_on_close = not ti.raw
        # Clear the file first so that duplicate data is not uploaded
        # when re-using the same path (e.g. with rescheduled sensors)
        if self.upload_on_close:
            with open(self.handler.baseFilename, 'w'):
                pass
    def close(self):
        """Close and upload local log file to remote storage S3."""
        # When application exit, system shuts down all handlers by
        # calling close method. Here we check if logger is already
        # closed to prevent uploading the log to remote storage multiple
        # times when `logging.shutdown` is called.
        if self.closed:
            return
        super().close()
        if not self.upload_on_close:
            return
        local_loc = os.path.join(self.local_base, self.log_relative_path)
        remote_loc = os.path.join(self.remote_base, self.log_relative_path)
        if os.path.exists(local_loc):
            # read log and remove old logs to get just the latest additions
            with open(local_loc) as logfile:
                log = logfile.read()
            self.s3_write(log, remote_loc)
        # Mark closed so we don't double write if close is called twice
        self.closed = True
    def _read(self, ti, try_number, metadata=None):
        """
        Read logs of given task instance and try_number from S3 remote storage.
        If failed, read the log from task instance host machine.
        :param ti: task instance object
        :param try_number: task instance try_number to read logs from
        :param metadata: log metadata,
            can be used for steaming log reading and auto-tailing.
        """
        # Explicitly getting log relative path is necessary as the given
        # task instance might be different than task instance passed in
        # in set_context method.
        log_relative_path = self._render_filename(ti, try_number)
        remote_loc = os.path.join(self.remote_base, log_relative_path)
        log_exists = False
        log = ""
        try:
            log_exists = self.s3_log_exists(remote_loc)
        except Exception as error:  # pylint: disable=broad-except
            # Existence check failed (e.g. bad credentials); record the error
            # in the returned log text and fall back to the local file below.
            self.log.exception(error)
            log = '*** Failed to verify remote log exists {}.\n{}\n'.format(remote_loc, str(error))
        if log_exists:
            # If S3 remote file exists, we do not fetch logs from task instance
            # local machine even if there are errors reading remote logs, as
            # returned remote_log will contain error messages.
            remote_log = self.s3_read(remote_loc, return_error=True)
            log = f'*** Reading remote log from {remote_loc}.\n{remote_log}\n'
            return log, {'end_of_log': True}
        else:
            log += '*** Falling back to local log\n'
            # NOTE(review): the incoming ``metadata`` argument is not
            # forwarded to the local read -- confirm intentional.
            local_log, metadata = super()._read(ti, try_number)
            return log + local_log, metadata
    def s3_log_exists(self, remote_log_location: str) -> bool:
        """
        Check if remote_log_location exists in remote storage
        :param remote_log_location: log's location in remote storage
        :type remote_log_location: str
        :return: True if location exists else False
        """
        return self.hook.check_for_key(remote_log_location)
    def s3_read(self, remote_log_location: str, return_error: bool = False) -> str:
        """
        Returns the log found at the remote_log_location. Returns '' if no
        logs are found or there is an error.
        :param remote_log_location: the log's location in remote storage
        :type remote_log_location: str (path)
        :param return_error: if True, returns a string error message if an
            error occurs. Otherwise returns '' when an error occurs.
        :type return_error: bool
        :return: the log found at the remote_log_location
        """
        try:
            return self.hook.read_key(remote_log_location)
        except Exception as error:  # pylint: disable=broad-except
            msg = f'Could not read logs from {remote_log_location} with error: {error}'
            self.log.exception(msg)
            # return error if needed
            if return_error:
                return msg
        return ''
    def s3_write(self, log: str, remote_log_location: str, append: bool = True):
        """
        Writes the log to the remote_log_location. Fails silently if no hook
        was created.
        :param log: the log to write to the remote_log_location
        :type log: str
        :param remote_log_location: the log's location in remote storage
        :type remote_log_location: str (path)
        :param append: if False, any existing log file is overwritten. If True,
            the new log is appended to any existing logs.
        :type append: bool
        """
        try:
            # Append is emulated: read the old object and re-upload the
            # concatenation (S3 objects are immutable).
            if append and self.s3_log_exists(remote_log_location):
                old_log = self.s3_read(remote_log_location)
                log = '\n'.join([old_log, log]) if old_log else log
        except Exception as error:  # pylint: disable=broad-except
            self.log.exception('Could not verify previous log to append: %s', str(error))
        try:
            self.hook.load_string(
                log,
                key=remote_log_location,
                replace=True,
                encrypt=conf.getboolean('logging', 'ENCRYPT_S3_LOGS'),
            )
        except Exception:  # pylint: disable=broad-except
            self.log.exception('Could not write logs to %s', remote_log_location)
| apache-2.0 |
nordri/check_domains | lib/python2.7/site-packages/django/forms/formsets.py | 3 | 17459 | from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms.fields import IntegerField, BooleanField
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.six.moves import xrange
from django.utils.translation import ungettext, ugettext as _
__all__ = ('BaseFormSet', 'formset_factory', 'all_valid')
# special field names
# Keys of the ManagementForm's hidden bookkeeping fields.
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MIN_NUM_FORM_COUNT = 'MIN_NUM_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
# Names of the per-form ordering/deletion fields added by add_fields().
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'
# default minimum number of forms in a formset
DEFAULT_MIN_NUM = 0
# default maximum number of forms in a formset, to prevent memory exhaustion
DEFAULT_MAX_NUM = 1000
class ManagementForm(Form):
    """
    ``ManagementForm`` is used to keep track of how many form instances
    are displayed on the page. If adding new forms via javascript, you should
    increment the count field of this form as well.
    """
    def __init__(self, *args, **kwargs):
        # Hidden integer fields are installed on base_fields at init time
        # rather than declared as class attributes; re-assignment of the
        # same keys on each instantiation is harmless.
        self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
        self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
        # MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of
        # the management form, but only for the convenience of client-side
        # code. The POST value of them returned from the client is not checked.
        self.base_fields[MIN_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
        self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
        super(ManagementForm, self).__init__(*args, **kwargs)
@python_2_unicode_compatible
class BaseFormSet(object):
    """
    A collection of instances of the same Form class.
    """
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList):
        # Bound when any submitted data or files were supplied (even empty).
        self.is_bound = data is not None or files is not None
        self.prefix = prefix or self.get_default_prefix()
        self.auto_id = auto_id
        self.data = data or {}
        self.files = files or {}
        self.initial = initial
        self.error_class = error_class
        # Both error caches are populated lazily by full_clean().
        self._errors = None
        self._non_form_errors = None
    def __str__(self):
        # Default string rendering is the table layout.
        return self.as_table()
    def __iter__(self):
        """Yields the forms in the order they should be rendered"""
        return iter(self.forms)
    def __getitem__(self, index):
        """Returns the form at the given index, based on the rendering order"""
        return self.forms[index]
    def __len__(self):
        return len(self.forms)
    def __bool__(self):
        """All formsets have a management form which is not included in the length"""
        return True
    def __nonzero__(self): # Python 2 compatibility
        return type(self).__bool__(self)
    @property
    def management_form(self):
        """Returns the ManagementForm instance for this FormSet."""
        if self.is_bound:
            form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
            if not form.is_valid():
                raise ValidationError(
                    _('ManagementForm data is missing or has been tampered with'),
                    code='missing_management_form',
                )
        else:
            # Unbound: pre-fill the hidden counters from the formset config.
            form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
                TOTAL_FORM_COUNT: self.total_form_count(),
                INITIAL_FORM_COUNT: self.initial_form_count(),
                MIN_NUM_FORM_COUNT: self.min_num,
                MAX_NUM_FORM_COUNT: self.max_num
            })
        return form
    def total_form_count(self):
        """Returns the total number of forms in this FormSet."""
        if self.is_bound:
            # return absolute_max if it is lower than the actual total form
            # count in the data; this is DoS protection to prevent clients
            # from forcing the server to instantiate arbitrary numbers of
            # forms
            return min(self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max)
        else:
            initial_forms = self.initial_form_count()
            total_forms = initial_forms + self.extra
            # Allow all existing related objects/inlines to be displayed,
            # but don't allow extra beyond max_num.
            if initial_forms > self.max_num >= 0:
                total_forms = initial_forms
            elif total_forms > self.max_num >= 0:
                total_forms = self.max_num
        return total_forms
    def initial_form_count(self):
        """Returns the number of forms that are required in this FormSet."""
        if self.is_bound:
            return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
        else:
            # Use the length of the initial data if it's there, 0 otherwise.
            initial_forms = len(self.initial) if self.initial else 0
        return initial_forms
    @cached_property
    def forms(self):
        """
        Instantiate forms at first property access.
        """
        # DoS protection is included in total_form_count()
        forms = [self._construct_form(i) for i in xrange(self.total_form_count())]
        return forms
    def _construct_form(self, i, **kwargs):
        """
        Instantiates and returns the i-th form instance in a formset.
        """
        defaults = {
            'auto_id': self.auto_id,
            'prefix': self.add_prefix(i),
            'error_class': self.error_class,
        }
        if self.is_bound:
            defaults['data'] = self.data
            defaults['files'] = self.files
        if self.initial and 'initial' not in kwargs:
            try:
                defaults['initial'] = self.initial[i]
            except IndexError:
                pass
        # Allow extra forms to be empty.
        if i >= self.initial_form_count():
            defaults['empty_permitted'] = True
        defaults.update(kwargs)
        form = self.form(**defaults)
        self.add_fields(form, i)
        return form
    @property
    def initial_forms(self):
        """Return a list of all the initial forms in this formset."""
        return self.forms[:self.initial_form_count()]
    @property
    def extra_forms(self):
        """Return a list of all the extra forms in this formset."""
        return self.forms[self.initial_form_count():]
    @property
    def empty_form(self):
        # Template form (prefix '__prefix__') for client-side duplication.
        form = self.form(
            auto_id=self.auto_id,
            prefix=self.add_prefix('__prefix__'),
            empty_permitted=True,
        )
        self.add_fields(form, None)
        return form
    @property
    def cleaned_data(self):
        """
        Returns a list of form.cleaned_data dicts for every form in self.forms.
        """
        if not self.is_valid():
            raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
        return [form.cleaned_data for form in self.forms]
    @property
    def deleted_forms(self):
        """
        Returns a list of forms that have been marked for deletion.
        """
        if not self.is_valid() or not self.can_delete:
            return []
        # construct _deleted_form_indexes which is just a list of form indexes
        # that have had their deletion widget set to True
        if not hasattr(self, '_deleted_form_indexes'):
            self._deleted_form_indexes = []
            for i in range(0, self.total_form_count()):
                form = self.forms[i]
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                if self._should_delete_form(form):
                    self._deleted_form_indexes.append(i)
        return [self.forms[i] for i in self._deleted_form_indexes]
    @property
    def ordered_forms(self):
        """
        Returns a list of form in the order specified by the incoming data.
        Raises an AttributeError if ordering is not allowed.
        """
        if not self.is_valid() or not self.can_order:
            raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
        # Construct _ordering, which is a list of (form_index, order_field_value)
        # tuples. After constructing this list, we'll sort it by order_field_value
        # so we have a way to get to the form indexes in the order specified
        # by the form data.
        if not hasattr(self, '_ordering'):
            self._ordering = []
            for i in range(0, self.total_form_count()):
                form = self.forms[i]
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                # don't add data marked for deletion to self.ordered_data
                if self.can_delete and self._should_delete_form(form):
                    continue
                self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
            # After we're done populating self._ordering, sort it.
            # A sort function to order things numerically ascending, but
            # None should be sorted below anything else. Allowing None as
            # a comparison value makes it so we can leave ordering fields
            # blank.
            def compare_ordering_key(k):
                if k[1] is None:
                    return (1, 0) # +infinity, larger than any number
                return (0, k[1])
            self._ordering.sort(key=compare_ordering_key)
        # Return a list of form.cleaned_data dicts in the order specified by
        # the form data.
        return [self.forms[i[0]] for i in self._ordering]
    @classmethod
    def get_default_prefix(cls):
        return 'form'
    def non_form_errors(self):
        """
        Returns an ErrorList of errors that aren't associated with a particular
        form -- i.e., from formset.clean(). Returns an empty ErrorList if there
        are none.
        """
        if self._non_form_errors is None:
            self.full_clean()
        return self._non_form_errors
    @property
    def errors(self):
        """
        Returns a list of form.errors for every form in self.forms.
        """
        if self._errors is None:
            self.full_clean()
        return self._errors
    def total_error_count(self):
        """
        Returns the number of errors across all forms in the formset.
        """
        return len(self.non_form_errors()) +\
            sum(len(form_errors) for form_errors in self.errors)
    def _should_delete_form(self, form):
        """
        Returns whether or not the form was marked for deletion.
        """
        return form.cleaned_data.get(DELETION_FIELD_NAME, False)
    def is_valid(self):
        """
        Returns True if every form in self.forms is valid.
        """
        if not self.is_bound:
            return False
        # We loop over every form.errors here rather than short circuiting on the
        # first failure to make sure validation gets triggered for every form.
        forms_valid = True
        # This triggers a full clean.
        self.errors
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            if self.can_delete:
                if self._should_delete_form(form):
                    # This form is going to be deleted so any of its errors
                    # should not cause the entire formset to be invalid.
                    continue
            forms_valid &= form.is_valid()
        return forms_valid and not bool(self.non_form_errors())
    def full_clean(self):
        """
        Cleans all of self.data and populates self._errors and
        self._non_form_errors.
        """
        self._errors = []
        self._non_form_errors = self.error_class()
        if not self.is_bound: # Stop further processing.
            return
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            self._errors.append(form.errors)
        try:
            # Enforce the configured min/max form counts before cross-form
            # validation; violations become non-form errors below.
            if (self.validate_max and
                    self.total_form_count() - len(self.deleted_forms) > self.max_num) or \
                    self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max:
                raise ValidationError(ungettext(
                    "Please submit %d or fewer forms.",
                    "Please submit %d or fewer forms.", self.max_num) % self.max_num,
                    code='too_many_forms',
                )
            if (self.validate_min and
                    self.total_form_count() - len(self.deleted_forms) < self.min_num):
                raise ValidationError(ungettext(
                    "Please submit %d or more forms.",
                    "Please submit %d or more forms.", self.min_num) % self.min_num,
                    code='too_few_forms')
            # Give self.clean() a chance to do cross-form validation.
            self.clean()
        except ValidationError as e:
            self._non_form_errors = self.error_class(e.error_list)
    def clean(self):
        """
        Hook for doing any extra formset-wide cleaning after Form.clean() has
        been called on every form. Any ValidationError raised by this method
        will not be associated with a particular form; it will be accessible
        via formset.non_form_errors()
        """
        pass
    def has_changed(self):
        """
        Returns true if data in any form differs from initial.
        """
        return any(form.has_changed() for form in self)
    def add_fields(self, form, index):
        """A hook for adding extra fields on to each form instance."""
        if self.can_order:
            # Only pre-fill the ordering field for initial forms.
            if index is not None and index < self.initial_form_count():
                form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index + 1, required=False)
            else:
                form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)
        if self.can_delete:
            form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)
    def add_prefix(self, index):
        # Per-form HTML name prefix, e.g. 'form-0'.
        return '%s-%s' % (self.prefix, index)
    def is_multipart(self):
        """
        Returns True if the formset needs to be multipart, i.e. it
        has FileInput. Otherwise, False.
        """
        if self.forms:
            return self.forms[0].is_multipart()
        else:
            return self.empty_form.is_multipart()
    @property
    def media(self):
        # All the forms on a FormSet are the same, so you only need to
        # interrogate the first form for media.
        if self.forms:
            return self.forms[0].media
        else:
            return self.empty_form.media
    def as_table(self):
        "Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
        # XXX: there is no semantic division between forms here, there
        # probably should be. It might make sense to render each form as a
        # table row with each field as a td.
        forms = ' '.join(form.as_table() for form in self)
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
    def as_p(self):
        "Returns this formset rendered as HTML <p>s."
        forms = ' '.join(form.as_p() for form in self)
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
    def as_ul(self):
        "Returns this formset rendered as HTML <li>s."
        forms = ' '.join(form.as_ul() for form in self)
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
                    can_delete=False, max_num=None, validate_max=False,
                    min_num=None, validate_min=False):
    """Return a FormSet for the given form class."""
    min_num = DEFAULT_MIN_NUM if min_num is None else min_num
    max_num = DEFAULT_MAX_NUM if max_num is None else max_num
    # Hard limit on forms instantiated, to prevent memory-exhaustion attacks.
    # It is simply max_num + DEFAULT_MAX_NUM, which equals 2 * DEFAULT_MAX_NUM
    # when max_num was not supplied.
    absolute_max = max_num + DEFAULT_MAX_NUM
    attrs = {
        'form': form,
        # min_num forms are always shown, so they count toward 'extra'.
        'extra': extra + min_num,
        'can_order': can_order,
        'can_delete': can_delete,
        'min_num': min_num,
        'max_num': max_num,
        'absolute_max': absolute_max,
        'validate_min': validate_min,
        'validate_max': validate_max,
    }
    return type(form.__name__ + str('FormSet'), (formset,), attrs)
def all_valid(formsets):
    """Returns true if every formset in formsets is valid."""
    # Deliberately validate every formset (no short-circuit) so that each
    # one's errors get populated even after the first failure.
    results = [formset.is_valid() for formset in formsets]
    return all(results)
| gpl-3.0 |
jfbelisle/triosante | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/easy_xml.py | 1558 | 4945 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
  """ Serializes an easy_xml content structure to an XML string.

  Visual Studio files have a lot of pre-defined structures.  This function
  makes it easy to represent these structures as Python data structures,
  instead of having to create a lot of function calls.

  Each XML element of the content is represented as a list composed of:
  1. The name of the element, a string,
  2. The attributes of the element, a dictionary (optional), and
  3+. The content of the element, if any.  Strings are simple text nodes and
      lists are child elements.

  Example 1:
      <test/>
  becomes
      ['test']

  Example 2:
      <myelement a='value1' b='value2'>
         <childtype>This is</childtype>
         <childtype>it!</childtype>
      </myelement>
  becomes
      ['myelement', {'a':'value1', 'b':'value2'},
         ['childtype', 'This is'],
         ['childtype', 'it!'],
      ]

  Args:
    content:  The structured content to be converted.
    encoding: The encoding to report on the first XML line.
    pretty: True if we want pretty printing with indents and new lines.

  Returns:
    The XML content as a string.
  """
  # Collect every fragment of the document, then join once at the end.
  parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
  if pretty:
    parts.append('\n')
  _ConstructContentList(parts, content, pretty)
  return ''.join(parts)
def _ConstructContentList(xml_parts, specification, pretty, level=0):
  """ Appends the XML parts corresponding to the specification.

  Args:
    xml_parts: A list of XML parts to be appended to.
    specification: The specification of the element. See EasyXml docs.
    pretty: True if we want pretty printing with indents and new lines.
    level: Indentation level.
  """
  # NOTE: this module targets Python 2 (iteritems, bare reduce below).
  # The first item in a specification is the name of the element.
  if pretty:
    indentation = ' ' * level
    new_line = '\n'
  else:
    indentation = ''
    new_line = ''
  name = specification[0]
  if not isinstance(name, str):
    raise Exception('The first item of an EasyXml specification should be '
                    'a string.  Specification was ' + str(specification))
  xml_parts.append(indentation + '<' + name)
  # Optionally in second position is a dictionary of the attributes.
  rest = specification[1:]
  if rest and isinstance(rest[0], dict):
    # Sorted for deterministic output across runs.
    for at, val in sorted(rest[0].iteritems()):
      xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
    rest = rest[1:]
  if rest:
    xml_parts.append('>')
    # multi_line is True when any child is a nested element (non-string).
    all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
    multi_line = not all_strings
    if multi_line and new_line:
      xml_parts.append(new_line)
    for child_spec in rest:
      # If it's a string, append a text node.
      # Otherwise recurse over that child definition
      if isinstance(child_spec, str):
        xml_parts.append(_XmlEscape(child_spec))
      else:
        _ConstructContentList(xml_parts, child_spec, pretty, level + 1)
    if multi_line and indentation:
      xml_parts.append(indentation)
    xml_parts.append('</%s>%s' % (name, new_line))
  else:
    # No attributes beyond the dict and no children: self-closing element.
    xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
                      win32=False):
  """ Writes the XML content to disk, touching the file only if it has changed.

  Args:
    content: The structured content to be written.
    path: Location of the file.
    encoding: The encoding to report on the first line of the XML file.
    pretty: True if we want pretty printing with indents and new lines.
  """
  xml_string = XmlToString(content, encoding, pretty)
  if win32 and os.linesep != '\r\n':
    xml_string = xml_string.replace('\n', '\r\n')
  try:
    xml_string = xml_string.encode(encoding)
  except Exception:
    # Python 2 fallback: decode as latin-1 first, then encode.
    xml_string = unicode(xml_string, 'latin-1').encode(encoding)
  # Get the old content; any failure (missing file, etc.) means "no content".
  try:
    with open(path, 'r') as f:
      existing = f.read()
  except:
    existing = None
  # Only rewrite (and touch) the file when the serialized form differs.
  if existing != xml_string:
    with open(path, 'w') as f:
      f.write(xml_string)
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '
',
'\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
| apache-2.0 |
leftbrainstrain/PyBBIO | bbio/platform/beaglebone/config.py | 3 | 16921 | # PyBBIO config file for bealebone
#---------------------------------------------------#
# Changes to this file may lead to permanent damage #
# to you Beaglebone, edit with care. #
#---------------------------------------------------#
import glob
########################################
##--- Start control module config: ---##
# Bit fields of the AM335x control-module pad-configuration registers
# (one register per pin).
CONF_SLEW_SLOW = 1<<6  # slow pad slew rate when set
CONF_RX_ACTIVE = 1<<5  # input receiver enabled (required to read the pin)
CONF_PULLUP = 1<<4  # select the internal pull-up resistor
CONF_PULLDOWN = 0x00  # select the internal pull-down (register default)
CONF_PULL_DISABLE = 1<<3  # disable the internal pull resistor entirely
CONF_GPIO_MODE = 0x07  # pin mux mode used for GPIO on these pins
CONF_GPIO_OUTPUT = CONF_GPIO_MODE
CONF_GPIO_INPUT = CONF_GPIO_MODE | CONF_RX_ACTIVE
CONF_ADC_PIN = CONF_RX_ACTIVE | CONF_PULL_DISABLE
##--- End control module config ------##
########################################
########################################
##--- Start device tree: ---##
# Cape-manager slots file; None when no cape manager is present.
SLOTS_FILE = glob.glob('/sys/devices/bone_capemgr.*/slots')
SLOTS_FILE = SLOTS_FILE[0] if len(SLOTS_FILE) else None
# NOTE(review): raises IndexError when /sys/devices/ocp.* does not exist
# (off-device, or kernels with a different sysfs layout) -- confirm intended.
OCP_PATH = glob.glob('/sys/devices/ocp.*')[0]
##--- End device tree config ------##
########################################
##############################
##--- Start GPIO config: ---##
GPIO_FILE_BASE = '/sys/class/gpio'
EXPORT_FILE = GPIO_FILE_BASE + '/export'
UNEXPORT_FILE = GPIO_FILE_BASE + '/unexport'
GET_USR_LED_DIRECTORY = lambda USRX : \
"/sys/class/leds/beaglebone:green:%s" % USRX.lower()
# Digital IO keywords:
INPUT = 1
OUTPUT = 0
PULLDOWN = -1
NOPULL = 0
PULLUP = 1
HIGH = 1
LOW = 0
RISING = 1
FALLING = -1
BOTH = 0
MSBFIRST = 1
LSBFIRST = -1
## GPIO pins:
# GPIO pins must be in form:
# [signal_name, dt_offset, gpio_num], where 'dt_offset' is
# the control module register offset from 44e10800 as used in the device
# tree, and 'gpio_num' is the pin number used by the kernel driver, e.g.:
# "GPIO1_4" = [ 'gpmc_ad4', 0x10, 32*1 + 4]
GPIO = {
"USR0" : {
'signal' : 'gpmc_a5',
'offset' : 0x054,
'gpio_num' : 1*32+21,
'header_pin' : None
},
"USR1" : {
'signal' : 'gpmc_a6',
'offset' : 0x058,
'gpio_num' : 1*32+22,
'header_pin' : None
},
"USR2" : {
'signal' : 'gpmc_a7',
'offset' : 0x05c,
'gpio_num' : 1*32+23,
'header_pin' : None
},
"USR3" : {
'signal' : 'gpmc_a8',
'offset' : 0x060,
'gpio_num' : 1*32+24,
'header_pin' : None
},
"GPIO0_2" : {
'signal' : 'spi0_sclk',
'offset' : 0x150,
'gpio_num' : 0*32+2,
'header_pin' : 'P9_22'
},
"GPIO0_3" : {
'signal' : 'spi0_d0',
'offset' : 0x154,
'gpio_num' : 0*32+3,
'header_pin' : 'P9_21'
},
"GPIO0_4" : {
'signal' : 'spi0_d1',
'offset' : 0x158,
'gpio_num' : 0*32+4,
'header_pin' : 'P9_18'
},
"GPIO0_5" : {
'signal' : 'spi0_cs0',
'offset' : 0x15c,
'gpio_num' : 0*32+5,
'header_pin' : 'P9_17'
},
"GPIO0_7" : {
'signal' : 'ecap0_in_pwm0_out',
'offset' : 0x164,
'gpio_num' : 0*32+7,
'header_pin' : 'P9_42'
},
"GPIO0_8" : {
'signal' : 'lcd_data12',
'offset' : 0x0d0,
'gpio_num' : 0*32+8,
'header_pin' : 'P8_35'
},
"GPIO0_9" : {
'signal' : 'lcd_data13',
'offset' : 0x0d4,
'gpio_num' : 0*32+9,
'header_pin' : 'P8_33'
},
"GPIO0_10" : {
'signal' : 'lcd_data14',
'offset' : 0x0d8,
'gpio_num' : 0*32+10,
'header_pin' : 'P8_31'
},
"GPIO0_11" : {
'signal' : 'lcd_data15',
'offset' : 0x0dc,
'gpio_num' : 0*32+11,
'header_pin' : 'P8_32'
},
"GPIO0_12" : {
'signal' : 'uart1_ctsn',
'offset' : 0x178,
'gpio_num' : 0*32+12,
'header_pin' : 'P9_20'
},
"GPIO0_13" : {
'signal' : 'uart1_rtsn',
'offset' : 0x17c,
'gpio_num' : 0*32+13,
'header_pin' : 'P9_19'
},
"GPIO0_14" : {
'signal' : 'uart1_rxd',
'offset' : 0x180,
'gpio_num' : 0*32+14,
'header_pin' : 'P9_26'
},
"GPIO0_15" : {
'signal' : 'uart1_txd',
'offset' : 0x184,
'gpio_num' : 0*32+15,
'header_pin' : 'P9_24'
},
"GPIO0_20" : {
'signal' : 'xdma_event_intr1',
'offset' : 0x1b4,
'gpio_num' : 0*32+20,
'header_pin' : 'P9_41'
},
"GPIO0_22" : {
'signal' : 'gpmc_ad8',
'offset' : 0x020,
'gpio_num' : 0*32+22,
'header_pin' : 'P8_19'
},
"GPIO0_23" : {
'signal' : 'gpmc_ad9',
'offset' : 0x024,
'gpio_num' : 0*32+23,
'header_pin' : 'P8_13'
},
"GPIO0_26" : {
'signal' : 'gpmc_ad10',
'offset' : 0x028,
'gpio_num' : 0*32+26,
'header_pin' : 'P8_14'
},
"GPIO0_27" : {
'signal' : 'gpmc_ad11',
'offset' : 0x02c,
'gpio_num' : 0*32+27,
'header_pin' : 'P8_17'
},
"GPIO0_30" : {
'signal' : 'gpmc_wait0',
'offset' : 0x070,
'gpio_num' : 0*32+30,
'header_pin' : 'P9_11'
},
"GPIO0_31" : {
'signal' : 'gpmc_wpn',
'offset' : 0x074,
'gpio_num' : 0*32+31,
'header_pin' : 'P9_13'
},
"GPIO1_0" : {
'signal' : 'gpmc_ad0',
'offset' : 0x000,
'gpio_num' : 1*32+0,
'header_pin' : 'P8_25'
},
"GPIO1_1" : {
'signal' : 'gpmc_ad1',
'offset' : 0x004,
'gpio_num' : 1*32+1,
'header_pin' : 'P8_24'
},
"GPIO1_2" : {
'signal' : 'gpmc_ad2',
'offset' : 0x008,
'gpio_num' : 1*32+2,
'header_pin' : 'P8_5'
},
"GPIO1_3" : {
'signal' : 'gpmc_ad3',
'offset' : 0x00c,
'gpio_num' : 1*32+3,
'header_pin' : 'P8_6'
},
"GPIO1_4" : {
'signal' : 'gpmc_ad4',
'offset' : 0x010,
'gpio_num' : 1*32+4,
'header_pin' : 'P8_23'
},
"GPIO1_5" : {
'signal' : 'gpmc_ad5',
'offset' : 0x014,
'gpio_num' : 1*32+ 5,
'header_pin' : 'P8_22'
},
"GPIO1_6" : {
'signal' : 'gpmc_ad6',
'offset' : 0x018,
'gpio_num' : 1*32+6,
'header_pin' : 'P8_3'
},
"GPIO1_7" : {
'signal' : 'gpmc_ad7',
'offset' : 0x01c,
'gpio_num' : 1*32+7,
'header_pin' : 'P8_4'
},
"GPIO1_12" : {
'signal' : 'gpmc_ad12',
'offset' : 0x030,
'gpio_num' : 1*32+12,
'header_pin' : 'P8_12'
},
"GPIO1_13" : {
'signal' : 'gpmc_ad13',
'offset' : 0x034,
'gpio_num' : 1*32+13,
'header_pin' : 'P8_11'
},
"GPIO1_14" : {
'signal' : 'gpmc_ad14',
'offset' : 0x038,
'gpio_num' : 1*32+14,
'header_pin' : 'P8_16'
},
"GPIO1_15" : {
'signal' : 'gpmc_ad15',
'offset' : 0x03c,
'gpio_num' : 1*32+15,
'header_pin' : 'P8_15'
},
"GPIO1_16" : {
'signal' : 'gpmc_a0',
'offset' : 0x040,
'gpio_num' : 1*32+16,
'header_pin' : 'P9_15'
},
"GPIO1_17" : {
'signal' : 'gpmc_a1',
'offset' : 0x044,
'gpio_num' : 1*32+17,
'header_pin' : 'P9_23'
},
"GPIO1_18" : {
'signal' : 'gpmc_a2',
'offset' : 0x048,
'gpio_num' : 1*32+18,
'header_pin' : 'P9_14'
},
"GPIO1_19" : {
'signal' : 'gpmc_a3',
'offset' : 0x04c,
'gpio_num' : 1*32+19,
'header_pin' : 'P9_16'
},
"GPIO1_28" : {
'signal' : 'gpmc_ben1',
'offset' : 0x078,
'gpio_num' : 1*32+28,
'header_pin' : 'P9_12'
},
"GPIO1_29" : {
'signal' : 'gpmc_csn0',
'offset' : 0x07c,
'gpio_num' : 1*32+29,
'header_pin' : 'P8_26'
},
"GPIO1_30" : {
'signal' : 'gpmc_csn1',
'offset' : 0x080,
'gpio_num' : 1*32+30,
'header_pin' : 'P8_21'
},
"GPIO1_31" : {
'signal' : 'gpmc_csn2',
'offset' : 0x084,
'gpio_num' : 1*32+31,
'header_pin' : 'P8_20'
},
"GPIO2_1" : {
'signal' : 'gpmc_clk',
'offset' : 0x08c,
'gpio_num' : 2*32+1,
'header_pin' : 'P8_18'
},
"GPIO2_2" : {
'signal' : 'gpmc_advn_ale',
'offset' : 0x090,
'gpio_num' : 2*32+2,
'header_pin' : 'P8_7'
},
"GPIO2_3" : {
'signal' : 'gpmc_oen_ren',
'offset' : 0x094,
'gpio_num' : 2*32+3,
'header_pin' : 'P8_8'
},
"GPIO2_4" : {
'signal' : 'gpmc_wen',
'offset' : 0x098,
'gpio_num' : 2*32+4,
'header_pin' : 'P8_10'
},
"GPIO2_5" : {
'signal' : 'gpmc_ben0_cle',
'offset' : 0x09c,
'gpio_num' : 2*32+5,
'header_pin' : 'P8_9'
},
"GPIO2_6" : {
'signal' : 'lcd_data0',
'offset' : 0x0a0,
'gpio_num' : 2*32+6,
'header_pin' : 'P8_45'
},
"GPIO2_7" : {
'signal' : 'lcd_data1',
'offset' : 0x0a4,
'gpio_num' : 2*32+7,
'header_pin' : 'P8_46'
},
"GPIO2_8" : {
'signal' : 'lcd_data2',
'offset' : 0x0a8,
'gpio_num' : 2*32+8,
'header_pin' : 'P8_43'
},
"GPIO2_9" : {
'signal' : 'lcd_data3',
'offset' : 0x0ac,
'gpio_num' : 2*32+9,
'header_pin' : 'P8_44'
},
"GPIO2_10" : {
'signal' : 'lcd_data4',
'offset' : 0x0b0,
'gpio_num' : 2*32+10,
'header_pin' : 'P8_41'
},
"GPIO2_11" : {
'signal' : 'lcd_data5',
'offset' : 0x0b4,
'gpio_num' : 2*32+11,
'header_pin' : 'P8_42'
},
"GPIO2_12" : {
'signal' : 'lcd_data6',
'offset' : 0x0b8,
'gpio_num' : 2*32+12,
'header_pin' : 'P8_39'
},
"GPIO2_13" : {
'signal' : 'lcd_data7',
'offset' : 0x0bc,
'gpio_num' : 2*32+13,
'header_pin' : 'P8_40'
},
"GPIO2_14" : {
'signal' : 'lcd_data8',
'offset' : 0x0c0,
'gpio_num' : 2*32+14,
'header_pin' : 'P8_37'
},
"GPIO2_15" : {
'signal' : 'lcd_data9',
'offset' : 0x0c4,
'gpio_num' : 2*32+15,
'header_pin' : 'P8_38'
},
"GPIO2_16" : {
'signal' : 'lcd_data10',
'offset' : 0x0c8,
'gpio_num' : 2*32+16,
'header_pin' : 'P8_36'
},
"GPIO2_17" : {
'signal' : 'lcd_data11',
'offset' : 0x0cc,
'gpio_num' : 2*32+17,
'header_pin' : 'P8_34'
},
"GPIO2_22" : {
'signal' : 'lcd_vsync',
'offset' : 0x0e0,
'gpio_num' : 2*32+22,
'header_pin' : 'P8_27'
},
"GPIO2_23" : {
'signal' : 'lcd_hsync',
'offset' : 0x0e4,
'gpio_num' : 2*32+23,
'header_pin' : 'P8_29'
},
"GPIO2_24" : {
'signal' : 'lcd_pclk',
'offset' : 0x0e8,
'gpio_num' : 2*32+24,
'header_pin' : 'P8_28'
},
"GPIO2_25" : {
'signal' : 'lcd_ac_bias_en',
'offset' : 0x0ec,
'gpio_num' : 2*32+25,
'header_pin' : 'P8_30'
},
"GPIO3_14" : {
'signal' : 'mcasp0_aclkx',
'offset' : 0x190,
'gpio_num' : 3*32+14,
'header_pin' : 'P9_31'
},
"GPIO3_15" : {
'signal' : 'mcasp0_fsx',
'offset' : 0x194,
'gpio_num' : 3*32+15,
'header_pin' : 'P9_29'
},
"GPIO3_16" : {
'signal' : 'mcasp0_axr0',
'offset' : 0x198,
'gpio_num' : 3*32+16,
'header_pin' : 'P9_30'
},
"GPIO3_17" : {
'signal' : 'mcasp0_ahclkr',
'offset' : 0x19c,
'gpio_num' : 3*32+17,
'header_pin' : 'P9_28'
},
"GPIO3_19" : {
'signal' : 'mcasp0_fsr',
'offset' : 0x1a4,
'gpio_num' : 3*32+19,
'header_pin' : 'P9_27'
},
"GPIO3_21" : {
'signal' : 'mcasp0_ahclkx',
'offset' : 0x1ac,
'gpio_num' : 3*32+21,
'header_pin' : 'P9_25'
},
}
def getGPIODirectory(gpio_pin):
  """ Returns the sysfs kernel driver base directory for the given pin.

  USR LEDs live under the LED-class driver; every other pin is a standard
  /sys/class/gpio entry keyed by its kernel GPIO number.
  """
  if 'USR' in gpio_pin:
    return GET_USR_LED_DIRECTORY(gpio_pin)
  return '%s/gpio%i' % (GPIO_FILE_BASE, GPIO[gpio_pin]['gpio_num'])
def getGPIODirectionFile(gpio_pin):
  """ Returns the absolute path to the direction control file for the given
  pin, or '' for USR LEDs (their driver has no direction file). """
  if 'USR' in gpio_pin:
    # USR LED driver doesn't have a direction file
    return ''
  d = getGPIODirectory(gpio_pin)
  return '%s/direction' % d
def getGPIOStateFile(gpio_pin):
  """ Returns the absolute path to the state control file for the given pin.

  For USR LEDs this is the LED driver's 'brightness' file; for normal GPIO
  pins it is the sysfs 'value' file.
  """
  base = getGPIODirectory(gpio_pin)
  filename = 'brightness' if 'USR' in gpio_pin else 'value'
  return '%s/%s' % (base, filename)
# Pre-compute and cache the sysfs control-file paths for every pin so that
# runtime digital I/O does not have to rebuild them on each access.
for pin in GPIO.keys():
  GPIO[pin]['direction_file'] = getGPIODirectionFile(pin)
  GPIO[pin]['state_file'] = getGPIOStateFile(pin)
# Having available pins in a dictionary makes it easy to
# check for invalid pins, but it's nice not to have to pass
# around strings, so here's some friendly constants:
USR0 = "USR0"
USR1 = "USR1"
USR2 = "USR2"
USR3 = "USR3"
GPIO0_2 = "GPIO0_2"
GPIO0_3 = "GPIO0_3"
GPIO0_4 = "GPIO0_4"
GPIO0_5 = "GPIO0_5"
GPIO0_7 = "GPIO0_7"
GPIO0_8 = "GPIO0_8"
GPIO0_9 = "GPIO0_9"
GPIO0_10 = "GPIO0_10"
GPIO0_11 = "GPIO0_11"
GPIO0_12 = "GPIO0_12"
GPIO0_13 = "GPIO0_13"
GPIO0_14 = "GPIO0_14"
GPIO0_15 = "GPIO0_15"
GPIO0_20 = "GPIO0_20"
GPIO0_22 = "GPIO0_22"
GPIO0_23 = "GPIO0_23"
GPIO0_26 = "GPIO0_26"
GPIO0_27 = "GPIO0_27"
GPIO0_30 = "GPIO0_30"
GPIO0_31 = "GPIO0_31"
GPIO1_0 = "GPIO1_0"
GPIO1_1 = "GPIO1_1"
GPIO1_2 = "GPIO1_2"
GPIO1_3 = "GPIO1_3"
GPIO1_4 = "GPIO1_4"
GPIO1_5 = "GPIO1_5"
GPIO1_6 = "GPIO1_6"
GPIO1_7 = "GPIO1_7"
GPIO1_12 = "GPIO1_12"
GPIO1_13 = "GPIO1_13"
GPIO1_14 = "GPIO1_14"
GPIO1_15 = "GPIO1_15"
GPIO1_16 = "GPIO1_16"
GPIO1_17 = "GPIO1_17"
GPIO1_18 = "GPIO1_18"
GPIO1_19 = "GPIO1_19"
GPIO1_28 = "GPIO1_28"
GPIO1_29 = "GPIO1_29"
GPIO1_30 = "GPIO1_30"
GPIO1_31 = "GPIO1_31"
GPIO2_1 = "GPIO2_1"
GPIO2_2 = "GPIO2_2"
GPIO2_3 = "GPIO2_3"
GPIO2_4 = "GPIO2_4"
GPIO2_5 = "GPIO2_5"
GPIO2_6 = "GPIO2_6"
GPIO2_7 = "GPIO2_7"
GPIO2_8 = "GPIO2_8"
GPIO2_9 = "GPIO2_9"
GPIO2_10 = "GPIO2_10"
GPIO2_11 = "GPIO2_11"
GPIO2_12 = "GPIO2_12"
GPIO2_13 = "GPIO2_13"
GPIO2_14 = "GPIO2_14"
GPIO2_15 = "GPIO2_15"
GPIO2_16 = "GPIO2_16"
GPIO2_17 = "GPIO2_17"
GPIO2_22 = "GPIO2_22"
GPIO2_23 = "GPIO2_23"
GPIO2_24 = "GPIO2_24"
GPIO2_25 = "GPIO2_25"
GPIO3_14 = "GPIO3_14"
GPIO3_15 = "GPIO3_15"
GPIO3_16 = "GPIO3_16"
GPIO3_17 = "GPIO3_17"
GPIO3_19 = "GPIO3_19"
GPIO3_21 = "GPIO3_21"
##--- End GPIO config ------##
##############################
#############################
##--- Start ADC config: ---##
ADC_ENABLE_DTS_OVERLAY = 'PyBBIO-ADC'
# ADC pins should be in the form:
# ['path/to/adc-file', 'Channel-enable-overlay', 'header_pin']
ADC = {
'AIN0' : ['%s/PyBBIO-AIN0.*/AIN0' % OCP_PATH, 'PyBBIO-AIN0', 'P9.39'],
'AIN1' : ['%s/PyBBIO-AIN1.*/AIN1' % OCP_PATH, 'PyBBIO-AIN1', 'P9.40'],
'AIN2' : ['%s/PyBBIO-AIN2.*/AIN2' % OCP_PATH, 'PyBBIO-AIN2', 'P9.37'],
'AIN3' : ['%s/PyBBIO-AIN3.*/AIN3' % OCP_PATH, 'PyBBIO-AIN3', 'P9.38'],
'AIN4' : ['%s/PyBBIO-AIN4.*/AIN4' % OCP_PATH, 'PyBBIO-AIN4', 'P9.33'],
'AIN5' : ['%s/PyBBIO-AIN5.*/AIN5' % OCP_PATH, 'PyBBIO-AIN5', 'P9.36'],
'AIN6' : ['%s/PyBBIO-AIN6.*/AIN6' % OCP_PATH, 'PyBBIO-AIN6', 'P9.35'],
'AIN7' : ['%s/PyBBIO-AIN7.*/AIN7' % OCP_PATH, 'PyBBIO-AIN7', 'vsys'],
}
# And some constants so the user doesn't need to use strings:
AIN0 = A0 = 'AIN0'
AIN1 = A1 = 'AIN1'
AIN2 = A2 = 'AIN2'
AIN3 = A3 = 'AIN3'
AIN4 = A4 = 'AIN4'
AIN5 = A5 = 'AIN5'
AIN6 = A6 = 'AIN6'
AIN7 = A7 = VSYS = 'AIN7'
##--- End ADC config ------##
#############################
##############################
##--- Start UART config: ---##
# Formatting constants to mimic Arduino's serial.print() formatting:
DEC = 'DEC'
BIN = 'BIN'
OCT = 'OCT'
HEX = 'HEX'
UART = {
'UART1' : ['/dev/ttyO1', 'BB-UART1'],
'UART2' : ['/dev/ttyO2', 'BB-UART2'],
'UART4' : ['/dev/ttyO4', 'BB-UART4'],
'UART5' : ['/dev/ttyO5', 'BB-UART5']
}
##--- End UART config ------##
##############################
##############################
##--- Start PWM config: ----##
# Predefined resolutions for analogWrite():
RES_16BIT = 2**16
RES_8BIT = 2**8
PERCENT = 100
# Default frequency in Hz of PWM modules (must be >0):
PWM_DEFAULT_FREQ = 100000
# PWM config dict in form:
# ['overlay_file', 'path/to/ocp_helper_dir', ['required', 'overlays']]
PWM_PINS = {
'PWM1A' : ['bone_pwm_P9_14', '%s/pwm_test_P9_14.*' % OCP_PATH,
['PyBBIO-epwmss1', 'PyBBIO-ehrpwm1']],
'PWM1B' : ['bone_pwm_P9_16', '%s/pwm_test_P9_16.*' % OCP_PATH,
['PyBBIO-epwmss1', 'PyBBIO-ehrpwm1']],
'PWM2A' : ['bone_pwm_P8_19', '%s/pwm_test_P8_19.*' % OCP_PATH,
['PyBBIO-epwmss2', 'PyBBIO-ehrpwm2']],
'PWM2B' : ['bone_pwm_P8_13', '%s/pwm_test_P8_13.*' % OCP_PATH,
['PyBBIO-epwmss2', 'PyBBIO-ehrpwm2']],
'ECAP0' : ['bone_pwm_P9_42', '%s/pwm_test_P9_42.*' % OCP_PATH,
['PyBBIO-epwmss0', 'PyBBIO-ecap0']],
'ECAP1' : ['bone_pwm_P9_28', '%s/pwm_test_P9_28.*' % OCP_PATH,
['PyBBIO-epwmss1', 'PyBBIO-ecap1']],
}
# Using the built-in pin overlays for now, I see no need for custom ones
PWM1A = 'PWM1A'
PWM1B = 'PWM1B'
PWM2A = 'PWM2A'
PWM2B = 'PWM2B'
ECAP0 = 'ECAP0'
ECAP1 = 'ECAP1'
# ocp helper filenames:
PWM_RUN = 'run'
PWM_DUTY = 'duty'
PWM_PERIOD = 'period'
PWM_POLARITY = 'polarity'
PWM_DEFAULT_PERIOD = int(1e9/PWM_DEFAULT_FREQ)
##--- End PWM config: ------##
##############################
##############################
##--- Start SPI config: ----##
SPI_BASE_ADDRESSES = [
0x48030000, # SPI0
0x481a0000 # SPI1
]
##--- End SPI config: ------##
##############################
##############################
##--- Start I2C config: ----##
I2C_BASE_ADDRESSES = [
0x44e0b000, # I2C0 (shouldn't ever be used)
0x4802a000, # I2C1
0x4819c000 # I2C2
]
##--- End I2C config: ------##
##############################
| mit |
nmayorov/scipy | scipy/signal/lti_conversion.py | 3 | 15058 | """
ltisys -- a collection of functions to convert linear time invariant systems
from one representation to another.
"""
import numpy
import numpy as np
from numpy import (r_, eye, atleast_2d, poly, dot,
asarray, prod, zeros, array, outer)
from scipy import linalg
from .filter_design import tf2zpk, zpk2tf, normalize
__all__ = ['tf2ss', 'abcd_normalize', 'ss2tf', 'zpk2ss', 'ss2zpk',
'cont2discrete']
def tf2ss(num, den):
    r"""Transfer function to state-space representation.

    Builds the controller-canonical-form realization of the transfer
    function ``num(s) / den(s)``.

    Parameters
    ----------
    num, den : array_like
        Numerator and denominator polynomial coefficients, in order of
        descending degree.  ``den`` must be at least as long as ``num``.

    Returns
    -------
    A, B, C, D : ndarray
        State space representation of the system, in controller canonical
        form.

    Examples
    --------
    >>> from scipy.signal import tf2ss
    >>> A, B, C, D = tf2ss([1, 3, 3], [1, 2, 1])
    >>> A
    array([[-2., -1.],
           [ 1.,  0.]])
    """
    # States are chosen so that X(s) = U(s) / den(s) and Y(s) = num(s) X(s);
    # A, B, C, D then follow directly from the polynomial coefficients.
    num, den = normalize(num, den)  # strips leading zeros, validates arrays
    if len(num.shape) == 1:
        num = np.asarray([num], num.dtype)
    n_num = num.shape[1]
    n_den = len(den)
    if n_num > n_den:
        raise ValueError(
            "Improper transfer function. `num` is longer than `den`.")
    if n_num == 0 or n_den == 0:
        # Null system: represented by four empty matrices.
        return (np.array([], float), np.array([], float),
                np.array([], float), np.array([], float))

    # Left-pad the numerator so it has as many columns as the denominator.
    num = np.hstack((np.zeros((num.shape[0], n_den - n_num), num.dtype), num))

    if num.shape[-1] > 0:
        D = np.atleast_2d(num[:, 0])
    else:
        # Not a null system -- it merely has a zero D matrix, so give it a
        # non-empty shape that downstream functions (e.g. ss2tf) accept.
        D = np.array([[0]], float)

    if n_den == 1:
        # Static (gain-only) system: no dynamics at all.
        D = D.reshape(num.shape)
        return (np.zeros((1, 1)), np.zeros((1, D.shape[1])),
                np.zeros((D.shape[0], 1)), D)

    first_row = -np.array([den[1:]])
    A = np.vstack((first_row, np.eye(n_den - 2, n_den - 1)))
    B = np.eye(n_den - 1, 1)
    C = num[:, 1:] - np.outer(num[:, 0], den[1:])
    D = D.reshape((C.shape[0], B.shape[1]))
    return A, B, C, D
def _none_to_empty_2d(arg):
if arg is None:
return zeros((0, 0))
else:
return arg
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
def _shape_or_none(M):
if M is not None:
return M.shape
else:
return (None,) * 2
def _choice_not_none(*args):
for arg in args:
if arg is not None:
return arg
def _restore(M, shape):
if M.shape == (0, 0):
return zeros(shape)
else:
if M.shape != shape:
raise ValueError("The input arrays have incompatible shapes.")
return M
def abcd_normalize(A=None, B=None, C=None, D=None):
    """Check state-space matrices and ensure they are 2-D.

    Missing matrices are rebuilt from the dimensions of the ones that were
    supplied, ensuring the correct number of rows and columns.  If the
    system dimensions cannot all be deduced, a ValueError is raised.

    Parameters
    ----------
    A, B, C, D : array_like, optional
        State-space matrices.  All of them are None (missing) by default.
        See `ss2tf` for format.

    Returns
    -------
    A, B, C, D : array
        Properly shaped state-space matrices.

    Raises
    ------
    ValueError
        If not enough information on the system was provided.
    """
    A, B, C, D = (_atleast_2d_or_none(m) for m in (A, B, C, D))

    rows_a, cols_a = _shape_or_none(A)
    rows_b, cols_b = _shape_or_none(B)
    rows_c, cols_c = _shape_or_none(C)
    rows_d, cols_d = _shape_or_none(D)

    # n states, p inputs, q outputs -- each deducible from several matrices.
    n = _choice_not_none(rows_a, rows_b, cols_c)
    p = _choice_not_none(cols_b, cols_d)
    q = _choice_not_none(rows_c, rows_d)
    if n is None or p is None or q is None:
        raise ValueError("Not enough information on the system.")

    A, B, C, D = (_none_to_empty_2d(m) for m in (A, B, C, D))
    A = _restore(A, (n, n))
    B = _restore(B, (n, p))
    C = _restore(C, (q, n))
    D = _restore(D, (q, p))
    return A, B, C, D
def ss2tf(A, B, C, D, input=0):
    r"""State-space to transfer function.

    A, B, C, D defines a linear state-space system with `p` inputs,
    `q` outputs, and `n` state variables.

    Parameters
    ----------
    A : array_like
        State (or system) matrix of shape ``(n, n)``
    B : array_like
        Input matrix of shape ``(n, p)``
    C : array_like
        Output matrix of shape ``(q, n)``
    D : array_like
        Feedthrough (or feedforward) matrix of shape ``(q, p)``
    input : int, optional
        For multiple-input systems, the index of the input to use.

    Returns
    -------
    num : 2-D ndarray
        Numerator(s) of the resulting transfer function(s). `num` has one row
        for each of the system's outputs. Each row is a sequence representation
        of the numerator polynomial.
    den : 1-D ndarray
        Denominator of the resulting transfer function(s). `den` is a sequence
        representation of the denominator polynomial.

    Examples
    --------
    Convert the state-space representation:

    .. math::

        \dot{\textbf{x}}(t) =
        \begin{bmatrix} -2 & -1 \\ 1 & 0 \end{bmatrix} \textbf{x}(t) +
        \begin{bmatrix} 1 \\ 0 \end{bmatrix} \textbf{u}(t) \\

        \textbf{y}(t) = \begin{bmatrix} 1 & 2 \end{bmatrix} \textbf{x}(t) +
        \begin{bmatrix} 1 \end{bmatrix} \textbf{u}(t)

    >>> A = [[-2, -1], [1, 0]]
    >>> B = [[1], [0]]  # 2-D column vector
    >>> C = [[1, 2]]    # 2-D row vector
    >>> D = 1

    to the transfer function:

    .. math:: H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}

    >>> from scipy.signal import ss2tf
    >>> ss2tf(A, B, C, D)
    (array([[1, 3, 3]]), array([ 1.,  2.,  1.]))
    """
    # transfer function is C (sI - A)**(-1) B + D
    # Check consistency and make them all rank-2 arrays
    A, B, C, D = abcd_normalize(A, B, C, D)
    nout, nin = D.shape
    if input >= nin:
        raise ValueError("System does not have the input specified.")
    # make SIMO from possibly MIMO system.
    B = B[:, input:input + 1]
    D = D[:, input:input + 1]
    try:
        # Denominator = characteristic polynomial of A.
        den = poly(A)
    except ValueError:
        # poly() rejects empty A (stateless system); the TF is then just D,
        # with a unit (scalar) denominator.
        den = 1
    if (prod(B.shape, axis=0) == 0) and (prod(C.shape, axis=0) == 0):
        # No states feed through to the output: numerator is D alone.
        num = numpy.ravel(D)
        if (prod(D.shape, axis=0) == 0) and (prod(A.shape, axis=0) == 0):
            den = []
        return num, den
    num_states = A.shape[0]
    # Sum one element of each matrix just to obtain the promoted result
    # dtype for the numerator array.
    type_test = A[:, 0] + B[:, 0] + C[0, :] + D
    num = numpy.empty((nout, num_states + 1), type_test.dtype)
    for k in range(nout):
        Ck = atleast_2d(C[k, :])
        # By the matrix determinant lemma,
        #   det(sI - A + B Ck) = det(sI - A) * (1 + Ck (sI - A)^-1 B),
        # so poly(A - B Ck) = den + (numerator of Ck (sI-A)^-1 B).
        # Adding (D[k] - 1) * den then yields the numerator of the full
        # transfer function Ck (sI - A)^-1 B + D[k].
        num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den
    return num, den
def zpk2ss(z, p, k):
    """Zero-pole-gain representation to state-space representation

    Parameters
    ----------
    z, p : sequence
        Zeros and poles.
    k : float
        System gain.

    Returns
    -------
    A, B, C, D : ndarray
        State space representation of the system, in controller canonical
        form.
    """
    # Go through the transfer-function form: (z, p, k) -> (num, den) -> ss.
    num, den = zpk2tf(z, p, k)
    return tf2ss(num, den)
def ss2zpk(A, B, C, D, input=0):
    """State-space representation to zero-pole-gain representation.

    A, B, C, D defines a linear state-space system with `p` inputs,
    `q` outputs, and `n` state variables.

    Parameters
    ----------
    A : array_like
        State (or system) matrix of shape ``(n, n)``
    B : array_like
        Input matrix of shape ``(n, p)``
    C : array_like
        Output matrix of shape ``(q, n)``
    D : array_like
        Feedthrough (or feedforward) matrix of shape ``(q, p)``
    input : int, optional
        For multiple-input systems, the index of the input to use.

    Returns
    -------
    z, p : sequence
        Zeros and poles.
    k : float
        System gain.
    """
    # Go through the transfer-function form: ss -> (num, den) -> (z, p, k).
    num, den = ss2tf(A, B, C, D, input=input)
    return tf2zpk(num, den)
def cont2discrete(system, dt, method="zoh", alpha=None):
    """
    Transform a continuous to a discrete state-space system.

    Parameters
    ----------
    system : a tuple describing the system or an instance of `lti`
        The following gives the number of elements in the tuple and
        the interpretation:

        * 1: (instance of `lti`)
        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)

    dt : float
        The discretization time step.
    method : str, optional
        Which method to use:

        * gbt: generalized bilinear transformation
        * bilinear: Tustin's approximation ("gbt" with alpha=0.5)
        * euler: Euler (or forward differencing) method ("gbt" with alpha=0)
        * backward_diff: Backwards differencing ("gbt" with alpha=1.0)
        * zoh: zero-order hold (default)
        * foh: first-order hold (*versionadded: 1.3.0*)
        * impulse: equivalent impulse response (*versionadded: 1.3.0*)

    alpha : float within [0, 1], optional
        The generalized bilinear transformation weighting parameter, which
        should only be specified with method="gbt", and is ignored otherwise

    Returns
    -------
    sysd : tuple containing the discrete system
        Based on the input type, the output will be of the form

        * (num, den, dt)   for transfer function input
        * (zeros, poles, gain, dt)   for zeros-poles-gain input
        * (A, B, C, D, dt) for state-space system input

    Notes
    -----
    By default, the routine uses a Zero-Order Hold (zoh) method to perform
    the transformation. Alternatively, a generalized bilinear transformation
    may be used, which includes the common Tustin's bilinear approximation,
    an Euler's method technique, or a backwards differencing technique.

    The Zero-Order Hold (zoh) method is based on [1]_, the generalized bilinear
    approximation is based on [2]_ and [3]_, the First-Order Hold (foh) method
    is based on [4]_.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Discretization#Discretization_of_linear_state_space_models
    .. [2] http://techteach.no/publications/discretetime_signals_systems/discrete.pdf
    .. [3] G. Zhang, X. Chen, and T. Chen, Digital redesign via the generalized
        bilinear transformation, Int. J. Control, vol. 82, no. 4, pp. 741-754,
        2009.
        (https://www.mypolyuweb.hk/~magzhang/Research/ZCC09_IJC.pdf)
    .. [4] G. F. Franklin, J. D. Powell, and M. L. Workman, Digital control
        of dynamic systems, 3rd ed. Menlo Park, Calif: Addison-Wesley,
        pp. 204-206, 1998.
    """
    # An `lti` instance knows how to discretize itself.
    if len(system) == 1:
        return system.to_discrete()
    # Transfer-function and zpk forms are converted to state space,
    # discretized there, and converted back to the original form.
    if len(system) == 2:
        sysd = cont2discrete(tf2ss(system[0], system[1]), dt, method=method,
                             alpha=alpha)
        return ss2tf(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
    elif len(system) == 3:
        sysd = cont2discrete(zpk2ss(system[0], system[1], system[2]), dt,
                             method=method, alpha=alpha)
        return ss2zpk(sysd[0], sysd[1], sysd[2], sysd[3]) + (dt,)
    elif len(system) == 4:
        a, b, c, d = system
    else:
        raise ValueError("First argument must either be a tuple of 2 (tf), "
                         "3 (zpk), or 4 (ss) arrays.")

    # Validate alpha up front for the gbt family.
    if method == 'gbt':
        if alpha is None:
            raise ValueError("Alpha parameter must be specified for the "
                             "generalized bilinear transform (gbt) method")
        elif alpha < 0 or alpha > 1:
            raise ValueError("Alpha parameter must be within the interval "
                             "[0,1] for the gbt method")

    if method == 'gbt':
        # Generalized bilinear transform:
        #   ad = (I - alpha*dt*A)^-1 (I + (1-alpha)*dt*A)
        #   bd = (I - alpha*dt*A)^-1 dt*B
        # This parameter is used repeatedly - compute once here
        ima = np.eye(a.shape[0]) - alpha*dt*a
        ad = linalg.solve(ima, np.eye(a.shape[0]) + (1.0-alpha)*dt*a)
        bd = linalg.solve(ima, dt*b)
        # Similarly solve for the output equation matrices
        cd = linalg.solve(ima.transpose(), c.transpose())
        cd = cd.transpose()
        dd = d + alpha*np.dot(c, bd)
    elif method == 'bilinear' or method == 'tustin':
        return cont2discrete(system, dt, method="gbt", alpha=0.5)
    elif method == 'euler' or method == 'forward_diff':
        return cont2discrete(system, dt, method="gbt", alpha=0.0)
    elif method == 'backward_diff':
        return cont2discrete(system, dt, method="gbt", alpha=1.0)
    elif method == 'zoh':
        # Build an exponential matrix: expm(dt * [[A, B], [0, 0]]) yields
        # [[Ad, Bd], [0, I]] in its upper rows.
        em_upper = np.hstack((a, b))

        # Need to stack zeros under the a and b matrices
        em_lower = np.hstack((np.zeros((b.shape[1], a.shape[0])),
                              np.zeros((b.shape[1], b.shape[1]))))

        em = np.vstack((em_upper, em_lower))
        ms = linalg.expm(dt * em)

        # Dispose of the lower rows
        ms = ms[:a.shape[0], :]

        ad = ms[:, 0:a.shape[1]]
        bd = ms[:, a.shape[1]:]

        cd = c
        dd = d
    elif method == 'foh':
        # Size parameters for convenience
        n = a.shape[0]
        m = b.shape[1]

        # Build an exponential matrix similar to 'zoh' method; the extra
        # identity block produces the integral terms the first-order hold
        # needs (Franklin/Powell/Workman, see reference [4]).
        em_upper = linalg.block_diag(np.block([a, b]) * dt, np.eye(m))
        em_lower = zeros((m, n + 2 * m))
        em = np.block([[em_upper], [em_lower]])

        ms = linalg.expm(em)

        # Get the three blocks from upper rows
        ms11 = ms[:n, 0:n]
        ms12 = ms[:n, n:n + m]
        ms13 = ms[:n, n + m:]

        ad = ms11
        bd = ms12 - ms13 + ms11 @ ms13
        cd = c
        dd = d + c @ ms13
    elif method == 'impulse':
        if not np.allclose(d, 0):
            raise ValueError("Impulse method is only applicable"
                             "to strictly proper systems")

        # Match the continuous impulse response at the sample instants.
        ad = linalg.expm(a * dt)
        bd = ad @ b * dt
        cd = c
        dd = c @ b * dt
    else:
        raise ValueError("Unknown transformation method '%s'" % method)

    return ad, bd, cd, dd, dt
| bsd-3-clause |
cyberphox/MissionPlanner | Lib/compileall.py | 50 | 7452 | """Module/script to "compile" all .py files to .pyc (or .pyo) file.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, if compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import py_compile
import struct
import imp
__all__ = ["compile_dir","compile_file","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None,
                force=0, rx=None, quiet=0):
    """Byte-compile all modules in the given directory tree.

    Arguments (only dir is required):

    dir:       the directory to byte-compile
    maxlevels: maximum recursion level (default 10)
    ddir:      if given, purported directory name (this is the
               directory name that will show up in error messages)
    force:     if 1, force compilation, even if timestamps are up-to-date
    rx:        if given, files whose full path matches this compiled
               regular expression are skipped
    quiet:     if 1, be quiet during compilation

    Returns 1 on success (all files compiled), 0 otherwise.
    """
    if not quiet:
        print 'Listing', dir, '...'
    try:
        names = os.listdir(dir)
    except os.error:
        # Unreadable directory: report it, compile nothing inside.
        print "Can't list", dir
        names = []
    names.sort()
    success = 1
    for name in names:
        fullname = os.path.join(dir, name)
        if ddir is not None:
            dfile = os.path.join(ddir, name)
        else:
            dfile = None
        if not os.path.isdir(fullname):
            if not compile_file(fullname, ddir, force, rx, quiet):
                success = 0
        # Recurse into real subdirectories only (never '.', '..', or
        # symlinks, to avoid cycles), and only while levels remain.
        elif maxlevels > 0 and \
           name != os.curdir and name != os.pardir and \
           os.path.isdir(fullname) and \
           not os.path.islink(fullname):
            if not compile_dir(fullname, maxlevels - 1, dfile, force, rx,
                               quiet):
                success = 0
    return success
def compile_file(fullname, ddir=None, force=0, rx=None, quiet=0):
    """Byte-compile one file.

    Arguments (only fullname is required):

    fullname: the file to byte-compile
    ddir:     if given, purported directory name (this is the
              directory name that will show up in error messages)
    force:    if 1, force compilation, even if timestamps are up-to-date
    rx:       if given, the file is skipped when its full path matches
              this compiled regular expression
    quiet:    if 1, be quiet during compilation

    Returns 1 on success (compiled, up-to-date, or skipped), 0 on failure.
    """
    success = 1
    name = os.path.basename(fullname)
    if ddir is not None:
        dfile = os.path.join(ddir, name)
    else:
        dfile = None
    if rx is not None:
        mo = rx.search(fullname)
        if mo:
            return success
    if os.path.isfile(fullname):
        head, tail = name[:-3], name[-3:]
        if tail == '.py':
            if not force:
                # Up-to-date check: a .pyc/.pyo starts with the 4-byte magic
                # number followed by the source mtime; if that 8-byte header
                # matches, the cached bytecode is current and we can skip.
                try:
                    mtime = int(os.stat(fullname).st_mtime)
                    expect = struct.pack('<4sl', imp.get_magic(), mtime)
                    cfile = fullname + (__debug__ and 'c' or 'o')
                    with open(cfile, 'rb') as chandle:
                        actual = chandle.read(8)
                    if expect == actual:
                        return success
                except IOError:
                    pass
            if not quiet:
                print 'Compiling', fullname, '...'
            try:
                ok = py_compile.compile(fullname, None, dfile, True)
            except py_compile.PyCompileError,err:
                # Report the error even in quiet mode (the filename was not
                # printed above in that case).
                if quiet:
                    print 'Compiling', fullname, '...'
                print err.msg
                success = 0
            except IOError, e:
                print "Sorry", e
                success = 0
            else:
                if ok == 0:
                    success = 0
    return success
def compile_path(skip_curdir=1, maxlevels=0, force=0, quiet=0):
    """Byte-compile all module on sys.path.

    Arguments (all optional):

    skip_curdir: if true, skip current directory (default true)
    maxlevels:   max recursion level (default 0)
    force:       as for compile_dir() (default 0)
    quiet:       as for compile_dir() (default 0)

    Returns 1 on success, 0 if any entry failed to compile.
    """
    success = 1
    for dir in sys.path:
        if (not dir or dir == os.curdir) and skip_curdir:
            print 'Skipping current directory'
        else:
            success = success and compile_dir(dir, maxlevels, None,
                                              force, quiet=quiet)
    return success
def expand_args(args, flist):
    """read names in flist and append to args

    flist is a filename, or '-' for stdin; one name per line, trailing
    newline stripped.  Returns a new list; `args` is not modified.
    Reraises IOError after printing a message if the list can't be read.
    """
    expanded = args[:]
    if flist:
        try:
            if flist == '-':
                fd = sys.stdin
            else:
                # NOTE(review): fd is never closed here (and must not be
                # closed when it is sys.stdin) -- acceptable for a
                # short-lived script, but worth confirming.
                fd = open(flist)
            while 1:
                line = fd.readline()
                if not line:
                    break
                # Strip the trailing newline character.
                expanded.append(line[:-1])
        except IOError:
            print "Error reading file list %s" % flist
            raise
    return expanded
def main():
    """Script main program.

    Parses command-line options, then compiles the given directories/files
    (or, with no arguments, everything on sys.path).  Returns 1 on success,
    0 on any failure.
    """
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:i:')
    except getopt.error, msg:
        # Bad option: print usage help and exit with status 2.
        print msg
        print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \
              "[-x regexp] [-i list] [directory|file ...]"
        print "-l: don't recurse down"
        print "-f: force rebuild even if timestamps are up-to-date"
        print "-q: quiet operation"
        print "-d destdir: purported directory name for error messages"
        print " if no directory arguments, -l sys.path is assumed"
        print "-x regexp: skip files matching the regular expression regexp"
        print " the regexp is searched for in the full path of the file"
        print "-i list: expand list with its content (file and directory names)"
        sys.exit(2)
    # Option defaults; see usage text above for meanings.
    maxlevels = 10
    ddir = None
    force = 0
    quiet = 0
    rx = None
    flist = None
    for o, a in opts:
        if o == '-l': maxlevels = 0
        if o == '-d': ddir = a
        if o == '-f': force = 1
        if o == '-q': quiet = 1
        if o == '-x':
            import re
            rx = re.compile(a)
        if o == '-i': flist = a
    if ddir:
        # -d only makes sense for a single directory tree.
        if len(args) != 1 and not os.path.isdir(args[0]):
            print "-d destdir require exactly one directory argument"
            sys.exit(2)
    success = 1
    try:
        if args or flist:
            try:
                if flist:
                    args = expand_args(args, flist)
            except IOError:
                success = 0
            if success:
                for arg in args:
                    if os.path.isdir(arg):
                        if not compile_dir(arg, maxlevels, ddir,
                                           force, rx, quiet):
                            success = 0
                    else:
                        if not compile_file(arg, ddir, force, rx, quiet):
                            success = 0
        else:
            # No arguments: compile everything reachable from sys.path.
            success = compile_path()
    except KeyboardInterrupt:
        print "\n[interrupt]"
        success = 0
    return success
if __name__ == '__main__':
    # main() returns truthy on success; invert it into the conventional
    # process exit status (0 = success, 1 = failure).
    exit_status = int(not main())
    sys.exit(exit_status)
| gpl-3.0 |
Lindurion/closure-pro-build | 3p/closure-library-20130212/closure/bin/build/closurebuilder.py | 61 | 8580 | #!/usr/bin/env python
#
# Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for Closure Library dependency calculation.
ClosureBuilder scans source files to build dependency info. From the
dependencies, the script can produce a manifest in dependency order,
a concatenated script, or compiled output from the Closure Compiler.
Paths to files can be expressed as individual arguments to the tool (intended
for use with find and xargs). As a convenience, --root can be used to specify
all JS files below a directory.
usage: %prog [options] [file1.js file2.js ...]
"""
__author__ = 'nnaze@google.com (Nathan Naze)'
import logging
import optparse
import os
import sys
import depstree
import jscompiler
import source
import treescan
def _GetOptionsParser():
"""Get the options parser."""
parser = optparse.OptionParser(__doc__)
parser.add_option('-i',
'--input',
dest='inputs',
action='append',
default=[],
help='One or more input files to calculate dependencies '
'for. The namespaces in this file will be combined with '
'those given with the -n flag to form the set of '
'namespaces to find dependencies for.')
parser.add_option('-n',
'--namespace',
dest='namespaces',
action='append',
default=[],
help='One or more namespaces to calculate dependencies '
'for. These namespaces will be combined with those given '
'with the -i flag to form the set of namespaces to find '
'dependencies for. A Closure namespace is a '
'dot-delimited path expression declared with a call to '
'goog.provide() (e.g. "goog.array" or "foo.bar").')
parser.add_option('--root',
dest='roots',
action='append',
default=[],
help='The paths that should be traversed to build the '
'dependencies.')
parser.add_option('-o',
'--output_mode',
dest='output_mode',
type='choice',
action='store',
choices=['list', 'script', 'compiled'],
default='list',
help='The type of output to generate from this script. '
'Options are "list" for a list of filenames, "script" '
'for a single script containing the contents of all the '
'files, or "compiled" to produce compiled output with '
'the Closure Compiler. Default is "list".')
parser.add_option('-c',
'--compiler_jar',
dest='compiler_jar',
action='store',
help='The location of the Closure compiler .jar file.')
parser.add_option('-f',
'--compiler_flags',
dest='compiler_flags',
default=[],
action='append',
help='Additional flags to pass to the Closure compiler. '
'To pass multiple flags, --compiler_flags has to be '
'specified multiple times.')
parser.add_option('--output_file',
dest='output_file',
action='store',
help=('If specified, write output to this path instead of '
'writing to standard output.'))
return parser
def _GetInputByPath(path, sources):
"""Get the source identified by a path.
Args:
path: str, A path to a file that identifies a source.
sources: An iterable collection of source objects.
Returns:
The source from sources identified by path, if found. Converts to
absolute paths for comparison.
"""
for js_source in sources:
# Convert both to absolute paths for comparison.
if os.path.abspath(path) == os.path.abspath(js_source.GetPath()):
return js_source
def _GetClosureBaseFile(sources):
  """Given a set of sources, returns the one Closure base.js file.

  Note that if zero, or two or more, base.js files are found, an error is
  logged and the program exits.

  Args:
    sources: An iterable of _PathSource objects.

  Returns:
    The _PathSource representing the base Closure file.
  """
  base_files = []
  for js_source in sources:
    if _IsClosureBaseFile(js_source):
      base_files.append(js_source)
  # Exactly one base.js must exist; anything else is a fatal setup error.
  if not base_files:
    logging.error('No Closure base.js file found.')
    sys.exit(1)
  if len(base_files) > 1:
    logging.error('More than one Closure base.js files found at these paths:')
    for base_file in base_files:
      logging.error(base_file.GetPath())
    sys.exit(1)
  return base_files[0]
def _IsClosureBaseFile(js_source):
"""Returns true if the given _PathSource is the Closure base.js source."""
return (os.path.basename(js_source.GetPath()) == 'base.js' and
js_source.provides == set(['goog']))
class _PathSource(source.Source):
  """Source file subclass that remembers its file path."""

  def __init__(self, path):
    """Initialize a source.

    Args:
      path: str, Path to a JavaScript file. The source string will be read
        from this file.
    """
    contents = source.GetFileContents(path)
    super(_PathSource, self).__init__(contents)
    self._path = path

  def GetPath(self):
    """Returns the path this source was read from."""
    return self._path
def main():
  """Entry point: scan sources, resolve Closure deps, and emit output.

  Output is a file list, a concatenated script, or compiled JS depending on
  --output_mode; it goes to --output_file or stdout.  Exits non-zero on any
  configuration or compilation error.
  """
  logging.basicConfig(format=(sys.argv[0] + ': %(message)s'),
                      level=logging.INFO)
  options, args = _GetOptionsParser().parse_args()
  # Make our output pipe.
  if options.output_file:
    out = open(options.output_file, 'w')
  else:
    out = sys.stdout
  sources = set()
  logging.info('Scanning paths...')
  for path in options.roots:
    for js_path in treescan.ScanTreeForJsFiles(path):
      sources.add(_PathSource(js_path))
  # Add scripts specified on the command line.
  for js_path in args:
    sources.add(_PathSource(js_path))
  logging.info('%s sources scanned.', len(sources))
  # Though deps output doesn't need to query the tree, we still build it
  # to validate dependencies.
  logging.info('Building dependency tree..')
  tree = depstree.DepsTree(sources)
  # Union of namespaces provided by -i files and given directly via -n.
  input_namespaces = set()
  inputs = options.inputs or []
  for input_path in inputs:
    js_input = _GetInputByPath(input_path, sources)
    if not js_input:
      logging.error('No source matched input %s', input_path)
      sys.exit(1)
    input_namespaces.update(js_input.provides)
  input_namespaces.update(options.namespaces)
  if not input_namespaces:
    logging.error('No namespaces found. At least one namespace must be '
                  'specified with the --namespace or --input flags.')
    sys.exit(2)
  # The Closure Library base file must go first.
  base = _GetClosureBaseFile(sources)
  deps = [base] + tree.GetDependencies(input_namespaces)
  output_mode = options.output_mode
  if output_mode == 'list':
    out.writelines([js_source.GetPath() + '\n' for js_source in deps])
  elif output_mode == 'script':
    out.writelines([js_source.GetSource() for js_source in deps])
  elif output_mode == 'compiled':
    # Make sure a .jar is specified.
    if not options.compiler_jar:
      logging.error('--compiler_jar flag must be specified if --output is '
                    '"compiled"')
      sys.exit(2)
    compiled_source = jscompiler.Compile(
        options.compiler_jar,
        [js_source.GetPath() for js_source in deps],
        options.compiler_flags)
    if compiled_source is None:
      logging.error('JavaScript compilation failed.')
      sys.exit(1)
    else:
      logging.info('JavaScript compilation succeeded.')
      out.write(compiled_source)
  else:
    # Should be unreachable: optparse's 'choice' type already validates.
    logging.error('Invalid value for --output flag.')
    sys.exit(2)
if __name__ == '__main__':
  main()
| apache-2.0 |
legalsylvain/OpenUpgrade | addons/account_bank_statement_extensions/wizard/confirm_statement_line.py | 381 | 1490 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class confirm_statement_line(osv.osv_memory):
    """Wizard that marks the selected bank statement lines as confirmed."""
    _name = 'confirm.statement.line'
    _description = 'Confirm selected statement lines'

    def confirm_lines(self, cr, uid, ids, context):
        """Set state='confirm' on the statement lines selected in the client.

        The ids of the lines to confirm are taken from context['active_ids'],
        as is conventional for OpenERP wizards acting on a selection.
        """
        selected_line_ids = context['active_ids']
        statement_line_pool = self.pool.get('account.bank.statement.line')
        statement_line_pool.write(cr, uid, selected_line_ids,
                                  {'state': 'confirm'}, context=context)
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
phektus/Django-Google-AppEngine-OpenId-Auth | django/views/generic/date_based.py | 246 | 14025 | import datetime
import time
from django.template import loader, RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.core.xheaders import populate_xheaders
from django.db.models.fields import DateTimeField
from django.http import Http404, HttpResponse
import warnings
warnings.warn(
'Function-based generic views have been deprecated; use class-based views instead.',
PendingDeprecationWarning
)
def archive_index(request, queryset, date_field, num_latest=15,
                  template_name=None, template_loader=loader,
                  extra_context=None, allow_empty=True, context_processors=None,
                  mimetype=None, allow_future=False, template_object_name='latest'):
    """
    Generic top-level archive of date-based objects.

    Templates: ``<app_label>/<model_name>_archive.html``
    Context:
        date_list
            List of years
        latest
            Latest N (defaults to 15) objects by date
    """
    if extra_context is None: extra_context = {}
    model = queryset.model
    if not allow_future:
        # Hide objects dated in the future unless explicitly requested.
        queryset = queryset.filter(**{'%s__lte' % date_field: datetime.datetime.now()})
    # Reverse so the most recent year comes first.
    date_list = queryset.dates(date_field, 'year')[::-1]
    if not date_list and not allow_empty:
        raise Http404("No %s available" % model._meta.verbose_name)
    if date_list and num_latest:
        latest = queryset.order_by('-'+date_field)[:num_latest]
    else:
        latest = None
    if not template_name:
        template_name = "%s/%s_archive.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'date_list' : date_list,
        template_object_name : latest,
    }, context_processors)
    # Callables in extra_context are invoked so each request gets a fresh value.
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
def archive_year(request, year, queryset, date_field, template_name=None,
                 template_loader=loader, extra_context=None, allow_empty=False,
                 context_processors=None, template_object_name='object', mimetype=None,
                 make_object_list=False, allow_future=False):
    """
    Generic yearly archive view.

    Templates: ``<app_label>/<model_name>_archive_year.html``
    Context:
        date_list
            List of months in this year with objects
        year
            This year
        object_list
            List of objects published in the given year
            (Only available if make_object_list argument is True)
    """
    if extra_context is None: extra_context = {}
    model = queryset.model
    now = datetime.datetime.now()
    lookup_kwargs = {'%s__year' % date_field: year}
    # Only bother to check current date if the year isn't in the past and future objects aren't requested.
    if int(year) >= now.year and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    date_list = queryset.filter(**lookup_kwargs).dates(date_field, 'month')
    if not date_list and not allow_empty:
        raise Http404
    if make_object_list:
        object_list = queryset.filter(**lookup_kwargs)
    else:
        # Keep the context shape consistent even when no list was requested.
        object_list = []
    if not template_name:
        template_name = "%s/%s_archive_year.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'date_list': date_list,
        'year': year,
        '%s_list' % template_object_name: object_list,
    }, context_processors)
    # Callables in extra_context are invoked so each request gets a fresh value.
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
def archive_month(request, year, month, queryset, date_field,
                  month_format='%b', template_name=None, template_loader=loader,
                  extra_context=None, allow_empty=False, context_processors=None,
                  template_object_name='object', mimetype=None, allow_future=False):
    """
    Generic monthly archive view.

    Templates: ``<app_label>/<model_name>_archive_month.html``
    Context:
        date_list:
            List of days in this month with objects
        month:
            (date) this month
        next_month:
            (date) the first day of the next month, or None if the next month is in the future
        previous_month:
            (date) the first day of the previous month
        object_list:
            list of objects published in the given month
    """
    if extra_context is None: extra_context = {}
    try:
        # Parse year+month using the caller-supplied month format (e.g. '%b').
        tt = time.strptime("%s-%s" % (year, month), '%s-%s' % ('%Y', month_format))
        date = datetime.date(*tt[:3])
    except ValueError:
        raise Http404
    model = queryset.model
    now = datetime.datetime.now()
    # Calculate first and last day of month, for use in a date-range lookup.
    first_day = date.replace(day=1)
    if first_day.month == 12:
        # December rolls over into January of the next year.
        last_day = first_day.replace(year=first_day.year + 1, month=1)
    else:
        last_day = first_day.replace(month=first_day.month + 1)
    lookup_kwargs = {
        '%s__gte' % date_field: first_day,
        '%s__lt' % date_field: last_day,
    }
    # Only bother to check current date if the month isn't in the past and future objects are requested.
    if last_day >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    object_list = queryset.filter(**lookup_kwargs)
    date_list = object_list.dates(date_field, 'day')
    if not object_list and not allow_empty:
        raise Http404
    # Calculate the next month, if applicable.
    if allow_future:
        next_month = last_day
    elif last_day <= datetime.date.today():
        next_month = last_day
    else:
        next_month = None
    # Calculate the previous month
    if first_day.month == 1:
        previous_month = first_day.replace(year=first_day.year-1,month=12)
    else:
        previous_month = first_day.replace(month=first_day.month-1)
    if not template_name:
        template_name = "%s/%s_archive_month.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        'date_list': date_list,
        '%s_list' % template_object_name: object_list,
        'month': date,
        'next_month': next_month,
        'previous_month': previous_month,
    }, context_processors)
    # Callables in extra_context are invoked so each request gets a fresh value.
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
def archive_week(request, year, week, queryset, date_field,
                 template_name=None, template_loader=loader,
                 extra_context=None, allow_empty=True, context_processors=None,
                 template_object_name='object', mimetype=None, allow_future=False):
    """
    Generic weekly archive view.

    Templates: ``<app_label>/<model_name>_archive_week.html``
    Context:
        week:
            (date) this week
        object_list:
            list of objects published in the given week
    """
    if extra_context is None: extra_context = {}
    try:
        # '%U' numbers weeks starting on Sunday; the literal '0' pins the
        # parsed weekday to Sunday so we get the first day of that week.
        tt = time.strptime(year+'-0-'+week, '%Y-%w-%U')
        date = datetime.date(*tt[:3])
    except ValueError:
        raise Http404
    model = queryset.model
    now = datetime.datetime.now()
    # Calculate first and last day of week, for use in a date-range lookup.
    first_day = date
    last_day = date + datetime.timedelta(days=7)
    lookup_kwargs = {
        '%s__gte' % date_field: first_day,
        '%s__lt' % date_field: last_day,
    }
    # Only bother to check current date if the week isn't in the past and future objects aren't requested.
    if last_day >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    object_list = queryset.filter(**lookup_kwargs)
    if not object_list and not allow_empty:
        raise Http404
    if not template_name:
        template_name = "%s/%s_archive_week.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    # Bug fix: pass context_processors through to RequestContext, as every
    # other view in this module does; previously the argument was accepted
    # but silently ignored here.
    c = RequestContext(request, {
        '%s_list' % template_object_name: object_list,
        'week': date,
    }, context_processors)
    # Callables in extra_context are invoked so each request gets a fresh value.
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
def archive_day(request, year, month, day, queryset, date_field,
                month_format='%b', day_format='%d', template_name=None,
                template_loader=loader, extra_context=None, allow_empty=False,
                context_processors=None, template_object_name='object',
                mimetype=None, allow_future=False):
    """
    Generic daily archive view.

    Templates: ``<app_label>/<model_name>_archive_day.html``
    Context:
        object_list:
            list of objects published that day
        day:
            (datetime) the day
        previous_day
            (datetime) the previous day
        next_day
            (datetime) the next day, or None if the current day is today
    """
    if extra_context is None: extra_context = {}
    try:
        # Parse the URL components using the caller-supplied formats.
        tt = time.strptime('%s-%s-%s' % (year, month, day),
                           '%s-%s-%s' % ('%Y', month_format, day_format))
        date = datetime.date(*tt[:3])
    except ValueError:
        raise Http404
    model = queryset.model
    now = datetime.datetime.now()
    if isinstance(model._meta.get_field(date_field), DateTimeField):
        # Datetime field: match the whole day via a min..max time range.
        lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
    else:
        lookup_kwargs = {date_field: date}
    # Only bother to check current date if the date isn't in the past and future objects aren't requested.
    if date >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    object_list = queryset.filter(**lookup_kwargs)
    if not allow_empty and not object_list:
        raise Http404
    # Calculate the next day, if applicable.
    if allow_future:
        next_day = date + datetime.timedelta(days=1)
    elif date < datetime.date.today():
        next_day = date + datetime.timedelta(days=1)
    else:
        next_day = None
    if not template_name:
        template_name = "%s/%s_archive_day.html" % (model._meta.app_label, model._meta.object_name.lower())
    t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        '%s_list' % template_object_name: object_list,
        'day': date,
        'previous_day': date - datetime.timedelta(days=1),
        'next_day': next_day,
    }, context_processors)
    # Callables in extra_context are invoked so each request gets a fresh value.
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    return HttpResponse(t.render(c), mimetype=mimetype)
def archive_today(request, **kwargs):
    """
    Generic daily archive view for today. Same as archive_day view.
    """
    today = datetime.date.today()
    # Fill in today's date components, overriding any caller-supplied ones,
    # then delegate to the regular daily archive view.
    kwargs['year'] = str(today.year)
    kwargs['month'] = today.strftime('%b').lower()
    kwargs['day'] = str(today.day)
    return archive_day(request, **kwargs)
def object_detail(request, year, month, day, queryset, date_field,
                  month_format='%b', day_format='%d', object_id=None, slug=None,
                  slug_field='slug', template_name=None, template_name_field=None,
                  template_loader=loader, extra_context=None, context_processors=None,
                  template_object_name='object', mimetype=None, allow_future=False):
    """
    Generic detail view from year/month/day/slug or year/month/day/id structure.

    Templates: ``<app_label>/<model_name>_detail.html``
    Context:
        object:
            the object to be detailed
    """
    if extra_context is None: extra_context = {}
    try:
        # Parse the URL components using the caller-supplied formats.
        tt = time.strptime('%s-%s-%s' % (year, month, day),
                           '%s-%s-%s' % ('%Y', month_format, day_format))
        date = datetime.date(*tt[:3])
    except ValueError:
        raise Http404
    model = queryset.model
    now = datetime.datetime.now()
    if isinstance(model._meta.get_field(date_field), DateTimeField):
        # Datetime field: match the whole day via a min..max time range.
        lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
    else:
        lookup_kwargs = {date_field: date}
    # Only bother to check current date if the date isn't in the past and future objects aren't requested.
    if date >= now.date() and not allow_future:
        lookup_kwargs['%s__lte' % date_field] = now
    # Identify the single object either by primary key or by slug.
    if object_id:
        lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
    elif slug and slug_field:
        lookup_kwargs['%s__exact' % slug_field] = slug
    else:
        raise AttributeError("Generic detail view must be called with either an object_id or a slug/slugfield")
    try:
        obj = queryset.get(**lookup_kwargs)
    except ObjectDoesNotExist:
        raise Http404("No %s found for" % model._meta.verbose_name)
    if not template_name:
        template_name = "%s/%s_detail.html" % (model._meta.app_label, model._meta.object_name.lower())
    if template_name_field:
        # Per-object template override, falling back to the default name.
        template_name_list = [getattr(obj, template_name_field), template_name]
        t = template_loader.select_template(template_name_list)
    else:
        t = template_loader.get_template(template_name)
    c = RequestContext(request, {
        template_object_name: obj,
    }, context_processors)
    # Callables in extra_context are invoked so each request gets a fresh value.
    for key, value in extra_context.items():
        if callable(value):
            c[key] = value()
        else:
            c[key] = value
    response = HttpResponse(t.render(c), mimetype=mimetype)
    populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.name))
    return response
| bsd-3-clause |
biolab/orange | Orange/utils/__init__.py | 6 | 20831 | """
.. index:: utils
Orange.utils contains developer utilities.
------------------
Reporting progress
------------------
.. autoclass:: Orange.utils.ConsoleProgressBar
:members:
-----------------------------
Deprecation utility functions
-----------------------------
.. autofunction:: Orange.utils.deprecation_warning
.. autofunction:: Orange.utils.deprecated_members
.. autofunction:: Orange.utils.deprecated_keywords
.. autofunction:: Orange.utils.deprecated_attribute
.. autofunction:: Orange.utils.deprecated_function_name
----------------
Submodules
----------------
.. automodule:: Orange.utils.environ
.. automodule:: Orange.utils.counters
:members:
.. automodule:: Orange.utils.render
:members:
.. automodule:: Orange.utils.addons
.. automodule:: Orange.utils.selection
.. automodule:: Orange.utils.serverfiles
"""
import sys
import os
import types
import random
import time
import urllib2
import posixpath
import warnings
import StringIO
from functools import wraps
from contextlib import contextmanager
from . import environ
__all__ = ["deprecated_members", "deprecated_keywords",
"deprecated_attribute", "deprecation_warning",
"deprecated_function_name",
"counters", "render", "serverfiles"]
def deprecation_warning(old, new, stacklevel=-2):
    """ Issue a DeprecationWarning about an obsolete attribute access.

    :param old: Old attribute name (used in warning message).
    :param new: New attribute name (used in warning message).
    :param stacklevel: Forwarded unchanged to :func:`warnings.warn`.
    """
    message = "'%s' is deprecated. Use '%s' instead!" % (old, new)
    warnings.warn(message, DeprecationWarning, stacklevel=stacklevel)
# We need to get the instancemethod type: define a throwaway class, capture
# the type of one of its methods, then delete the class again.  The captured
# types are used later (e.g. by deprecated_members) in isinstance checks.
class _Foo():
    def bar(self):
        pass
instancemethod = type(_Foo.bar)
del _Foo
# The type of a plain function object, captured from a lambda.
function = type(lambda: None)
class universal_set(set):
    """ A set that reports every value as a member, i.e. it pretends to
    contain everything (while otherwise behaving as an empty set).
    """
    def __contains__(self, item):
        # Membership always succeeds, no matter what was (not) added.
        return True
def deprecated_members(name_map, wrap_methods="all", in_place=True):
    """ Decorate a class with properties for accessing attributes, and methods
    with deprecated names. In addition methods from the `wrap_methods` list
    will be wrapped to receive mapped keyword arguments.

    :param name_map: A dictionary mapping old into new names.
    :type name_map: dict
    :param wrap_methods: A list of method names to wrap. Wrapped methods will
        be called with mapped keyword arguments (by default all methods will
        be wrapped).
    :type wrap_methods: list
    :param in_place: If True the class will be modified in place, otherwise
        it will be subclassed (default True).
    :type in_place: bool

    Example ::

        >>> class A(object):
        ...     def __init__(self, foo_bar="bar"):
        ...         self.set_foo_bar(foo_bar)
        ...
        ...     def set_foo_bar(self, foo_bar="bar"):
        ...         self.foo_bar = foo_bar
        ...
        ... A = deprecated_members(
        ...     {"fooBar": "foo_bar",
        ...      "setFooBar": "set_foo_bar"},
        ...     wrap_methods=["set_foo_bar", "__init__"])(A)
        ...
        ...
        >>> a = A(fooBar="foo")
        __main__:1: DeprecationWarning: 'fooBar' is deprecated. Use 'foo_bar' instead!
        >>> print a.fooBar, a.foo_bar
        foo foo
        >>> a.setFooBar("FooBar!")
        __main__:1: DeprecationWarning: 'setFooBar' is deprecated. Use 'set_foo_bar' instead!

    .. note:: This decorator does nothing if \
        :obj:`Orange.utils.environ.orange_no_deprecated_members` environment \
        variable is set to `True`.
    """
    # Deprecation support can be disabled globally via the environ flag.
    if environ.orange_no_deprecated_members:
        return lambda cls: cls
    def is_wrapped(method):
        """ Is member method already wrapped.
        """
        if getattr(method, "_deprecate_members_wrapped", False):
            return True
        elif hasattr(method, "im_func"):
            # Python 2 bound/unbound methods carry the flag on im_func.
            im_func = method.im_func
            return getattr(im_func, "_deprecate_members_wrapped", False)
        else:
            return False
    if wrap_methods == "all":
        # universal_set claims to contain every name, so all methods match.
        wrap_methods = universal_set()
    elif not wrap_methods:
        wrap_methods = set()
    def wrapper(cls):
        cls_names = {}
        # Create properties for accessing deprecated members
        for old_name, new_name in name_map.items():
            cls_names[old_name] = deprecated_attribute(old_name, new_name)
        # wrap member methods to map keyword arguments
        for key, value in cls.__dict__.items():
            if isinstance(value, (instancemethod, function)) \
                and not is_wrapped(value) and key in wrap_methods:
                wrapped = deprecated_keywords(name_map)(value)
                wrapped._deprecate_members_wrapped = True # A flag indicating this function already maps keywords
                cls_names[key] = wrapped
        if in_place:
            for key, val in cls_names.items():
                setattr(cls, key, val)
            return cls
        else:
            # Non-destructive variant: return a subclass with the new members.
            return type(cls.__name__, (cls,), cls_names)
    return wrapper
def deprecated_keywords(name_map):
    """ Deprecates the keyword arguments of the function.

    Example ::

        >>> @deprecated_keywords({"myArg": "my_arg"})
        ... def my_func(my_arg=None):
        ...     print my_arg
        ...
        ...
        >>> my_func(myArg="Arg")
        __main__:1: DeprecationWarning: 'myArg' is deprecated. Use 'my_arg' instead!
        Arg

    .. note:: This decorator does nothing if \
        :obj:`Orange.utils.environ.orange_no_deprecated_members` environment \
        variable is set to `True`.
    """
    # Deprecation support can be disabled globally via the environ flag.
    if environ.orange_no_deprecated_members:
        return lambda func: func
    # A name that is both a deprecated key and a replacement value would be
    # remapped twice (or clobbered), so reject such maps up front.
    for name in name_map.values():
        if name in name_map:
            raise ValueError("Deprecation keys and values overlap; this could"
                             " cause trouble!")
    def decorator(func):
        @wraps(func)
        def wrap_call(*args, **kwargs):
            kwargs = dict(kwargs)
            for name in name_map:
                if name in kwargs:
                    # stacklevel=3 points the warning at the caller of the
                    # wrapped function, not at this wrapper.
                    deprecation_warning(name, name_map[name], stacklevel=3)
                    kwargs[name_map[name]] = kwargs[name]
                    del kwargs[name]
            return func(*args, **kwargs)
        return wrap_call
    return decorator
def deprecated_attribute(old_name, new_name):
    """ Return a property object that accesses an attribute named `new_name`
    and raises a deprecation warning when doing so.

    ..
        >>> sys.stderr = sys.stdout

    Example ::

        >>> class A(object):
        ...     def __init__(self):
        ...         self.my_attr = "123"
        ...     myAttr = deprecated_attribute("myAttr", "my_attr")
        ...
        ...
        >>> a = A()
        >>> print a.myAttr
        ...:1: DeprecationWarning: 'myAttr' is deprecated. Use 'my_attr' instead!
        123

    .. note:: This decorator does nothing and returns None if \
        :obj:`Orange.utils.environ.orange_no_deprecated_members` environment \
        variable is set to `True`.
    """
    # Deprecation support can be disabled globally via the environ flag.
    if environ.orange_no_deprecated_members:
        return None
    # All three accessors warn (stacklevel=3 targets the user's code) and
    # then delegate to the attribute under its new name.
    def fget(self):
        deprecation_warning(old_name, new_name, stacklevel=3)
        return getattr(self, new_name)
    def fset(self, value):
        deprecation_warning(old_name, new_name, stacklevel=3)
        setattr(self, new_name, value)
    def fdel(self):
        deprecation_warning(old_name, new_name, stacklevel=3)
        delattr(self, new_name)
    prop = property(fget, fset, fdel,
                    doc="A deprecated member '%s'. Use '%s' instead." % (old_name, new_name))
    return prop
class class_property(object):
    """ A property-like descriptor whose getter also works when accessed
    on the class itself: in that case ``fget`` receives the class instead
    of an instance.
    """
    def __init__(self, fget=None, fset=None, fdel=None, doc="class property"):
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        self.__doc__ = doc

    def __get__(self, instance, owner):
        # Class access (instance is None) falls back to the owner class.
        target = owner if instance is None else instance
        return self.fget(target)
def deprecated_class_attribute(old_name, new_name):
    """ Return a property object that accesses a class attribute
    named `new_name` and raises a deprecation warning when doing so.

    Returns None when deprecation support is globally disabled.
    """
    # Deprecation support can be disabled globally via the environ flag.
    if environ.orange_no_deprecated_members:
        return None
    def fget(self):
        # stacklevel=3 points the warning at the user's code.
        deprecation_warning(old_name, new_name, stacklevel=3)
        return getattr(self, new_name)
    # Read-only: class_property is used so access works on the class too.
    prop = class_property(fget,
                          doc="A deprecated class member '%s'. Use '%s' instead." % (old_name, new_name))
    return prop
def deprecated_function_name(func):
    """ Return a wrapped function that raises an deprecation warning when
    called. This should be used for deprecation of module level function names.

    Example ::

        >>> def func_a(arg):
        ...     print "This is func_a (used to be named funcA) called with", arg
        ...
        ...
        >>> funcA = deprecated_function_name(func_a)
        >>> funcA(None)

    .. note:: This decorator does nothing if \
        :obj:`Orange.utils.environ.orange_no_deprecated_members` environment \
        variable is set to `True`.
    """
    # Deprecation support can be disabled globally via the environ flag.
    if environ.orange_no_deprecated_members:
        return func
    @wraps(func)
    def wrapped(*args, **kwargs):
        # stacklevel=2 points the warning at the caller of the alias.
        warnings.warn("Deprecated function name. Use %r instead!" % func.__name__,
                      DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    return wrapped
class ConsoleProgressBar(object):
    """ A class for printing progress bar reports in the console.

    Example ::

        >>> import sys, time
        >>> progress = ConsoleProgressBar("Example", output=sys.stdout)
        >>> for i in range(100):
        ...     progress.advance()
        ...     # Or progress.set_state(i)
        ...     time.sleep(0.01)
        ...
        ...
        Example ===================================>100%
    """
    def __init__(self, title="", charwidth=40, step=1, output=None):
        """ Initialize the progress bar.

        :param title: The title for the progress bar.
        :type title: str
        :param charwidth: The maximum progress bar width in characters.
        :type charwidth: int
        :param step: A default step used if ``advance`` is called without
            any arguments.
        :type step: int
        :param output: The output file. If None (default) then ``sys.stderr``
            is used.
        :type output: A file-like object to print the progress report to.
        """
        self.title = title + " "
        self.charwidth = charwidth
        self.step = step
        # The last string written; needed by clear() to erase it again.
        self.currstring = ""
        # Current progress state in percent (0..100).
        self.state = 0
        if output is None:
            output = sys.stderr
        self.output = output
    def clear(self, i=-1):
        """ Clear the current progress line indicator string.

        :param i: Number of characters to erase; -1 (default) erases the
            whole previously written string.
        """
        try:
            if hasattr(self.output, "isatty") and self.output.isatty():
                # Terminal: erase by emitting backspaces.
                self.output.write("\b" * (i if i != -1 else len(self.currstring)))
            else:
                # Non-tty (e.g. a file): rewind instead of backspacing.
                self.output.seek(-i if i != -1 else -len(self.currstring), 2)
        except Exception: ## If for some reason we failed
            self.output.write("\n")
    def getstring(self):
        """ Return the progress indicator string for the current state.
        """
        # 5 columns are reserved for the '>' head and the trailing ' 42%'.
        progchar = int(round(float(self.state) * (self.charwidth - 5) / 100.0))
        return self.title + "=" * (progchar) + ">" + " " * (self.charwidth\
            - 5 - progchar) + "%3i" % int(round(self.state)) + "%"
    def printline(self, string):
        """ Print the ``string`` to the output file, replacing the
        previously printed line.
        """
        try:
            self.clear()
            self.output.write(string)
            self.output.flush()
        except Exception:
            # Best-effort output: never let display errors break the caller.
            pass
        self.currstring = string
    def __call__(self, newstate=None):
        """ Set the ``newstate`` as the current state of the progress bar.
        ``newstate`` must be in the interval [0, 100].

        .. note:: ``set_state`` is the preferred way to set a new state.

        :param newstate: The new state of the progress bar.
        :type newstate: float
        """
        if newstate is None:
            self.advance()
        else:
            self.set_state(newstate)
    def set_state(self, newstate):
        """ Set the ``newstate`` as the current state of the progress bar.
        ``newstate`` must be in the interval [0, 100].

        :param newstate: The new state of the progress bar.
        :type newstate: float
        """
        # Only redraw when the integer percentage actually changes.
        if int(newstate) != int(self.state):
            self.state = newstate
            self.printline(self.getstring())
        else:
            self.state = newstate
    def advance(self, step=None):
        """ Advance the current state by ``step``. If ``step`` is None use
        the default step as set at class initialization.
        """
        if step is None:
            step = self.step
        newstate = self.state + step
        self.set_state(newstate)
    def finish(self):
        """ Finish the progress bar (i.e. set the state to 100 and
        print the final newline to the ``output`` file).
        """
        self.__call__(100)
        self.output.write("\n")
def progress_bar_milestones(count, iterations=100):
    """Return a set of integer milestones spread evenly over ``count`` items.

    Useful for reporting progress roughly ``iterations`` times while
    looping over ``count`` elements.
    """
    return {int(index * count / float(iterations)) for index in range(iterations)}
# Deprecated camelCase alias kept for backwards compatibility with old callers.
progressBarMilestones = deprecated_function_name(progress_bar_milestones)
def getobjectname(x, default=""):
    """Return a human readable name for ``x``.

    Strings name themselves; otherwise the first non-empty of a few
    well-known descriptive attributes is used, then the class name parsed
    out of ``repr(x.__class__)``, and finally ``default``.

    :param x: object to name.
    :param default: value returned when no name can be derived.
    """
    # ``types.StringType`` is simply ``str`` on Python 2; using the builtin
    # keeps Python 2 behaviour identical and also works on Python 3.
    if type(x) == str:
        return x
    for attr in ["name", "shortDescription", "description", "func_doc", "func_name"]:
        value = getattr(x, attr, "")
        if value:
            return value
    if hasattr(x, "__class__"):
        r = repr(x.__class__)
        # "<type 'Foo'>" on old-style Python 2, "<class 'Foo'>" otherwise;
        # strip the wrapper to keep just the class name/path.
        if r[1:5] == "type":
            return str(x.__class__)[7:-2]
        elif r[1:6] == "class":
            return str(x.__class__)[8:-2]
    return default
def demangle_examples(x):
    """Return an ``(examples, weight)`` pair.

    A tuple is assumed to already be ``(examples, weight)`` and is returned
    unchanged; any other value gets the default weight 0 attached.
    (``types.TupleType`` is simply ``tuple`` -- the builtin keeps the check
    identical on Python 2 and working on Python 3.)
    """
    if type(x) == tuple:
        return x
    else:
        return x, 0
def frange(*argw):
    """ Like builtin `range` but works with floats.

    Accepted call forms:
      - ``frange(step)``: from ``step`` to 1.0 (inclusive) in ``step`` increments
      - ``frange(stop, step)``: from 0.0 to ``stop`` (inclusive)
      - ``frange(start, stop, step)``

    :raises AttributeError: when called with more than three arguments
        (kept as AttributeError for backwards compatibility even though
        TypeError would be conventional).
    """
    start, stop, step = 0.0, 1.0, 0.1
    if len(argw) == 1:
        start = step = argw[0]
    elif len(argw) == 2:
        stop, step = argw
    elif len(argw) == 3:
        start, stop, step = argw
    elif len(argw) > 3:
        # Call form of raise: the old statement form
        # (``raise AttributeError, "..."``) is a syntax error on Python 3.
        raise AttributeError("1-3 arguments expected")
    # Small epsilon so the endpoint survives floating point rounding.
    stop += 1e-10
    i = 0
    res = []
    while 1:
        f = start + i * step
        if f > stop:
            break
        res.append(f)
        i += 1
    return res
# Module-level verbosity flag; non-zero enables print_verbose output globally.
verbose = 0

def print_verbose(text, *verb):
    """Print ``text`` when verbosity is enabled.

    An optional first extra argument overrides the module-level ``verbose``
    flag for this call (``print_verbose(msg, 1)`` always prints).

    Uses the call form of print -- identical on Python 2 for a single
    argument, and required on Python 3 (the old ``print text`` statement
    is a syntax error there).
    """
    if len(verb) and verb[0] or verbose:
        print(text)
def lru_cache(maxsize=100):
    """ A least recently used cache function decorator.
    (Similar to the functools.lru_cache in python 3.2)

    :param maxsize: maximum number of results kept in the cache; the entry
        with the oldest access time is evicted when the limit is exceeded.

    The decorated function gains a ``clear()`` method (empties the cache)
    and a ``_cache`` attribute (the underlying dict, mainly for tests).
    Positional and keyword arguments must be hashable.
    """
    def decorating_function(func):
        # key -> (last-access timestamp, result)
        cache = {}

        @wraps(func)
        def wrapped(*args, **kwargs):
            key = args + tuple(sorted(kwargs.items()))
            if key not in cache:
                res = func(*args, **kwargs)
                cache[key] = (time.time(), res)
                if len(cache) > maxsize:
                    # Evict the least recently used entry (oldest timestamp).
                    # ``items()`` (not the Python-2-only ``iteritems()``)
                    # keeps this working on both Python 2 and 3; a distinct
                    # name avoids shadowing the just-inserted ``key``.
                    oldest_key, _ = min(cache.items(), key=lambda item: item[1][0])
                    del cache[oldest_key]
            else:
                _, res = cache[key]
                cache[key] = (time.time(), res)  # refresh the access time
            return res

        def clear():
            cache.clear()

        wrapped.clear = clear
        wrapped._cache = cache
        return wrapped
    return decorating_function
@contextmanager
def member_set(obj, name, val):
    """ A context manager that sets member ``name`` on ``obj`` to ``val``
    and restores the previous value on exit.

    .. note:: if ``obj`` had no attribute ``name`` beforehand, the
        attribute is left set to ``val`` on exit (the original code's
        behaviour, preserved here).
    """
    old_val = getattr(obj, name, val)
    setattr(obj, name, val)
    try:
        yield
    finally:
        # Restore even when the managed block raises; the previous
        # implementation skipped restoration on error.
        setattr(obj, name, old_val)
class recursion_limit(object):
    """ A context manager that temporarily installs a new interpreter
    recursion limit, restoring the previous limit on exit.

    :param limit: recursion limit to apply inside the ``with`` block.
    """
    def __init__(self, limit=1000):
        self.limit = limit

    def __enter__(self):
        # Remember the current limit so __exit__ can put it back.
        self.old_limit = sys.getrecursionlimit()
        sys.setrecursionlimit(self.limit)

    def __exit__(self, exc_type, exc_val, exc_tb):
        sys.setrecursionlimit(self.old_limit)
"""
Some utility functions common to Orange classes.
"""
def _orange_learner__new__(base):
    """Return an 'schizophrenic' __new__ class method following
    `Orange.core.Learner.__new__` calling convention.
    :param base: base class.
    :type base: type
    """
    # NOTE(review): `Orange` appears unused inside this function -- confirm
    # it is not imported for side effects before removing.
    import Orange
    @wraps(base.__new__)
    def _orange_learner__new__wrapped(cls, data=None, weight=0, **kwargs):
        self = base.__new__(cls, **kwargs)
        if data is not None:
            # Data supplied: behave like `Learner(data)` -- initialize the
            # learner and immediately call it, returning the trained result
            # instead of the learner instance itself.
            self.__init__(**kwargs)
            return self.__call__(data, weight)
        else:
            return self
    return _orange_learner__new__wrapped
def _orange__new__(base=None):
    """Return an orange 'schizophrenic' __new__ class method.
    :param base: base orange class (default `Orange.core.Learner`)
    :type base: type
    Example::
        class NewOrangeLearner(Orange.core.Learner):
            __new__ = _orange__new__(Orange.core.Learner)
    """
    import Orange
    if base is None:
        base = Orange.core.Learner
    if issubclass(base, Orange.core.Learner):
        # Learners get the dedicated (data, weight) calling convention.
        return _orange_learner__new__(base)
    else:
        @wraps(base.__new__)
        def _orange__new_wrapped(cls, data=None, **kwargs):
            # `object.__new__` rejects extra arguments, hence the special case.
            if base == object:
                self = base.__new__(cls)
            else:
                self = base.__new__(cls, **kwargs)
            if data:
                # Data supplied: construct and immediately apply, returning
                # the call result rather than the new instance.
                self.__init__(**kwargs)
                return self.__call__(data)
            else:
                return self
        return _orange__new_wrapped
def _orange__reduce__(self):
""" A default __reduce__ method for orange types. Assumes the object
can be reconstructed with the call `constructor(__dict__)` where __dict__
if the stored (pickled) __dict__ attribute.
Example::
class NewOrangeType(Orange.core.Learner):
__reduce__ = _orange__reduce()
"""
return type(self), (), dict(self.__dict__)
# Deprecated camelCase aliases kept for backwards compatibility with old callers.
demangleExamples = deprecated_function_name(demangle_examples)
printVerbose = deprecated_function_name(print_verbose)
@contextmanager
def finishing(obj):
    """ Calls obj.finish() on context exit.

    :param obj: any object exposing a ``finish()`` method (e.g. a
        ``ConsoleProgressBar``); yielded unchanged to the ``with`` body.
    """
    try:
        yield obj
    finally:
        # Run finish() even when the managed block raises; the previous
        # implementation skipped it on error, leaving e.g. progress bars
        # without their final newline.
        obj.finish()
def guess_size(fileobj):
    """Best-effort size in bytes of ``fileobj``.

    Supports real files (via fstat), StringIO buffers (via seek/tell) and
    urllib responses (via the Content-Length header). Returns None when the
    size cannot be determined.
    """
    try:
        if isinstance(fileobj, file):
            # Real file on disk: ask the OS.
            return os.fstat(fileobj.fileno()).st_size
        elif isinstance(fileobj, StringIO.StringIO):
            # In-memory buffer: measure the distance to the end, then
            # restore the current position.
            pos = fileobj.tell()
            fileobj.seek(0, 2)
            length = fileobj.tell() - pos
            fileobj.seek(pos, 0)
            return length
        elif isinstance(fileobj, urllib.addinfourl):
            # HTTP response: trust the Content-Length header when present.
            # The header value is a *string*; convert it so callers (e.g.
            # copyfileobj, which divides by this value) get an int like the
            # other branches return.
            length = fileobj.headers.get("content-length", None)
            return int(length) if length is not None else None
    except Exception:
        # `except Exception:` (not the Python-2-only `except Exception, ex:`
        # statement form); the bound exception was unused anyway.
        pass
def copyfileobj(src, dst, buffer=2**10, content_len=None, progress=None):
    """
    shutil.copyfileobj with progress reporting.

    :param src: readable file-like object.
    :param dst: writable file-like object.
    :param buffer: chunk size in bytes for each read.
    :param content_len: expected total size; guessed from ``src`` when None.
    :param progress: optional callable invoked with the percentage copied
        after every chunk (including the final empty read).
    """
    if content_len is None:
        content_len = guess_size(src) or sys.maxint
    copied = 0
    while True:
        chunk = src.read(buffer)
        dst.write(chunk)
        copied += len(chunk)
        if progress:
            progress(100.0 * copied / content_len)
        if not chunk:
            break
def wget(url, directory=".", dst_obj=None, progress=None):
    """Download ``url`` to ``directory`` (or into an explicit ``dst_obj``).

    :param url: URL to fetch.
    :param directory: destination directory, used only when ``dst_obj`` is None.
    :param dst_obj: optional writable file-like object for the payload.
    :param progress: a callable receiving the percentage done, or True to
        show a ``ConsoleProgressBar`` while downloading.
    """
    stream = urllib2.urlopen(url)
    length = stream.headers.get("content-length", None)
    if length is None:
        # No Content-Length reported: use a huge sentinel so the percentage
        # simply stays near zero instead of failing.
        length = sys.maxint
    else:
        length = int(length)
    # posixpath is used on purpose: URL paths are '/'-separated on every OS.
    basename = posixpath.basename(url)
    if dst_obj is None:
        # NOTE(review): neither this file object nor `stream` is explicitly
        # closed -- confirm callers rely on refcount/GC cleanup.
        dst_obj = open(os.path.join(directory, basename), "wb")
    if progress == True:
        progress = ConsoleProgressBar("Downloading %r." % basename)
        with finishing(progress):
            copyfileobj(stream, dst_obj, buffer=2**10, content_len=length,
                        progress=progress)
    else:
        copyfileobj(stream, dst_obj, buffer=2**10, content_len=length,
                    progress=progress)
from . import selection
from . import render
| gpl-3.0 |
niknow/scipy | scipy/_lib/_threadsafety.py | 71 | 1530 | from __future__ import division, print_function, absolute_import
import threading
import scipy._lib.decorator
__all__ = ['ReentrancyError', 'ReentrancyLock', 'non_reentrant']
class ReentrancyError(RuntimeError):
    """Raised when a ReentrancyLock is entered again before being released."""
class ReentrancyLock(object):
    """
    Threading lock that raises an exception for reentrant calls.

    Calls from different threads are serialized, and nested calls from the
    same thread result to an error.

    The object can be used as a context manager, or to decorate functions
    via the decorate() method.
    """

    def __init__(self, err_msg):
        self._rlock = threading.RLock()
        self._entered = False
        self._err_msg = err_msg

    def __enter__(self):
        self._rlock.acquire()
        if not self._entered:
            self._entered = True
            return
        # The same thread re-acquired the RLock while already inside:
        # undo the acquire and report the reentrant call.
        self._rlock.release()
        raise ReentrancyError(self._err_msg)

    def __exit__(self, type, value, traceback):
        self._entered = False
        self._rlock.release()

    def decorate(self, func):
        """Return ``func`` wrapped so that every call runs under this lock."""
        def caller(func, *args, **kwargs):
            with self:
                return func(*args, **kwargs)
        # decorator.decorate preserves the wrapped function's signature.
        return scipy._lib.decorator.decorate(func, caller)
def non_reentrant(err_msg=None):
    """
    Decorate a function with a threading lock and prevent reentrant calls.

    :param err_msg: message for the ReentrancyError raised on reentry;
        defaults to "<name> is not re-entrant".
    """
    def decorator(func):
        message = err_msg
        if message is None:
            message = "%s is not re-entrant" % func.__name__
        return ReentrancyLock(message).decorate(func)
    return decorator
| bsd-3-clause |
cesargtz/YecoraOdoo | addons/crm/crm_segmentation.py | 333 | 9067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv,orm
class crm_segmentation(osv.osv):
    '''
        A segmentation is a tool to automatically assign categories on partners.
        These assignations are based on criterions.
    '''
    _name = "crm.segmentation"
    _description = "Partner Segmentation"
    _columns = {
        'name': fields.char('Name', required=True, help='The name of the segmentation.'),
        'description': fields.text('Description'),
        'categ_id': fields.many2one('res.partner.category', 'Partner Category',\
            required=True, help='The partner category that will be \
added to partners that match the segmentation criterions after computation.'),
        'exclusif': fields.boolean('Exclusive', help='Check if the category is limited to partners that match the segmentation criterions.\
\nIf checked, remove the category from partners that doesn\'t match segmentation criterions'),
        'state': fields.selection([('not running','Not Running'),\
            ('running','Running')], 'Execution Status', readonly=True),
        'partner_id': fields.integer('Max Partner ID processed'),
        'segmentation_line': fields.one2many('crm.segmentation.line', \
            'segmentation_id', 'Criteria', required=True, copy=True),
        'sales_purchase_active': fields.boolean('Use The Sales Purchase Rules', help='Check if you want to use this tab as part of the segmentation rule. If not checked, the criteria beneath will be ignored')
    }
    _defaults = {
        'partner_id': lambda *a: 0,
        'state': lambda *a: 'not running',
    }
    def process_continue(self, cr, uid, ids, start=False):
        """ Recompute the partner/category assignments for each segmentation.
            @param self: The object pointer
            @param cr: the current row, from the database cursor,
            @param uid: the current user’s ID for security checks,
            @param ids: List of Process continue’s IDs"""
        partner_obj = self.pool.get('res.partner')
        categs = self.read(cr, uid, ids, ['categ_id', 'exclusif', 'sales_purchase_active'])
        for categ in categs:
            if start:
                if categ['exclusif']:
                    # Exclusive segmentation: wipe every existing link to this
                    # category before recomputing the assignments from scratch.
                    cr.execute('delete from res_partner_res_partner_category_rel \
                            where category_id=%s', (categ['categ_id'][0],))
                    partner_obj.invalidate_cache(cr, uid, ['category_id'])
            id = categ['id']
            cr.execute('select id from res_partner order by id ')
            partners = [x[0] for x in cr.fetchall()]
            if categ['sales_purchase_active']:
                # Keep only the partners that satisfy this segmentation's
                # sales/purchase criteria lines.
                to_remove_list=[]
                cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
                line_ids = [x[0] for x in cr.fetchall()]
                for pid in partners:
                    if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)
            for partner in partner_obj.browse(cr, uid, partners):
                category_ids = [categ_id.id for categ_id in partner.category_id]
                if categ['categ_id'][0] not in category_ids:
                    # Link the partner to the category via raw SQL (bypasses
                    # the ORM write machinery), then refresh the ORM cache.
                    cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) \
                            values (%s,%s)', (categ['categ_id'][0], partner.id))
                    partner_obj.invalidate_cache(cr, uid, ['category_id'], [partner.id])
            # Mark the run as complete and reset the processed-partner cursor.
            self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
        return True
    def process_stop(self, cr, uid, ids, *args):
        """ Abort a running computation and reset its progress marker.
            @param self: The object pointer
            @param cr: the current row, from the database cursor,
            @param uid: the current user’s ID for security checks,
            @param ids: List of Process stop’s IDs"""
        return self.write(cr, uid, ids, {'state':'not running', 'partner_id':0})
    def process_start(self, cr, uid, ids, *args):
        """ Flag the segmentations as running and launch the computation.
            @param self: The object pointer
            @param cr: the current row, from the database cursor,
            @param uid: the current user’s ID for security checks,
            @param ids: List of Process start’s IDs """
        self.write(cr, uid, ids, {'state':'running', 'partner_id':0})
        return self.process_continue(cr, uid, ids, start=True)
class crm_segmentation_line(osv.osv):
    """ Segmentation line """
    _name = "crm.segmentation.line"
    _description = "Segmentation line"
    _columns = {
        'name': fields.char('Rule Name', required=True),
        'segmentation_id': fields.many2one('crm.segmentation', 'Segmentation'),
        'expr_name': fields.selection([('sale','Sale Amount'),
            ('purchase','Purchase Amount')], 'Control Variable', required=True),
        'expr_operator': fields.selection([('<','<'),('=','='),('>','>')], 'Operator', required=True),
        'expr_value': fields.float('Value', required=True),
        'operator': fields.selection([('and','Mandatory Expression'),\
            ('or','Optional Expression')],'Mandatory / Optional', required=True),
    }
    _defaults = {
        'expr_name': lambda *a: 'sale',
        'expr_operator': lambda *a: '>',
        'operator': lambda *a: 'and'
    }
    def test(self, cr, uid, ids, partner_id):
        """ Evaluate the criteria lines ``ids`` against one partner; return
            True when the partner satisfies the segmentation.
            @param self: The object pointer
            @param cr: the current row, from the database cursor,
            @param uid: the current user’s ID for security checks,
            @param ids: List of Test’s IDs """
        # Comparison primitives keyed by the stored `expr_operator` value.
        expression = {'<': lambda x,y: x<y, '=':lambda x,y:x==y, '>':lambda x,y:x>y}
        # NOTE(review): `ok` is never used below -- candidate for removal.
        ok = False
        lst = self.read(cr, uid, ids)
        for l in lst:
            # Sales/purchase figures only exist when the accounting module is
            # installed; otherwise every line is skipped and True is returned.
            cr.execute('select * from ir_module_module where name=%s and state=%s', ('account','installed'))
            if cr.fetchone():
                if l['expr_name']=='sale':
                    # Net sales = customer invoices minus customer refunds.
                    cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
                            'FROM account_invoice_line l, account_invoice i ' \
                            'WHERE (l.invoice_id = i.id) ' \
                            'AND i.partner_id = %s '\
                            'AND i.type = \'out_invoice\'',
                            (partner_id,))
                    value = cr.fetchone()[0] or 0.0
                    cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
                            'FROM account_invoice_line l, account_invoice i ' \
                            'WHERE (l.invoice_id = i.id) ' \
                            'AND i.partner_id = %s '\
                            'AND i.type = \'out_refund\'',
                            (partner_id,))
                    value -= cr.fetchone()[0] or 0.0
                elif l['expr_name']=='purchase':
                    # Net purchases = supplier invoices minus supplier refunds.
                    cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
                            'FROM account_invoice_line l, account_invoice i ' \
                            'WHERE (l.invoice_id = i.id) ' \
                            'AND i.partner_id = %s '\
                            'AND i.type = \'in_invoice\'',
                            (partner_id,))
                    value = cr.fetchone()[0] or 0.0
                    cr.execute('SELECT SUM(l.price_unit * l.quantity) ' \
                            'FROM account_invoice_line l, account_invoice i ' \
                            'WHERE (l.invoice_id = i.id) ' \
                            'AND i.partner_id = %s '\
                            'AND i.type = \'in_refund\'',
                            (partner_id,))
                    value -= cr.fetchone()[0] or 0.0
                # A failed mandatory ('and') line rejects the partner outright;
                # any satisfied line accepts immediately.
                res = expression[l['expr_operator']](value, l['expr_value'])
                if (not res) and (l['operator']=='and'):
                    return False
                if res:
                    return True
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ASCrookes/django | django/db/models/base.py | 61 | 69022 | from __future__ import unicode_literals
import copy
import inspect
import warnings
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connections,
router, transaction,
)
from django.db.models import signals
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.fields import AutoField
from django.db.models.fields.related import (
ForeignObjectRel, ManyToOneRel, OneToOneField, lazy_related_operation,
resolve_relation,
)
from django.db.models.manager import ensure_default_manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.query_utils import (
DeferredAttribute, deferred_class_factory,
)
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
def subclass_exception(name, parents, module, attached_to=None):
    """
    Create exception subclass. Used by ModelBase below.

    If 'attached_to' is supplied, the exception will be created in a way that
    allows it to be pickled, assuming the returned exception class will be
    added as an attribute to the 'attached_to' class.
    """
    namespace = {'__module__': module}
    if attached_to is not None:
        # Dynamically created classes are normally unpicklable; pickle via
        # the (owner class, attribute name) pair plus the exception args.
        def __reduce__(self):
            # Exceptions are special - they've got state that isn't
            # in self.__dict__. We assume it is all in self.args.
            return (unpickle_inner_exception, (attached_to, name), self.args)

        def __setstate__(self, args):
            self.args = args

        namespace['__reduce__'] = __reduce__
        namespace['__setstate__'] = __setstate__
    return type(name, parents, namespace)
class ModelBase(type):
    """
    Metaclass for all models.
    """
    def __new__(cls, name, bases, attrs):
        super_new = super(ModelBase, cls).__new__
        # Also ensure initialization is only performed for subclasses of Model
        # (excluding Model class itself).
        parents = [b for b in bases if isinstance(b, ModelBase)]
        if not parents:
            return super_new(cls, name, bases, attrs)
        # Create the class.
        module = attrs.pop('__module__')
        new_class = super_new(cls, name, bases, {'__module__': module})
        attr_meta = attrs.pop('Meta', None)
        abstract = getattr(attr_meta, 'abstract', False)
        if not attr_meta:
            meta = getattr(new_class, 'Meta', None)
        else:
            meta = attr_meta
        base_meta = getattr(new_class, '_meta', None)
        app_label = None
        # Look for an application configuration to attach the model to.
        app_config = apps.get_containing_app_config(module)
        if getattr(meta, 'app_label', None) is None:
            if app_config is None:
                if not abstract:
                    raise RuntimeError(
                        "Model class %s.%s doesn't declare an explicit "
                        "app_label and either isn't in an application in "
                        "INSTALLED_APPS or else was imported before its "
                        "application was loaded. " % (module, name))
            else:
                app_label = app_config.label
        new_class.add_to_class('_meta', Options(meta, app_label))
        if not abstract:
            # Give every concrete model its own picklable DoesNotExist /
            # MultipleObjectsReturned, inheriting the parents' versions.
            new_class.add_to_class(
                'DoesNotExist',
                subclass_exception(
                    str('DoesNotExist'),
                    tuple(
                        x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
                    ) or (ObjectDoesNotExist,),
                    module,
                    attached_to=new_class))
            new_class.add_to_class(
                'MultipleObjectsReturned',
                subclass_exception(
                    str('MultipleObjectsReturned'),
                    tuple(
                        x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
                    ) or (MultipleObjectsReturned,),
                    module,
                    attached_to=new_class))
            if base_meta and not base_meta.abstract:
                # Non-abstract child classes inherit some attributes from their
                # non-abstract parent (unless an ABC comes before it in the
                # method resolution order).
                if not hasattr(meta, 'ordering'):
                    new_class._meta.ordering = base_meta.ordering
                if not hasattr(meta, 'get_latest_by'):
                    new_class._meta.get_latest_by = base_meta.get_latest_by
        is_proxy = new_class._meta.proxy
        # If the model is a proxy, ensure that the base class
        # hasn't been swapped out.
        if is_proxy and base_meta and base_meta.swapped:
            raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
        if getattr(new_class, '_default_manager', None):
            if not is_proxy:
                # Multi-table inheritance doesn't inherit default manager from
                # parents.
                new_class._default_manager = None
                new_class._base_manager = None
            else:
                # Proxy classes do inherit parent's default manager, if none is
                # set explicitly.
                new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
                new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
        # Add all attributes to the class.
        for obj_name, obj in attrs.items():
            new_class.add_to_class(obj_name, obj)
        # All the fields of any type declared on this model
        new_fields = chain(
            new_class._meta.local_fields,
            new_class._meta.local_many_to_many,
            new_class._meta.virtual_fields
        )
        field_names = {f.name for f in new_fields}
        # Basic setup for proxy models.
        if is_proxy:
            base = None
            for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
                if parent._meta.abstract:
                    if parent._meta.fields:
                        raise TypeError(
                            "Abstract base class containing model fields not "
                            "permitted for proxy model '%s'." % name
                        )
                    else:
                        continue
                if base is not None:
                    raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
                else:
                    base = parent
            if base is None:
                raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
            new_class._meta.setup_proxy(base)
            new_class._meta.concrete_model = base._meta.concrete_model
            base._meta.concrete_model._meta.proxied_children.append(new_class._meta)
        else:
            new_class._meta.concrete_model = new_class
        # Collect the parent links for multi-table inheritance.
        parent_links = {}
        for base in reversed([new_class] + parents):
            # Conceptually equivalent to `if base is Model`.
            if not hasattr(base, '_meta'):
                continue
            # Skip concrete parent classes.
            if base != new_class and not base._meta.abstract:
                continue
            # Locate OneToOneField instances.
            for field in base._meta.local_fields:
                if isinstance(field, OneToOneField):
                    related = resolve_relation(new_class, field.remote_field.model)
                    parent_links[make_model_tuple(related)] = field
        # Do the appropriate setup for any model parents.
        for base in parents:
            original_base = base
            if not hasattr(base, '_meta'):
                # Things without _meta aren't functional models, so they're
                # uninteresting parents.
                continue
            parent_fields = base._meta.local_fields + base._meta.local_many_to_many
            # Check for clashes between locally declared fields and those
            # on the base classes (we cannot handle shadowed fields at the
            # moment).
            for field in parent_fields:
                if field.name in field_names:
                    raise FieldError(
                        'Local field %r in class %r clashes '
                        'with field of similar name from '
                        'base class %r' % (field.name, name, base.__name__)
                    )
            if not base._meta.abstract:
                # Concrete classes...
                base = base._meta.concrete_model
                base_key = make_model_tuple(base)
                if base_key in parent_links:
                    field = parent_links[base_key]
                elif not is_proxy:
                    attr_name = '%s_ptr' % base._meta.model_name
                    field = OneToOneField(base, name=attr_name,
                            auto_created=True, parent_link=True)
                    # Only add the ptr field if it's not already present;
                    # e.g. migrations will already have it specified
                    if not hasattr(new_class, attr_name):
                        new_class.add_to_class(attr_name, field)
                else:
                    field = None
                new_class._meta.parents[base] = field
            else:
                # .. and abstract ones.
                for field in parent_fields:
                    new_field = copy.deepcopy(field)
                    new_class.add_to_class(field.name, new_field)
                # Pass any non-abstract parent classes onto child.
                new_class._meta.parents.update(base._meta.parents)
            # Inherit managers from the abstract base classes.
            new_class.copy_managers(base._meta.abstract_managers)
            # Proxy models inherit the non-abstract managers from their base,
            # unless they have redefined any of them.
            if is_proxy:
                new_class.copy_managers(original_base._meta.concrete_managers)
            # Inherit virtual fields (like GenericForeignKey) from the parent
            # class
            for field in base._meta.virtual_fields:
                if base._meta.abstract and field.name in field_names:
                    raise FieldError(
                        'Local field %r in class %r clashes '
                        'with field of similar name from '
                        'abstract base class %r' % (field.name, name, base.__name__)
                    )
                new_class.add_to_class(field.name, copy.deepcopy(field))
        if abstract:
            # Abstract base models can't be instantiated and don't appear in
            # the list of models for an app. We do the final setup for them a
            # little differently from normal models.
            attr_meta.abstract = False
            new_class.Meta = attr_meta
            return new_class
        new_class._prepare()
        new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
        return new_class
    def copy_managers(cls, base_managers):
        """Copy inherited managers onto ``cls`` unless locally overridden."""
        # This is in-place sorting of an Options attribute, but that's fine.
        base_managers.sort()
        for _, mgr_name, manager in base_managers:  # NOQA (redefinition of _)
            val = getattr(cls, mgr_name, None)
            if not val or val is manager:
                new_manager = manager._copy_to_model(cls)
                cls.add_to_class(mgr_name, new_manager)
    def add_to_class(cls, name, value):
        """Attach ``value`` to ``cls``, honoring the contribute_to_class hook."""
        # We should call the contribute_to_class method only if it's bound
        if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
            value.contribute_to_class(cls, name)
        else:
            setattr(cls, name, value)
    def _prepare(cls):
        """
        Creates some methods once self._meta has been populated.
        """
        opts = cls._meta
        opts._prepare(cls)
        if opts.order_with_respect_to:
            cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
            cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
            # defer creating accessors on the foreign class until we are
            # certain it has been created
            def make_foreign_order_accessors(cls, model, field):
                setattr(
                    field.remote_field.model,
                    'get_%s_order' % cls.__name__.lower(),
                    curry(method_get_order, cls)
                )
                setattr(
                    field.remote_field.model,
                    'set_%s_order' % cls.__name__.lower(),
                    curry(method_set_order, cls)
                )
            wrt = opts.order_with_respect_to
            lazy_related_operation(make_foreign_order_accessors, cls, wrt.remote_field.model, field=wrt)
        # Give the class a docstring -- its definition.
        if cls.__doc__ is None:
            cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
        get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
        if get_absolute_url_override:
            setattr(cls, 'get_absolute_url', get_absolute_url_override)
        ensure_default_manager(cls)
        signals.class_prepared.send(sender=cls)
class ModelState(object):
    """
    A class for storing instance state
    """
    def __init__(self, db=None):
        # Database alias the instance was loaded from; None means the
        # instance has not been loaded from any database.
        self.db = db
        # If true, uniqueness validation checks will consider this a new,
        # as-yet-unsaved object. Necessary for correct validation of new
        # instances of objects with explicit (non-auto) PKs. This impacts
        # validation only; it has no effect on the actual save.
        self.adding = True
class Model(six.with_metaclass(ModelBase)):
_deferred = False
    def __init__(self, *args, **kwargs):
        """
        Populate the instance from positional field values (in concrete-field
        declaration order) or keyword field/value pairs, firing the
        pre_init/post_init signals around the work. Unknown keyword arguments
        that match a property are assigned through it; anything else raises
        TypeError.
        """
        signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
        # Set up the storage for instance state
        self._state = ModelState()
        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work
        # The reason for the kwargs check is that standard iterator passes in by
        # args, and instantiation for iteration is 33% faster.
        args_len = len(args)
        if args_len > len(self._meta.concrete_fields):
            # Daft, but matches old exception sans the err msg.
            raise IndexError("Number of args exceeds number of fields")
        if not kwargs:
            fields_iter = iter(self._meta.concrete_fields)
            # The ordering of the zip calls matter - zip throws StopIteration
            # when an iter throws it. So if the first iter throws it, the second
            # is *not* consumed. We rely on this, so don't change the order
            # without changing the logic.
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
        else:
            # Slower, kwargs-ready version.
            fields_iter = iter(self._meta.fields)
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
                kwargs.pop(field.name, None)
                # Maintain compatibility with existing calls.
                if isinstance(field.remote_field, ManyToOneRel):
                    kwargs.pop(field.attname, None)
        # Now we're left with the unprocessed fields that *must* come from
        # keywords, or default.
        for field in fields_iter:
            is_related_object = False
            # This slightly odd construct is so that we can access any
            # data-descriptor object (DeferredAttribute) without triggering its
            # __get__ method.
            if (field.attname not in kwargs and
                    (isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
                     or field.column is None)):
                # This field will be populated on request.
                continue
            if kwargs:
                if isinstance(field.remote_field, ForeignObjectRel):
                    try:
                        # Assume object instance was passed in.
                        rel_obj = kwargs.pop(field.name)
                        is_related_object = True
                    except KeyError:
                        try:
                            # Object instance wasn't passed in -- must be an ID.
                            val = kwargs.pop(field.attname)
                        except KeyError:
                            val = field.get_default()
                    else:
                        # Object instance was passed in. Special case: You can
                        # pass in "None" for related objects if it's allowed.
                        if rel_obj is None and field.null:
                            val = None
                else:
                    try:
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        # This is done with an exception rather than the
                        # default argument on pop because we don't want
                        # get_default() to be evaluated, and then not used.
                        # Refs #12057.
                        val = field.get_default()
            else:
                val = field.get_default()
            if is_related_object:
                # If we are passed a related instance, set it using the
                # field.name instead of field.attname (e.g. "user" instead of
                # "user_id") so that the object gets properly cached (and type
                # checked) by the RelatedObjectDescriptor.
                setattr(self, field.name, rel_obj)
            else:
                setattr(self, field.attname, val)
        if kwargs:
            # Leftover kwargs may still target plain Python properties.
            for prop in list(kwargs):
                try:
                    if isinstance(getattr(self.__class__, prop), property):
                        setattr(self, prop, kwargs.pop(prop))
                except AttributeError:
                    pass
        if kwargs:
            raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
        super(Model, self).__init__()
        signals.post_init.send(sender=self.__class__, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if cls._deferred:
new = cls(**dict(zip(field_names, values)))
else:
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
    def __repr__(self):
        # repr() must never fail, even on instances holding broken text data,
        # so Unicode conversion errors fall back to a placeholder.
        try:
            u = six.text_type(self)
        except (UnicodeEncodeError, UnicodeDecodeError):
            u = '[Bad Unicode data]'
        return force_str('<%s: %s>' % (self.__class__.__name__, u))
    def __str__(self):
        # On Python 2, delegate to a user-defined __unicode__ (utf-8 encoded);
        # otherwise fall back to a generic description.
        if six.PY2 and hasattr(self, '__unicode__'):
            return force_text(self).encode('utf-8')
        return '%s object' % self.__class__.__name__
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
    def __ne__(self, other):
        # Python 2 does not derive != from __eq__, so define it explicitly.
        return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
    def __reduce__(self):
        """
        Provides pickling support. Normally, this just dispatches to Python's
        standard handling. However, for models with deferred field loading, we
        need to do things manually, as they're dynamically created classes and
        only module-level classes can be pickled by the default path.
        """
        data = self.__dict__
        # Record the pickling Django version so __setstate__ can warn when a
        # pickle is loaded under a different version.
        data[DJANGO_VERSION_PICKLE_KEY] = get_version()
        if not self._deferred:
            class_id = self._meta.app_label, self._meta.object_name
            return model_unpickle, (class_id, [], simple_class_factory), data
        # Deferred model: collect the attnames still deferred so the class
        # can be recreated by deferred_class_factory at unpickle time.
        defers = []
        for field in self._meta.fields:
            if isinstance(self.__class__.__dict__.get(field.attname),
                          DeferredAttribute):
                defers.append(field.attname)
        model = self._meta.proxy_for_model
        class_id = model._meta.app_label, model._meta.object_name
        return (model_unpickle, (class_id, defers, deferred_class_factory), data)
    def __setstate__(self, state):
        """Restore pickled state, emitting a RuntimeWarning when the Django
        version recorded at pickling time is missing or differs from the
        running version."""
        msg = None
        pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
        if pickled_version:
            current_version = get_version()
            if current_version != pickled_version:
                msg = ("Pickled model instance's Django version %s does"
                    " not match the current version %s."
                    % (pickled_version, current_version))
        else:
            msg = "Pickled model instance's Django version is not specified."
        if msg:
            warnings.warn(msg, RuntimeWarning, stacklevel=2)
        self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if isinstance(self.__class__.__dict__.get(f.attname), DeferredAttribute)
}
    def refresh_from_db(self, using=None, fields=None, **kwargs):
        """
        Reloads field values from the database.
        By default, the reloading happens from the database this instance was
        loaded from, or by the read router if this instance wasn't loaded from
        any database. The using parameter will override the default.
        Fields can be used to specify which fields to reload. The fields
        should be an iterable of field attnames. If fields is None, then
        all non-deferred fields are reloaded.
        When accessing deferred fields of an instance, the deferred loading
        of the field will call this method.
        """
        if fields is not None:
            if len(fields) == 0:
                return
            if any(LOOKUP_SEP in f for f in fields):
                raise ValueError(
                    'Found "%s" in fields argument. Relations and transforms '
                    'are not allowed in fields.' % LOOKUP_SEP)
        db = using if using is not None else self._state.db
        # Deferred instances are dynamically-created proxies; query against
        # the underlying concrete model instead.
        if self._deferred:
            non_deferred_model = self._meta.proxy_for_model
        else:
            non_deferred_model = self.__class__
        db_instance_qs = non_deferred_model._default_manager.using(db).filter(pk=self.pk)
        # Use provided fields, if not set then reload all non-deferred fields.
        if fields is not None:
            fields = list(fields)
            db_instance_qs = db_instance_qs.only(*fields)
        elif self._deferred:
            deferred_fields = self.get_deferred_fields()
            fields = [f.attname for f in self._meta.concrete_fields
                      if f.attname not in deferred_fields]
            db_instance_qs = db_instance_qs.only(*fields)
        db_instance = db_instance_qs.get()
        non_loaded_fields = db_instance.get_deferred_fields()
        for field in self._meta.concrete_fields:
            if field.attname in non_loaded_fields:
                # This field wasn't refreshed - skip ahead.
                continue
            setattr(self, field.attname, getattr(db_instance, field.attname))
            # Throw away stale foreign key references.
            if field.is_relation and field.get_cache_name() in self.__dict__:
                rel_instance = getattr(self, field.get_cache_name())
                local_val = getattr(db_instance, field.attname)
                related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname)
                # Only drop the cached related object if the FK value actually
                # changed; an unchanged FK keeps its cache.
                if local_val != related_val:
                    del self.__dict__[field.get_cache_name()]
        # Record which database the fresh values came from.
        self._state.db = db_instance._state.db
    def serializable_value(self, field_name):
        """
        Returns the value of the field name for this instance. If the field is
        a foreign key, returns the id value, instead of the object. If there's
        no Field object with this name on the model, the model attribute's
        value is returned directly.
        Used to serialize a field's value (in the serializer, or form output,
        for example). Normally, you would just access the attribute directly
        and not use this method.
        """
        try:
            field = self._meta.get_field(field_name)
        except FieldDoesNotExist:
            # Not a model field (e.g. a plain attribute or property): return
            # whatever the attribute holds.
            return getattr(self, field_name)
        # attname is the raw column attribute (e.g. "author_id" for "author").
        return getattr(self, field.attname)
    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        """
        Saves the current instance. Override this in a subclass if you want to
        control the saving process.
        The 'force_insert' and 'force_update' parameters can be used to insist
        that the "save" must be an SQL insert or update (or equivalent for
        non-SQL backends), respectively. Normally, they should not be set.

        Raises ValueError for conflicting force flags or for update_fields
        entries that are not (editable) fields of this model.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        if force_insert and (force_update or update_fields):
            raise ValueError("Cannot force both insert and updating in model saving.")
        if update_fields is not None:
            # If update_fields is empty, skip the save. We do also check for
            # no-op saves later on for inheritance cases. This bailout is
            # still needed for skipping signal sending.
            if len(update_fields) == 0:
                return
            update_fields = frozenset(update_fields)
            field_names = set()
            # Collect both field.name and field.attname so callers may pass
            # either form (e.g. "author" or "author_id").
            for field in self._meta.fields:
                if not field.primary_key:
                    field_names.add(field.name)
                    if field.name != field.attname:
                        field_names.add(field.attname)
            non_model_fields = update_fields.difference(field_names)
            if non_model_fields:
                raise ValueError("The following fields do not exist in this "
                                 "model or are m2m fields: %s"
                                 % ', '.join(non_model_fields))
        # If saving to the same database, and this model is deferred, then
        # automatically do a "update_fields" save on the loaded fields.
        elif not force_insert and self._deferred and using == self._state.db:
            field_names = set()
            for field in self._meta.concrete_fields:
                if not field.primary_key and not hasattr(field, 'through'):
                    field_names.add(field.attname)
            deferred_fields = [
                f.attname for f in self._meta.fields
                if (f.attname not in self.__dict__ and
                    isinstance(self.__class__.__dict__[f.attname], DeferredAttribute))
            ]
            loaded_fields = field_names.difference(deferred_fields)
            if loaded_fields:
                update_fields = frozenset(loaded_fields)
        self.save_base(using=using, force_insert=force_insert,
                       force_update=force_update, update_fields=update_fields)
    # Flag used by the admin and others to know save() modifies data.
    save.alters_data = True
    def save_base(self, raw=False, force_insert=False,
                  force_update=False, using=None, update_fields=None):
        """
        Handles the parts of saving which should be done only once per save,
        yet need to be done in raw saves, too. This includes some sanity
        checks and signal sending.
        The 'raw' argument is telling save_base not to save any parent
        models and not to do any changes to the values before save. This
        is used by fixture loading.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        assert not (force_insert and (force_update or update_fields))
        assert update_fields is None or len(update_fields) > 0
        cls = origin = self.__class__
        # Skip proxies, but keep the origin as the proxy model.
        if cls._meta.proxy:
            cls = cls._meta.concrete_model
        meta = cls._meta
        # Auto-created (e.g. implicit m2m through) models don't emit signals.
        if not meta.auto_created:
            signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                                  update_fields=update_fields)
        # Parents and this table are saved in one atomic block so a failed
        # child save can't leave an orphaned parent row.
        with transaction.atomic(using=using, savepoint=False):
            if not raw:
                self._save_parents(cls, using, update_fields)
            updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
        # Store the database on which the object was saved
        self._state.db = using
        # Once saved, this is no longer a to-be-added instance.
        self._state.adding = False
        # Signal that the save is complete
        if not meta.auto_created:
            signals.post_save.send(sender=origin, instance=self, created=(not updated),
                                   update_fields=update_fields, raw=raw, using=using)
    save_base.alters_data = True
    def _save_parents(self, cls, using, update_fields):
        """
        Saves all the parents of cls using values from self.

        Recurses up the multi-table-inheritance chain so the topmost parent
        row is written first, then propagates each parent's pk back onto
        self's parent-link attribute.
        """
        meta = cls._meta
        for parent, field in meta.parents.items():
            # Make sure the link fields are synced between parent and self.
            if (field and getattr(self, parent._meta.pk.attname) is None
                    and getattr(self, field.attname) is not None):
                setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
            self._save_parents(cls=parent, using=using, update_fields=update_fields)
            self._save_table(cls=parent, using=using, update_fields=update_fields)
            # Set the parent's PK value to self.
            if field:
                setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy set
                # attname directly, bypassing the descriptor. Invalidate
                # the related object cache, in case it's been accidentally
                # populated. A fresh instance will be re-built from the
                # database if necessary.
                cache_name = field.get_cache_name()
                if hasattr(self, cache_name):
                    delattr(self, cache_name)
    def _save_table(self, raw=False, cls=None, force_insert=False,
                    force_update=False, using=None, update_fields=None):
        """
        Does the heavy-lifting involved in saving. Updates or inserts the data
        for a single table.

        Returns True if an UPDATE touched an existing row, False if an INSERT
        was performed instead.
        """
        meta = cls._meta
        non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
        if update_fields:
            # Restrict the UPDATE to the requested fields only.
            non_pks = [f for f in non_pks
                       if f.name in update_fields or f.attname in update_fields]
        pk_val = self._get_pk_val(meta)
        if pk_val is None:
            # Give the pk field a chance to supply a value (e.g. UUIDField
            # defaults) before deciding between UPDATE and INSERT.
            pk_val = meta.pk.get_pk_value_on_save(self)
            setattr(self, meta.pk.attname, pk_val)
        pk_set = pk_val is not None
        if not pk_set and (force_update or update_fields):
            raise ValueError("Cannot force an update in save() with no primary key.")
        updated = False
        # If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
        if pk_set and not force_insert:
            base_qs = cls._base_manager.using(using)
            # Raw saves (fixtures) take attribute values verbatim; normal
            # saves let each field pre-process via pre_save().
            values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
                      for f in non_pks]
            forced_update = update_fields or force_update
            updated = self._do_update(base_qs, using, pk_val, values, update_fields,
                                      forced_update)
            if force_update and not updated:
                raise DatabaseError("Forced update did not affect any rows.")
            if update_fields and not updated:
                raise DatabaseError("Save with update_fields did not affect any rows.")
        if not updated:
            if meta.order_with_respect_to:
                # If this is a model with an order_with_respect_to
                # autopopulate the _order field
                field = meta.order_with_respect_to
                order_value = cls._base_manager.using(using).filter(
                    **{field.name: getattr(self, field.attname)}).count()
                self._order = order_value
            fields = meta.local_concrete_fields
            if not pk_set:
                # Let the database assign the auto pk.
                fields = [f for f in fields if not isinstance(f, AutoField)]
            update_pk = bool(meta.has_auto_field and not pk_set)
            result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
            if update_pk:
                setattr(self, meta.pk.attname, result)
        return updated
    def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
        """
        This method will try to update the model. If the model was updated (in
        the sense that an update query was done and a matching row was found
        from the DB) the method will return True.
        """
        filtered = base_qs.filter(pk=pk_val)
        if not values:
            # We can end up here when saving a model in inheritance chain where
            # update_fields doesn't target any field in current model. In that
            # case we just say the update succeeded. Another case ending up here
            # is a model with just PK - in that case check that the PK still
            # exists.
            return update_fields is not None or filtered.exists()
        if self._meta.select_on_save and not forced_update:
            if filtered.exists():
                # It may happen that the object is deleted from the DB right after
                # this check, causing the subsequent UPDATE to return zero matching
                # rows. The same result can occur in some rare cases when the
                # database returns zero despite the UPDATE being executed
                # successfully (a row is matched and updated). In order to
                # distinguish these two cases, the object's existence in the
                # database is again checked for if the UPDATE query returns 0.
                return filtered._update(values) > 0 or filtered.exists()
            else:
                return False
        # Default path: a single UPDATE whose row count decides the answer.
        return filtered._update(values) > 0
    def _do_insert(self, manager, using, fields, update_pk, raw):
        """
        Do an INSERT. If update_pk is defined then this method should return
        the new pk for the model.
        """
        # Delegates to the manager's low-level _insert; return_id asks the
        # backend for the auto-generated key when update_pk is set.
        return manager._insert([self], fields=fields, return_id=update_pk,
                               using=using, raw=raw)
    def delete(self, using=None, keep_parents=False):
        """Delete this instance (and, unless keep_parents, its MTI parent
        rows) after collecting every dependent object via Collector."""
        using = using or router.db_for_write(self.__class__, instance=self)
        assert self._get_pk_val() is not None, (
            "%s object can't be deleted because its %s attribute is set to None." %
            (self._meta.object_name, self._meta.pk.attname)
        )
        collector = Collector(using=using)
        collector.collect([self], keep_parents=keep_parents)
        return collector.delete()
    # Flag used by the admin and others to know delete() modifies data.
    delete.alters_data = True
    def _get_FIELD_display(self, field):
        """Backing implementation for the get_<field>_display() accessor:
        map the stored value to its human-readable choice label, falling
        back to the raw value when it isn't among the field's choices."""
        value = getattr(self, field.attname)
        return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
    def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
        """Backing implementation for get_next_by_<field>() /
        get_previous_by_<field>(): return the neighbouring instance ordered
        by *field*, using pk as a tiebreaker for equal field values."""
        if not self.pk:
            raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
        op = 'gt' if is_next else 'lt'
        order = '' if is_next else '-'
        param = force_text(getattr(self, field.attname))
        # Strictly greater/less on the field, OR equal on the field with a
        # greater/less pk — gives a stable total ordering.
        q = Q(**{'%s__%s' % (field.name, op): param})
        q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
        qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
            '%s%s' % (order, field.name), '%spk' % order
        )
        try:
            return qs[0]
        except IndexError:
            raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
    def _get_next_or_previous_in_order(self, is_next):
        """Backing implementation for get_next_in_order()/get_previous_in_order()
        on models with Meta.order_with_respect_to; the result is cached per
        direction on the instance."""
        cachename = "__%s_order_cache" % is_next
        if not hasattr(self, cachename):
            op = 'gt' if is_next else 'lt'
            order = '_order' if is_next else '-_order'
            order_field = self._meta.order_with_respect_to
            # Neighbour = row sharing the same order_with_respect_to target
            # whose _order is adjacent to this row's _order (subquery).
            obj = self._default_manager.filter(**{
                order_field.name: getattr(self, order_field.attname)
            }).filter(**{
                '_order__%s' % op: self._default_manager.values('_order').filter(**{
                    self._meta.pk.name: self.pk
                })
            }).order_by(order)[:1].get()
            setattr(self, cachename, obj)
        return getattr(self, cachename)
    def prepare_database_save(self, field):
        """Return the value to store when this instance is assigned to the
        foreign key *field* of another model being saved (i.e. the related
        field's value, typically the pk)."""
        if self.pk is None:
            raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
        return getattr(self, field.remote_field.get_related_field().attname)
    def clean(self):
        """
        Hook for doing any extra model-wide validation after clean() has been
        called on every field by self.clean_fields. Any ValidationError raised
        by this method will not be associated with a particular field; it will
        have a special-case association with the field defined by NON_FIELD_ERRORS.

        Intentionally a no-op here; subclasses override it.
        """
        pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
    def _get_unique_checks(self, exclude=None):
        """
        Gather a list of checks to perform. Since validate_unique could be
        called from a ModelForm, some fields may have been excluded; we can't
        perform a unique check on a model that is missing fields involved
        in that check.
        Fields that did not validate should also be excluded, but they need
        to be passed in via the exclude argument.

        Returns (unique_checks, date_checks) where unique_checks is a list of
        (model_class, field_name_tuple) and date_checks is a list of
        (model_class, lookup_type, field_name, unique_for_field_name).
        """
        if exclude is None:
            exclude = []
        unique_checks = []
        unique_togethers = [(self.__class__, self._meta.unique_together)]
        for parent_class in self._meta.get_parent_list():
            if parent_class._meta.unique_together:
                unique_togethers.append((parent_class, parent_class._meta.unique_together))
        for model_class, unique_together in unique_togethers:
            for check in unique_together:
                for name in check:
                    # If this is an excluded field, don't add this check.
                    if name in exclude:
                        break
                else:
                    # for/else: only runs when no field of the check was
                    # excluded.
                    unique_checks.append((model_class, tuple(check)))
        # These are checks for the unique_for_<date/year/month>.
        date_checks = []
        # Gather a list of checks for fields declared as unique and add them to
        # the list of checks.
        fields_with_class = [(self.__class__, self._meta.local_fields)]
        for parent_class in self._meta.get_parent_list():
            fields_with_class.append((parent_class, parent_class._meta.local_fields))
        for model_class, fields in fields_with_class:
            for f in fields:
                name = f.name
                if name in exclude:
                    continue
                if f.unique:
                    unique_checks.append((model_class, (name,)))
                if f.unique_for_date and f.unique_for_date not in exclude:
                    date_checks.append((model_class, 'date', name, f.unique_for_date))
                if f.unique_for_year and f.unique_for_year not in exclude:
                    date_checks.append((model_class, 'year', name, f.unique_for_year))
                if f.unique_for_month and f.unique_for_month not in exclude:
                    date_checks.append((model_class, 'month', name, f.unique_for_month))
        return unique_checks, date_checks
    def _perform_unique_checks(self, unique_checks):
        """Run each (model_class, field_tuple) uniqueness check against the
        database; return a dict mapping field name (or NON_FIELD_ERRORS for
        multi-field checks) to a list of ValidationErrors."""
        errors = {}
        for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique field.
            lookup_kwargs = {}
            for field_name in unique_check:
                f = self._meta.get_field(field_name)
                lookup_value = getattr(self, f.attname)
                if lookup_value is None:
                    # no value, skip the lookup
                    continue
                if f.primary_key and not self._state.adding:
                    # no need to check for unique primary key when editing
                    continue
                lookup_kwargs[str(field_name)] = lookup_value
            # some fields were skipped, no reason to do the check
            if len(unique_check) != len(lookup_kwargs):
                continue
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            # Note that we need to use the pk as defined by model_class, not
            # self.pk. These can be different fields because model inheritance
            # allows single model to have effectively multiple primary keys.
            # Refs #17615.
            model_class_pk = self._get_pk_val(model_class._meta)
            if not self._state.adding and model_class_pk is not None:
                qs = qs.exclude(pk=model_class_pk)
            if qs.exists():
                if len(unique_check) == 1:
                    key = unique_check[0]
                else:
                    key = NON_FIELD_ERRORS
                errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
        return errors
    def _perform_date_checks(self, date_checks):
        """Run the unique_for_date/year/month checks; return a dict mapping
        field name to a list of ValidationErrors."""
        errors = {}
        for model_class, lookup_type, field, unique_for in date_checks:
            lookup_kwargs = {}
            # there's a ticket to add a date lookup, we can remove this special
            # case if that makes it's way in
            date = getattr(self, unique_for)
            if date is None:
                # No date to compare against: the check can't apply.
                continue
            if lookup_type == 'date':
                # unique_for_date means unique on the full calendar day.
                lookup_kwargs['%s__day' % unique_for] = date.day
                lookup_kwargs['%s__month' % unique_for] = date.month
                lookup_kwargs['%s__year' % unique_for] = date.year
            else:
                lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
            lookup_kwargs[field] = getattr(self, field)
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                errors.setdefault(field, []).append(
                    self.date_error_message(lookup_type, field, unique_for)
                )
        return errors
    def date_error_message(self, lookup_type, field_name, unique_for):
        """Build the ValidationError for a failed unique_for_<date/year/month>
        check, with interpolation params for the field's error message."""
        opts = self._meta
        field = opts.get_field(field_name)
        return ValidationError(
            message=field.error_messages['unique_for_date'],
            code='unique_for_date',
            params={
                'model': self,
                'model_name': six.text_type(capfirst(opts.verbose_name)),
                'lookup_type': lookup_type,
                'field': field_name,
                'field_label': six.text_type(capfirst(field.verbose_name)),
                'date_field': unique_for,
                'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
            }
        )
    def unique_error_message(self, model_class, unique_check):
        """Build the ValidationError for a failed uniqueness check; the
        message differs for single-field unique vs unique_together."""
        opts = model_class._meta
        params = {
            'model': self,
            'model_class': model_class,
            'model_name': six.text_type(capfirst(opts.verbose_name)),
            'unique_check': unique_check,
        }
        # A unique field
        if len(unique_check) == 1:
            field = opts.get_field(unique_check[0])
            params['field_label'] = six.text_type(capfirst(field.verbose_name))
            return ValidationError(
                message=field.error_messages['unique'],
                code='unique',
                params=params,
            )
        # unique_together
        else:
            field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
            params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
            return ValidationError(
                message=_("%(model_name)s with this %(field_labels)s already exists."),
                code='unique_together',
                params=params,
            )
    def full_clean(self, exclude=None, validate_unique=True):
        """
        Calls clean_fields, clean, and validate_unique, on the model,
        and raises a ``ValidationError`` for any errors that occurred.

        Errors from all three stages are merged into one ValidationError.
        """
        errors = {}
        if exclude is None:
            exclude = []
        else:
            exclude = list(exclude)
        try:
            self.clean_fields(exclude=exclude)
        except ValidationError as e:
            errors = e.update_error_dict(errors)
        # Form.clean() is run even if other validation fails, so do the
        # same with Model.clean() for consistency.
        try:
            self.clean()
        except ValidationError as e:
            errors = e.update_error_dict(errors)
        # Run unique checks, but only for fields that passed validation.
        if validate_unique:
            for name in errors.keys():
                if name != NON_FIELD_ERRORS and name not in exclude:
                    exclude.append(name)
            try:
                self.validate_unique(exclude=exclude)
            except ValidationError as e:
                errors = e.update_error_dict(errors)
        if errors:
            raise ValidationError(errors)
    def clean_fields(self, exclude=None):
        """
        Cleans all fields and raises a ValidationError containing a dict
        of all validation errors if any occur.
        """
        if exclude is None:
            exclude = []
        errors = {}
        for f in self._meta.fields:
            if f.name in exclude:
                continue
            # Skip validation for empty fields with blank=True. The developer
            # is responsible for making sure they have a valid value.
            raw_value = getattr(self, f.attname)
            if f.blank and raw_value in f.empty_values:
                continue
            try:
                # Field.clean() both validates and normalizes; write the
                # cleaned value back onto the instance.
                setattr(self, f.attname, f.clean(raw_value, self))
            except ValidationError as e:
                errors[f.name] = e.error_list
        if errors:
            raise ValidationError(errors)
    @classmethod
    def check(cls, **kwargs):
        """Entry point for the system check framework: run every model-level
        check and return the accumulated list of checks.Error instances."""
        errors = []
        errors.extend(cls._check_swappable())
        errors.extend(cls._check_model())
        errors.extend(cls._check_managers(**kwargs))
        # Field/constraint checks only make sense when the model is actually
        # in use (not swapped out for another model).
        if not cls._meta.swapped:
            errors.extend(cls._check_fields(**kwargs))
            errors.extend(cls._check_m2m_through_same_relationship())
            errors.extend(cls._check_long_column_names())
            clash_errors = cls._check_id_field() + cls._check_field_name_clashes()
            errors.extend(clash_errors)
            # If there are field name clashes, hide consequent column name
            # clashes.
            if not clash_errors:
                errors.extend(cls._check_column_name_clashes())
            errors.extend(cls._check_index_together())
            errors.extend(cls._check_unique_together())
            errors.extend(cls._check_ordering())
        return errors
    @classmethod
    def _check_swappable(cls):
        """ Check if the swapped model exists. """
        errors = []
        if cls._meta.swapped:
            try:
                apps.get_model(cls._meta.swapped)
            except ValueError:
                # Swappable setting value isn't "app_label.ModelName".
                errors.append(
                    checks.Error(
                        "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
                        hint=None,
                        obj=None,
                        id='models.E001',
                    )
                )
            except LookupError:
                # Well-formed reference, but no such model is installed.
                app_label, model_name = cls._meta.swapped.split('.')
                errors.append(
                    checks.Error(
                        "'%s' references '%s.%s', which has not been "
                        "installed, or is abstract." % (
                            cls._meta.swappable, app_label, model_name
                        ),
                        hint=None,
                        obj=None,
                        id='models.E002',
                    )
                )
        return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
hint=None,
obj=None,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
for __, manager, __ in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
    @classmethod
    def _check_fields(cls, **kwargs):
        """ Perform all field checks. """
        errors = []
        for field in cls._meta.local_fields:
            errors.extend(field.check(**kwargs))
        # M2M checks additionally need to know the owning model.
        for field in cls._meta.local_many_to_many:
            errors.extend(field.check(from_model=cls, **kwargs))
        return errors
    @classmethod
    def _check_m2m_through_same_relationship(cls):
        """ Check if no relationship model is used by more than one m2m field.
        """
        errors = []
        seen_intermediary_signatures = []
        fields = cls._meta.local_many_to_many
        # Skip when the target model wasn't found.
        fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
        # Skip when the relationship model wasn't found.
        fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
        for f in fields:
            # Two m2m fields clash when target model, source model and
            # intermediate model are all identical.
            signature = (f.remote_field.model, cls, f.remote_field.through)
            if signature in seen_intermediary_signatures:
                errors.append(
                    checks.Error(
                        "The model has two many-to-many relations through "
                        "the intermediate model '%s'." % f.remote_field.through._meta.label,
                        hint=None,
                        obj=cls,
                        id='models.E003',
                    )
                )
            else:
                seen_intermediary_signatures.append(signature)
        return errors
    @classmethod
    def _check_id_field(cls):
        """ Check if `id` field is a primary key. """
        # A user-declared "id" field that isn't the pk clashes with the
        # implicit auto-created "id" primary key (models.E004).
        fields = list(f for f in cls._meta.local_fields
                      if f.name == 'id' and f != cls._meta.pk)
        # fields is empty or consists of the invalid "id" field
        if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
            return [
                checks.Error(
                    "'id' can only be used as a field name if the field also "
                    "sets 'primary_key=True'.",
                    hint=None,
                    obj=cls,
                    id='models.E004',
                )
            ]
        else:
            return []
    @classmethod
    def _check_field_name_clashes(cls):
        """ Ref #17673. """
        errors = []
        used_fields = {}  # name or attname -> field
        # Check that multi-inheritance doesn't cause field name shadowing.
        for parent in cls._meta.get_parent_list():
            for f in parent._meta.local_fields:
                clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
                if clash:
                    errors.append(
                        checks.Error(
                            "The field '%s' from parent model "
                            "'%s' clashes with the field '%s' "
                            "from parent model '%s'." % (
                                clash.name, clash.model._meta,
                                f.name, f.model._meta
                            ),
                            hint=None,
                            obj=cls,
                            id='models.E005',
                        )
                    )
                used_fields[f.name] = f
                used_fields[f.attname] = f
        # Check that fields defined in the model don't clash with fields from
        # parents.
        for f in cls._meta.local_fields:
            clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
            # Note that we may detect clash between user-defined non-unique
            # field "id" and automatically added unique field "id", both
            # defined at the same model. This special case is considered in
            # _check_id_field and here we ignore it.
            id_conflict = (f.name == "id" and
                           clash and clash.name == "id" and clash.model == cls)
            if clash and not id_conflict:
                errors.append(
                    checks.Error(
                        "The field '%s' clashes with the field '%s' "
                        "from model '%s'." % (
                            f.name, clash.name, clash.model._meta
                        ),
                        hint=None,
                        obj=f,
                        id='models.E006',
                    )
                )
            used_fields[f.name] = f
            used_fields[f.attname] = f
        return errors
    @classmethod
    def _check_column_name_clashes(cls):
        """Detect two local fields mapping to the same db column (models.E007)."""
        # Store a list of column names which have already been used by other fields.
        used_column_names = []
        errors = []
        for f in cls._meta.local_fields:
            _, column_name = f.get_attname_column()
            # Ensure the column name is not already in use.
            if column_name and column_name in used_column_names:
                errors.append(
                    checks.Error(
                        "Field '%s' has column name '%s' that is used by "
                        "another field." % (f.name, column_name),
                        hint="Specify a 'db_column' for the field.",
                        obj=cls,
                        id='models.E007'
                    )
                )
            else:
                used_column_names.append(column_name)
        return errors
    @classmethod
    def _check_index_together(cls):
        """ Check the value of "index_together" option. """
        # Shape errors (E008/E009) short-circuit; otherwise validate that each
        # named field exists and is local (via _check_local_fields).
        if not isinstance(cls._meta.index_together, (tuple, list)):
            return [
                checks.Error(
                    "'index_together' must be a list or tuple.",
                    hint=None,
                    obj=cls,
                    id='models.E008',
                )
            ]
        elif any(not isinstance(fields, (tuple, list))
                for fields in cls._meta.index_together):
            return [
                checks.Error(
                    "All 'index_together' elements must be lists or tuples.",
                    hint=None,
                    obj=cls,
                    id='models.E009',
                )
            ]
        else:
            errors = []
            for fields in cls._meta.index_together:
                errors.extend(cls._check_local_fields(fields, "index_together"))
            return errors
    @classmethod
    def _check_unique_together(cls):
        """ Check the value of "unique_together" option. """
        # Mirrors _check_index_together: shape errors (E010/E011) first, then
        # per-field validation via _check_local_fields.
        if not isinstance(cls._meta.unique_together, (tuple, list)):
            return [
                checks.Error(
                    "'unique_together' must be a list or tuple.",
                    hint=None,
                    obj=cls,
                    id='models.E010',
                )
            ]
        elif any(not isinstance(fields, (tuple, list))
                for fields in cls._meta.unique_together):
            return [
                checks.Error(
                    "All 'unique_together' elements must be lists or tuples.",
                    hint=None,
                    obj=cls,
                    id='models.E011',
                )
            ]
        else:
            errors = []
            for fields in cls._meta.unique_together:
                errors.extend(cls._check_local_fields(fields, "unique_together"))
            return errors
    @classmethod
    def _check_local_fields(cls, fields, option):
        """Validate that each name in *fields* (from index_together /
        unique_together, named by *option*) is an existing, local,
        non-m2m field."""
        from django.db import models

        # In order to avoid hitting the relation tree prematurely, we use our
        # own fields_map instead of using get_field()
        forward_fields_map = {
            field.name: field for field in cls._meta._get_fields(reverse=False)
        }

        errors = []
        for field_name in fields:
            try:
                field = forward_fields_map[field_name]
            except KeyError:
                errors.append(
                    checks.Error(
                        "'%s' refers to the non-existent field '%s'." % (
                            option, field_name,
                        ),
                        hint=None,
                        obj=cls,
                        id='models.E012',
                    )
                )
            else:
                if isinstance(field.remote_field, models.ManyToManyRel):
                    errors.append(
                        checks.Error(
                            "'%s' refers to a ManyToManyField '%s', but "
                            "ManyToManyFields are not permitted in '%s'." % (
                                option, field_name, option,
                            ),
                            hint=None,
                            obj=cls,
                            id='models.E013',
                        )
                    )
                elif field not in cls._meta.local_fields:
                    errors.append(
                        checks.Error(
                            ("'%s' refers to field '%s' which is not local "
                             "to model '%s'.") % (
                                option, field_name, cls._meta.object_name,
                            ),
                            hint=("This issue may be caused by multi-table "
                                  "inheritance."),
                            obj=cls,
                            id='models.E016',
                        )
                    )
        return errors
    @classmethod
    def _check_ordering(cls):
        """ Check "ordering" option -- is it a list of strings and do all fields
        exist? """
        if cls._meta._ordering_clash:
            return [
                checks.Error(
                    "'ordering' and 'order_with_respect_to' cannot be used together.",
                    hint=None,
                    obj=cls,
                    id='models.E021',
                ),
            ]
        if cls._meta.order_with_respect_to or not cls._meta.ordering:
            return []
        if not isinstance(cls._meta.ordering, (list, tuple)):
            return [
                checks.Error(
                    ("'ordering' must be a tuple or list "
                     "(even if you want to order by only one field)."),
                    hint=None,
                    obj=cls,
                    id='models.E014',
                )
            ]
        errors = []
        # The following generator pipeline progressively filters the ordering
        # entries down to plain local field names that must exist.
        fields = cls._meta.ordering
        # Skip '?' fields.
        fields = (f for f in fields if f != '?')
        # Convert "-field" to "field".
        fields = ((f[1:] if f.startswith('-') else f) for f in fields)
        # Skip ordering in the format field1__field2 (FIXME: checking
        # this format would be nice, but it's a little fiddly).
        fields = (f for f in fields if '__' not in f)
        # Skip ordering on pk. This is always a valid order_by field
        # but is an alias and therefore won't be found by opts.get_field.
        fields = {f for f in fields if f != 'pk'}
        # Check for invalid or non-existent fields in ordering.
        invalid_fields = []
        # Any field name that is not present in field_names does not exist.
        # Also, ordering by m2m fields is not allowed.
        opts = cls._meta
        valid_fields = set(chain.from_iterable(
            (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
            for f in chain(opts.fields, opts.related_objects)
        ))
        invalid_fields.extend(fields - valid_fields)
        for invalid_field in invalid_fields:
            errors.append(
                checks.Error(
                    "'ordering' refers to the non-existent field '%s'." % invalid_field,
                    hint=None,
                    obj=cls,
                    id='models.E015',
                )
            )
        return errors
    @classmethod
    def _check_long_column_names(cls):
        """
        Check that any auto-generated column names are shorter than the limits
        for each database in which the model will be created.
        """
        errors = []
        allowed_len = None
        db_alias = None

        # Find the minimum max allowed length among all specified db_aliases.
        for db in settings.DATABASES.keys():
            # skip databases where the model won't be created
            if not router.allow_migrate_model(db, cls):
                continue
            connection = connections[db]
            max_name_length = connection.ops.max_name_length()
            # Backends that truncate names themselves (or report no limit)
            # don't constrain us.
            if max_name_length is None or connection.features.truncates_names:
                continue
            else:
                if allowed_len is None:
                    allowed_len = max_name_length
                    db_alias = db
                elif max_name_length < allowed_len:
                    allowed_len = max_name_length
                    db_alias = db

        if allowed_len is None:
            return errors

        for f in cls._meta.local_fields:
            _, column_name = f.get_attname_column()

            # Check if auto-generated name for the field is too long
            # for the database.
            if (f.db_column is None and column_name is not None
                    and len(column_name) > allowed_len):
                errors.append(
                    checks.Error(
                        'Autogenerated column name too long for field "%s". '
                        'Maximum length is "%s" for database "%s".'
                        % (column_name, allowed_len, db_alias),
                        hint="Set the column name manually using 'db_column'.",
                        obj=cls,
                        id='models.E018',
                    )
                )

        for f in cls._meta.local_many_to_many:
            # Check if auto-generated name for the M2M field is too long
            # for the database.
            for m2m in f.remote_field.through._meta.local_fields:
                _, rel_name = m2m.get_attname_column()
                if (m2m.db_column is None and rel_name is not None
                        and len(rel_name) > allowed_len):
                    errors.append(
                        checks.Error(
                            'Autogenerated column name too long for M2M field '
                            '"%s". Maximum length is "%s" for database "%s".'
                            % (rel_name, allowed_len, db_alias),
                            hint=("Use 'through' to create a separate model "
                                  "for M2M and then set column_name using "
                                  "'db_column'."),
                            obj=cls,
                            id='models.E019',
                        )
                    )

        return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
    """Persist the ordering given by ``id_list`` for an
    order_with_respect_to model (curried onto the related model)."""
    db = DEFAULT_DB_ALIAS if using is None else using
    with_respect_to = ordered_obj._meta.order_with_respect_to
    rel_val = getattr(self, with_respect_to.remote_field.field_name)
    order_name = with_respect_to.name
    # FIXME: It would be nice if there was an "update many" version of update
    # for situations like this.
    with transaction.atomic(using=db, savepoint=False):
        for position, pk in enumerate(id_list):
            lookup = {'pk': pk, order_name: rel_val}
            ordered_obj.objects.filter(**lookup).update(_order=position)
def method_get_order(ordered_obj, self):
    """Return the primary keys of ``ordered_obj`` rows related to ``self``,
    in their stored order."""
    with_respect_to = ordered_obj._meta.order_with_respect_to
    rel_val = getattr(self, with_respect_to.remote_field.field_name)
    pk_name = ordered_obj._meta.pk.name
    rows = ordered_obj.objects.filter(**{with_respect_to.name: rel_val})
    return [row[pk_name] for row in rows.values(pk_name)]
########
# MISC #
########
def simple_class_factory(model, attrs):
    """Trivial factory used when unpickling dynamic model classes.

    ``attrs`` is accepted for API compatibility with other factories but is
    ignored; the model class itself is returned unchanged.
    """
    return model
def model_unpickle(model_id, attrs, factory):
    """Used to unpickle Model subclasses with deferred fields.

    ``model_id`` is either an (app_label, model_name) tuple or, for old
    pickles, the model class itself.
    """
    if not isinstance(model_id, tuple):
        # Backwards compat - the model was cached directly in earlier versions.
        model = model_id
    else:
        # Make sure the app registry is populated before resolving the model.
        if not apps.ready:
            apps.populate(settings.INSTALLED_APPS)
        model = apps.get_model(*model_id)
    cls = factory(model, attrs)
    # Allocate an empty instance; pickle will restore its state afterwards.
    return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
    """Recreate an inner exception class instance during unpickling.

    Looks the exception class up on the class it is attached to, then
    allocates an instance without running ``__init__`` (pickle restores
    the state afterwards).
    """
    inner = getattr(klass, exception_name)
    return inner.__new__(inner)
| bsd-3-clause |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/io/fits/_numpy_hacks.py | 2 | 2798 | """
This module is for functions that do tricky things with Numpy arrays and dtypes
that are not normally supported in Numpy (but can work in limited cases
relevant to FITS) or that otherwise require workarounds.
"""
import numpy as np
def realign_dtype(dtype, offsets):
    """
    Given a Numpy struct dtype object and a list of integer offsets, with one
    offset per field in the dtype, returns a new dtype where each field has the
    given offset.

    All offsets must be non-negative integers, but otherwise have no
    restrictions, and may overlap, per the usual rules for creating struct
    dtypes.  The new dtype will have an itemsize equal to the offset of the
    right-most field plus the width of that field.

    One restriction of this function is that it must not be used with object
    arrays--incorrect offsets may lead to invalid pointers in the arrays.
    However, this function is really only meant for use by astropy.io.fits and
    object arrays are not supported for FITS data anyhow.

    This function is used primarily to get around a shortcoming in Numpy that
    it is currently impossible to create dtypes with arbitrary offsets, *and*
    that have zero-width fields.  Both of these features are needed for full
    FITS support.  However, this will be fixed in a future version of Numpy at
    which point use of this hack can be deprecated.  See
    https://github.com/numpy/numpy/pull/6430

    :raises ValueError: if ``dtype`` is not a structured dtype, or if
        ``offsets`` does not have one entry per field.
    """
    # Previously this was implemented in C, but then I realized that the C
    # version is not needed--the workaround is to use dtype.__setstate__
    # Note: There is a comment in the Numpy source code (see
    # https://github.com/numpy/numpy/blob/v1.10.1/numpy/core/src/multiarray/descriptor.c#L2226)
    # that this may be changed at some point.  But hopefully by then the fixes
    # in #6430 will be implemented, making this hack unnecessary to begin with.
    cls, args, state = dtype.__reduce__()
    # For a struct dtype the pickle state carries (..., names, fields,
    # itemsize, ...) at indices 3, 4, and 5 respectively.
    names, fields = state[3:5]
    # BUG FIX: validate *before* touching ``fields``--a non-structured dtype
    # has ``fields is None`` and calling ``.copy()`` on it used to raise an
    # unhelpful AttributeError instead of the intended ValueError.
    if fields is None or len(offsets) != len(names):
        raise ValueError(
            "Dtype must be a structured dtype, and length of offsets list "
            "must be the same as the number of fields.")
    fields = fields.copy()
    max_offset = 0
    itemsize = state[5]  # Default to the original itemsize
    for name, offset in zip(names, offsets):
        field = fields[name]
        if offset == field[1]:
            # Field already at the requested offset; nothing to do.
            continue
        fields[name] = (field[0], offset)
        if offset > max_offset:
            # Grow the itemsize so the right-most (moved) field still fits.
            itemsize = offset + field[0].itemsize
            max_offset = offset
    new_typespec = '|V{0}'.format(itemsize)
    # Splice the updated fields dict and itemsize back into the state tuple.
    new_state = state[:4] + (fields, itemsize) + state[6:]
    new_dtype = cls(new_typespec, *args[1:])
    new_dtype.__setstate__(new_state)
    return new_dtype
| mit |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/security_group_network_interface.py | 1 | 1260 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SecurityGroupNetworkInterface(Model):
    """Network interface and all its associated security rules.
    :param id: ID of the network interface.
    :type id: str
    :param security_rule_associations:
    :type security_rule_associations:
     ~azure.mgmt.network.v2017_09_01.models.SecurityRuleAssociations
    """
    # Maps attribute names to their JSON wire keys and msrest type strings;
    # consumed by msrest's Serializer/Deserializer machinery.
    # NOTE: this class is generated by AutoRest -- manual edits will be lost
    # when the SDK is regenerated.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'security_rule_associations': {'key': 'securityRuleAssociations', 'type': 'SecurityRuleAssociations'},
    }
    def __init__(self, **kwargs):
        # Unrecognized kwargs are handled (ignored/stored) by the Model base.
        super(SecurityGroupNetworkInterface, self).__init__(**kwargs)
        self.id = kwargs.get('id', None)
        self.security_rule_associations = kwargs.get('security_rule_associations', None)
| mit |
PetePriority/home-assistant | homeassistant/components/netgear_lte/__init__.py | 2 | 4551 | """
Support for Netgear LTE modems.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/netgear_lte/
"""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
import attr
import aiohttp
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, EVENT_HOMEASSISTANT_STOP)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.util import Throttle
REQUIREMENTS = ['eternalegypt==0.0.5']
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
DOMAIN = 'netgear_lte'
DATA_KEY = 'netgear_lte'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.All(cv.ensure_list, [vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
})])
}, extra=vol.ALLOW_EXTRA)
@attr.s
class ModemData:
    """Class for modem state."""
    # Required constructor arguments (attrs generates __init__ for these).
    host = attr.ib()
    modem = attr.ib()
    # State populated by async_update(); None until the first successful poll.
    serial_number = attr.ib(init=False, default=None)
    unread_count = attr.ib(init=False, default=None)
    usage = attr.ib(init=False, default=None)
    connected = attr.ib(init=False, default=True)
    # Throttle caps API polling at one call per MIN_TIME_BETWEEN_UPDATES.
    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    async def async_update(self):
        """Call the API to update the data."""
        import eternalegypt
        try:
            information = await self.modem.information()
            self.serial_number = information.serial_number
            self.unread_count = sum(1 for x in information.sms if x.unread)
            self.usage = information.usage
            # Only log on the disconnected -> connected transition.
            if not self.connected:
                _LOGGER.warning("Connected to %s", self.host)
            self.connected = True
        except eternalegypt.Error:
            # Log the loss only once, then clear readings that are now stale.
            if self.connected:
                _LOGGER.warning("Lost connection to %s", self.host)
            self.connected = False
            self.unread_count = None
            self.usage = None
@attr.s
class LTEData:
    """Shared state container for all configured Netgear LTE modems."""
    websession = attr.ib()
    modem_data = attr.ib(init=False, factory=dict)
    def get_modem_data(self, config):
        """Return the ModemData for the configured host, or the sole entry.

        When the config names a host explicitly, look it up directly.
        Otherwise the lookup is only unambiguous when exactly one modem is
        known; in every other case return None.
        """
        if CONF_HOST in config:
            return self.modem_data.get(config[CONF_HOST])
        known = list(self.modem_data.values())
        if len(known) == 1:
            return known[0]
        return None
async def async_setup(hass, config):
    """Set up the Netgear LTE component for every configured modem."""
    # Create the shared client session / state container exactly once.
    if DATA_KEY not in hass.data:
        session = async_create_clientsession(
            hass, cookie_jar=aiohttp.CookieJar(unsafe=True))
        hass.data[DATA_KEY] = LTEData(session)
    setups = [_setup_lte(hass, conf) for conf in config.get(DOMAIN, [])]
    if setups:
        # Run all modem setups concurrently.
        await asyncio.wait(setups)
    return True
async def _setup_lte(hass, lte_config):
    """Set up a Netgear LTE modem."""
    import eternalegypt
    host = lte_config[CONF_HOST]
    password = lte_config[CONF_PASSWORD]
    # Reuse the shared (cookie-enabled) client session created in async_setup.
    websession = hass.data[DATA_KEY].websession
    modem = eternalegypt.Modem(hostname=host, websession=websession)
    modem_data = ModemData(host, modem)
    try:
        await _login(hass, modem_data, password)
    except eternalegypt.Error:
        # First login failed: keep retrying in the background, but make sure
        # the retry task is cancelled when Home Assistant shuts down.
        retry_task = hass.loop.create_task(
            _retry_login(hass, modem_data, password))
        @callback
        def cleanup_retry(event):
            """Clean up retry task resources."""
            if not retry_task.done():
                retry_task.cancel()
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_retry)
async def _login(hass, modem_data, password):
    """Log in and complete setup."""
    await modem_data.modem.login(password=password)
    # Prime the state so dependent platforms see data immediately.
    await modem_data.async_update()
    # Register the modem only after a successful login + first update.
    hass.data[DATA_KEY].modem_data[modem_data.host] = modem_data
    async def cleanup(event):
        """Clean up resources."""
        # Log out of the modem when Home Assistant stops.
        await modem_data.modem.logout()
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
async def _retry_login(hass, modem_data, password):
    """Keep retrying the modem login with exponential backoff."""
    import eternalegypt
    _LOGGER.warning(
        "Could not connect to %s. Will keep trying.", modem_data.host)
    modem_data.connected = False
    # Seconds between attempts; doubled on failure, capped at five minutes.
    backoff = 15
    while not modem_data.connected:
        await asyncio.sleep(backoff)
        try:
            await _login(hass, modem_data, password)
        except eternalegypt.Error:
            backoff = min(backoff * 2, 300)
| apache-2.0 |
JianyuWang/nova | nova/volume/encryptors/luks.py | 30 | 5176 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils
from oslo_log import log as logging
from nova.i18n import _LI
from nova.i18n import _LW
from nova import utils
from nova.volume.encryptors import cryptsetup
LOG = logging.getLogger(__name__)
def is_luks(device):
    """Checks if the specified device uses LUKS for encryption.
    :param device: the device to check
    :returns: true if the specified device uses LUKS; false otherwise
    """
    # 'cryptsetup isLuks' exits 0 for a LUKS partition and non-zero
    # otherwise; with check_exit_code=True a non-zero status surfaces as a
    # ProcessExecutionError.
    try:
        utils.execute('cryptsetup', 'isLuks', '--verbose', device,
                      run_as_root=True, check_exit_code=True)
    except processutils.ProcessExecutionError as e:
        LOG.warning(_LW("isLuks exited abnormally (status %(exit_code)s): "
                        "%(stderr)s"),
                    {"exit_code": e.exit_code, "stderr": e.stderr})
        return False
    return True
class LuksEncryptor(cryptsetup.CryptsetupEncryptor):
    """A VolumeEncryptor based on LUKS.
    This VolumeEncryptor uses dm-crypt to encrypt the specified volume.
    """
    def __init__(self, connection_info, **kwargs):
        super(LuksEncryptor, self).__init__(connection_info, **kwargs)
    def _format_volume(self, passphrase, **kwargs):
        """Creates a LUKS header on the volume.
        :param passphrase: the passphrase used to access the volume
        :param kwargs: may contain 'cipher' and 'key_size' overrides
        """
        LOG.debug("formatting encrypted volume %s", self.dev_path)
        # NOTE(joel-coffman): cryptsetup will strip trailing newlines from
        # input specified on stdin unless --key-file=- is specified.
        cmd = ["cryptsetup", "--batch-mode", "luksFormat", "--key-file=-"]
        cipher = kwargs.get("cipher", None)
        if cipher is not None:
            cmd.extend(["--cipher", cipher])
        key_size = kwargs.get("key_size", None)
        if key_size is not None:
            cmd.extend(["--key-size", key_size])
        cmd.extend([self.dev_path])
        # Formatting can transiently fail on a busy device; retry a few times.
        utils.execute(*cmd, process_input=passphrase,
                      check_exit_code=True, run_as_root=True, attempts=3)
    def _open_volume(self, passphrase, **kwargs):
        """Opens the LUKS partition on the volume using the specified
        passphrase.
        :param passphrase: the passphrase used to access the volume
        """
        LOG.debug("opening encrypted volume %s", self.dev_path)
        utils.execute('cryptsetup', 'luksOpen', '--key-file=-',
                      self.dev_path, self.dev_name, process_input=passphrase,
                      run_as_root=True, check_exit_code=True)
    def attach_volume(self, context, **kwargs):
        """Shadows the device and passes an unencrypted version to the
        instance.
        Transparent disk encryption is achieved by mounting the volume via
        dm-crypt and passing the resulting device to the instance. The
        instance is unaware of the underlying encryption due to modifying the
        original symbolic link to refer to the device mounted by dm-crypt.
        """
        key = self._get_key(context).get_encoded()
        # LUKS uses a passphrase rather than a raw key -- convert to string
        # NOTE(review): hex(x).replace('0x', '') does not zero-pad
        # single-digit bytes, so this is not a standard hex encoding.
        # Presumably changing it would invalidate passphrases of existing
        # volumes -- confirm before "fixing".
        passphrase = ''.join(hex(x).replace('0x', '') for x in key)
        try:
            self._open_volume(passphrase, **kwargs)
        except processutils.ProcessExecutionError as e:
            if e.exit_code == 1 and not is_luks(self.dev_path):
                # the device has never been formatted; format it and try again
                LOG.info(_LI("%s is not a valid LUKS device;"
                             " formatting device for first use"),
                         self.dev_path)
                self._format_volume(passphrase, **kwargs)
                self._open_volume(passphrase, **kwargs)
            else:
                raise
        # modify the original symbolic link to refer to the decrypted device
        utils.execute('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      run_as_root=True, check_exit_code=True)
    def _close_volume(self, **kwargs):
        """Closes the device (effectively removes the dm-crypt mapping)."""
        LOG.debug("closing encrypted volume %s", self.dev_path)
        utils.execute('cryptsetup', 'luksClose', self.dev_name,
                      run_as_root=True, check_exit_code=True,
                      attempts=3)
| apache-2.0 |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/ibus/serializable.py | 6 | 2912 | # vim:set et sts=4 sw=4:
#
# ibus - The Input Bus
#
# Copyright (c) 2007-2010 Peng Huang <shawn.p.huang@gmail.com>
# Copyright (c) 2007-2010 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
__all__ = (
"Serializable",
"serialize_object",
"deserialize_object",
)
from object import Object
import dbus
import gobject
# Registry mapping serializable type names (__NAME__) to their classes.
__serializable_name_dict = dict()
def serializable_register(classobj):
    """Register classobj in the name registry under its __NAME__."""
    # if not issubclass(classobj, Serializable):
    #     raise "%s is not a sub-class of Serializable" % str(classobj)
    __serializable_name_dict[classobj.__NAME__] = classobj
def serialize_object(o):
    """Flatten a Serializable into a dbus.Struct; pass other values through."""
    if not isinstance(o, Serializable):
        return o
    fields = [o.__NAME__]
    o.serialize(fields)
    return dbus.Struct(fields)
def deserialize_object(v):
    """Rebuild a Serializable from a tuple produced by serialize_object.

    Non-tuple values are returned unchanged.
    """
    if not isinstance(v, tuple):
        return v
    fields = list(v)
    type_name = fields.pop(0)
    obj = __serializable_name_dict[type_name]()
    obj.deserialize(fields)
    return obj
class SerializableMeta(gobject.GObjectMeta):
    """Metaclass that auto-registers serializable classes on definition."""
    def __init__(cls, name, bases, dict_):
        super(SerializableMeta, cls).__init__(name, bases, dict_)
        # Only register classes that declare their *own* __NAME__ (checked
        # via cls.__dict__ so an inherited name does not re-register).
        if "__NAME__" in cls.__dict__:
            serializable_register(cls)
class Serializable(Object):
    """Base class for objects that can round-trip through dbus structs.

    Subclasses override serialize()/deserialize() and must keep the field
    order symmetric between the two.
    """
    # Python 2 metaclass declaration (triggers auto-registration above).
    __metaclass__ = SerializableMeta
    __gtype_name__ = "PYIBusSerializable"
    __NAME__ = "IBusSerializable"
    def __init__(self):
        super(Serializable, self).__init__()
        self.__attachments = dict()
    def set_attachment(self, name, value):
        self.__attachments[name] = value
    def get_attachment(self, name):
        return self.__attachments.get(name, None)
    def serialize(self, struct):
        # Attachments are serialized as a string->variant dbus dictionary
        # appended as the first struct field.
        d = dbus.Dictionary(signature="sv")
        for k, v in self.__attachments.items():
            d[k] = serialize_object(v)
        struct.append(d)
    def deserialize(self, struct):
        # Consume fields in the same order serialize() appended them.
        d = struct.pop(0)
        self.__attachments = dict()
        for k, v in d.items():
            self.__attachments[k] = deserialize_object(v)
    def do_destroy(self):
        self.__attachments = None
        super(Serializable, self).do_destroy()
# The base class registers itself; SerializableMeta only handles subclasses
# defined after this dict exists.
__serializable_name_dict["IBusSerializable"] = Serializable
| gpl-3.0 |
jquacinella/IS602_Project | web/scripts/service/service.py | 7 | 6565 | #!/usr/bin/env python
import sys, os, time, subprocess
class Base:
    """Shared helpers for the service and web-server management classes."""
    def run_command(self, *args):
        """Run *args as a subprocess and return its (stdout, stderr) pair."""
        proc = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        return (out, err)
class ServiceBase(Base):
def __init__(self, name, label, stdout=None, stderr=None):
self.name = name
self.label = label
self.stdout = stdout
self.stderr = stderr
self.config_file = None
def load_configuration(self):
"""
Loads the configuration required to build the command-line string
for running web2py. Returns a tuple (command_args, config_dict).
"""
s = os.path.sep
default = dict(
python = 'python',
web2py = os.path.join(s.join(__file__.split(s)[:-3]), 'web2py.py'),
http_enabled = True,
http_ip = '0.0.0.0',
http_port = 8000,
https_enabled = True,
https_ip = '0.0.0.0',
https_port = 8001,
https_key = '',
https_cert = '',
password = '<recycle>',
)
config = default
if self.config_file:
try:
f = open(self.config_file, 'r')
lines = f.readlines()
f.close()
for line in lines:
fields = line.split('=', 1)
if len(fields) == 2:
key, value = fields
key = key.strip()
value = value.strip()
config[key] = value
except:
pass
web2py_path = os.path.dirname(config['web2py'])
os.chdir(web2py_path)
args = [config['python'], config['web2py']]
interfaces = []
ports = []
if config['http_enabled']:
ip = config['http_ip']
port = config['http_port']
interfaces.append('%s:%s' % (ip, port))
ports.append(port)
if config['https_enabled']:
ip = config['https_ip']
port = config['https_port']
key = config['https_key']
cert = config['https_cert']
if key != '' and cert != '':
interfaces.append('%s:%s:%s:%s' % (ip, port, cert, key))
ports.append(ports)
if len(interfaces) == 0:
sys.exit('Configuration error. Must have settings for http and/or https')
password = config['password']
if not password == '<recycle>':
from gluon import main
for port in ports:
main.save_password(password, port)
password = '<recycle>'
args.append('-a "%s"' % password)
interfaces = ';'.join(interfaces)
args.append('--interfaces=%s' % interfaces)
if 'log_filename' in config.key():
log_filename = config['log_filename']
args.append('--log_filename=%s' % log_filename)
return (args, config)
def start(self):
pass
def stop(self):
pass
def restart(self):
pass
def status(self):
pass
def run(self):
pass
def install(self):
pass
def uninstall(self):
pass
def check_permissions(self):
"""
Does the script have permissions to install, uninstall, start, and stop services?
Return value must be a tuple (True/False, error_message_if_False).
"""
return (False, 'Permissions check not implemented')
class WebServerBase(Base):
    """Base class for configuring web2py behind an external web server."""
    def install(self):
        # Implemented by concrete web servers (e.g. Apache site install).
        pass
    def uninstall(self):
        # Implemented by concrete web servers (e.g. Apache site removal).
        pass
def get_service():
    """Return the Service implementation for the current platform.

    Exits with an explanatory message on platforms that are not supported.
    """
    name = 'web2py'
    label = 'web2py Service'
    platform = sys.platform
    if platform == 'linux2':
        from linux import LinuxService as Service
        return Service(name, label)
    if platform == 'darwin':
        # from mac import MacService as Service
        sys.exit('Mac OS X is not yet supported.\n')
    if platform == 'win32':
        # from windows import WindowsService as Service
        sys.exit('Windows is not yet supported.\n')
    sys.exit('The following platform is not supported: %s.\n' % platform)
if __name__ == '__main__':
    # Resolve the platform-specific service implementation first; this
    # exits with a message on unsupported platforms.
    service = get_service()
    # Service management needs elevated privileges; bail out early if not.
    is_root, error_message = service.check_permissions()
    if not is_root:
        sys.exit(error_message)
    if len(sys.argv) >= 2:
        # Dispatch on the single positional command argument.
        command = sys.argv[1]
        if command == 'start':
            service.start()
        elif command == 'stop':
            service.stop()
        elif command == 'restart':
            service.restart()
        elif command == 'status':
            print service.status() + '\n'
        elif command == 'run':
            service.run()
        elif command == 'install':
            service.install()
        elif command == 'uninstall':
            service.uninstall()
        elif command == 'install-apache':
            # from apache import Apache
            # server = Apache()
            # server.install()
            sys.exit('Configuring Apache is not yet supported.\n')
        elif command == 'uninstall-apache':
            # from apache import Apache
            # server = Apache()
            # server.uninstall()
            sys.exit('Configuring Apache is not yet supported.\n')
        else:
            sys.exit('Unknown command: %s' % command)
    else:
        # No command given: print usage help.
        print 'Usage: %s [command] \n' % sys.argv[0] + \
            '\tCommands:\n' + \
            '\t\tstart Starts the service\n' + \
            '\t\tstop Stop the service\n' + \
            '\t\trestart Restart the service\n' + \
            '\t\tstatus Check if the service is running\n' + \
            '\t\trun Run service is blocking mode\n' + \
            '\t\t (Press Ctrl + C to exit)\n' + \
            '\t\tinstall Install the service\n' + \
            '\t\tuninstall Uninstall the service\n' + \
            '\t\tinstall-apache Install as an Apache site\n' + \
            '\t\tuninstall-apache Uninstall from Apache\n'
| gpl-2.0 |
taxido/django-xadmin | xadmin/plugins/ajax.py | 13 | 3347 | from django import forms
from django.utils.datastructures import SortedDict
from django.utils.html import escape
from django.utils.encoding import force_unicode
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView, ModelFormAdminView, DetailAdminView
NON_FIELD_ERRORS = '__all__'
class BaseAjaxPlugin(BaseAdminPlugin):
    """Plugin base that only activates for AJAX (or explicit ?_ajax) requests."""
    def init_request(self, *args, **kwargs):
        is_ajax = self.request.is_ajax()
        return bool(is_ajax or self.request.REQUEST.get('_ajax'))
class AjaxListPlugin(BaseAjaxPlugin):
    """Render changelist data as JSON for AJAX consumers."""
    def get_list_display(self, list_display):
        """Honour an explicit ``_fields`` request parameter when present."""
        requested = self.request.GET.get('_fields', "")
        list_fields = [field for field in requested.split(",")
                       if field.strip() != ""]
        return list_fields if list_fields else list_display
    def get_result_list(self, response):
        """Build {headers, objects, total_count, has_more} from the view."""
        av = self.admin_view
        base_fields = self.get_list_display(av.base_list_display)
        headers = {}
        for cell in av.result_headers().cells:
            if cell.field_name in base_fields:
                headers[cell.field_name] = force_unicode(cell.text)
        objects = []
        for row in av.results():
            wanted = [c for c in row.cells if c.field_name in base_fields]
            objects.append(
                dict((c.field_name, escape(str(c.value))) for c in wanted))
        return self.render_response(
            {'headers': headers, 'objects': objects,
             'total_count': av.result_count, 'has_more': av.has_more})
class JsonErrorDict(forms.util.ErrorDict):
    """ErrorDict that can render itself as a JSON-friendly structure."""
    def __init__(self, errors, form):
        super(JsonErrorDict, self).__init__(errors)
        self.form = form
    def as_json(self):
        """Return [{id, name, errors}, ...], or u'' when there are no errors."""
        if not self:
            return u''
        result = []
        for name, errors in self.items():
            if name != NON_FIELD_ERRORS:
                field_id = self.form[name].auto_id
            else:
                field_id = NON_FIELD_ERRORS
            result.append({'id': field_id, 'name': name, 'errors': errors})
        return result
class AjaxFormPlugin(BaseAjaxPlugin):
    """Return JSON results for model form posts made over AJAX."""
    def post_response(self, __):
        """Describe the newly saved object instead of redirecting."""
        obj = self.admin_view.new_obj
        payload = {
            'result': 'success',
            'obj_id': obj.pk,
            'obj_repr': str(obj),
            'change_url': self.admin_view.model_admin_url('change', obj.pk),
            'detail_url': self.admin_view.model_admin_url('detail', obj.pk),
        }
        return self.render_response(payload)
    def get_response(self, __):
        """For POSTs, short-circuit with a JSON validity report."""
        if self.request.method.lower() != 'post':
            return __()
        form = self.admin_view.form_obj
        if form.is_valid():
            result = {'result': 'success'}
        else:
            result = {
                'result': 'error',
                'errors': JsonErrorDict(form.errors, form).as_json(),
            }
        return self.render_response(result)
class AjaxDetailPlugin(BaseAjaxPlugin):
    """Serve object detail data as JSON (or a quick-detail HTML fragment)."""
    def get_response(self, __):
        if self.request.GET.get('_format') == 'html':
            # Fall back to a stripped-down HTML template (used for popups).
            self.admin_view.detail_template = 'xadmin/views/quick_detail.html'
            return __()
        form = self.admin_view.form_obj
        results = []
        for position, field_name in form.helper.layout.get_field_names():
            field_result = self.admin_view.get_field_result(field_name)
            results.append((field_result.label, field_result.val))
        return self.render_response(SortedDict(results))
# Wire the AJAX plugins into the corresponding xadmin admin views.
site.register_plugin(AjaxListPlugin, ListAdminView)
site.register_plugin(AjaxFormPlugin, ModelFormAdminView)
site.register_plugin(AjaxDetailPlugin, DetailAdminView)
| bsd-3-clause |
NeovaHealth/odoo | addons/l10n_pt/__openerp__.py | 380 | 1834 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Thinkopen Solutions, Lda. All Rights Reserved
# http://www.thinkopensolutions.com.
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Portugal - Chart of Accounts',
    'version': '0.011',
    'author': 'ThinkOpen Solutions',
    'website': 'http://www.thinkopensolutions.com/',
    'category': 'Localization/Account Charts',
    # Description is intentionally in Portuguese (SNC chart of accounts).
    'description': 'Plano de contas SNC para Portugal',
    # Modules that must be installed before this one.
    'depends': ['base',
                'base_vat',
                'account',
                'account_chart',
                ],
    # XML data files loaded, in order, when the module is installed.
    'data': [
        'account_types.xml',
        'account_chart.xml',
        'account_tax_code_template.xml',
        'account_chart_template.xml',
        'fiscal_position_templates.xml',
        'account_taxes.xml',
        'l10n_chart_pt_wizard.xml',
    ],
    'demo': [],
    'installable': True,
}
| agpl-3.0 |
foreni-packages/pyew | vtrace/platforms/solaris.py | 24 | 6626 | """
Solaris Platform Module (Incomplete)
"""
# Copyright (C) 2007 Invisigoth - See LICENSE file for details
import os
import struct
import array
# Control codes (long values) for messages written to ctl and lwpctl files.
# (Comments below are taken from the Solaris procfs headers.)
PCNULL = 0L      # null request, advance to next message
PCSTOP = 1L      # direct process or lwp to stop and wait for stop
PCDSTOP = 2L     # direct process or lwp to stop
PCWSTOP = 3L     # wait for process or lwp to stop, no timeout
PCTWSTOP = 4L    # wait for stop, with long millisecond timeout arg
PCRUN = 5L       # make process/lwp runnable, w/ long flags argument
PCCSIG = 6L      # clear current signal from lwp
PCCFAULT = 7L    # clear current fault from lwp
PCSSIG = 8L      # set current signal from siginfo_t argument
PCKILL = 9L      # post a signal to process/lwp, long argument
PCUNKILL = 10L   # delete a pending signal from process/lwp, long arg
PCSHOLD = 11L    # set lwp signal mask from sigset_t argument
PCSTRACE = 12L   # set traced signal set from sigset_t argument
PCSFAULT = 13L   # set traced fault set from fltset_t argument
PCSENTRY = 14L   # set traced syscall entry set from sysset_t arg
PCSEXIT = 15L    # set traced syscall exit set from sysset_t arg
PCSET = 16L      # set modes from long argument
PCUNSET = 17L    # unset modes from long argument
PCSREG = 18L     # set lwp general registers from prgregset_t arg
PCSFPREG = 19L   # set lwp floating-point registers from prfpregset_t
PCSXREG = 20L    # set lwp extra registers from prxregset_t arg
PCNICE = 21L     # set nice priority from long argument
PCSVADDR = 22L   # set %pc virtual address from long argument
PCWATCH = 23L    # set/unset watched memory area from prwatch_t arg
PCAGENT = 24L    # create agent lwp with regs from prgregset_t arg
PCREAD = 25L     # read from the address space via priovec_t arg
PCWRITE = 26L    # write to the address space via priovec_t arg
PCSCRED = 27L    # set process credentials from prcred_t argument
PCSASRS = 28L    # set ancillary state registers from asrset_t arg
PCSPRIV = 29L    # set process privileges from prpriv_t argument
PCSZONE = 30L    # set zoneid from zoneid_t argument
PCSCREDX = 31L   # as PCSCRED but with supplemental groups
# PCRUN long operand flags.
PRCSIG = 0x01    # clear current signal, if any
PRCFAULT = 0x02  # clear current fault, if any
PRSTEP = 0x04    # direct the lwp to single-step
PRSABORT = 0x08  # abort syscall, if in syscall
PRSTOP = 0x10    # set directed stop request
# Status flags
PR_STOPPED = 0x00000001  # lwp is stopped
PR_ISTOP = 0x00000002    # lwp is stopped on an event of interest
PR_DSTOP = 0x00000004    # lwp has a stop directive in effect
PR_STEP = 0x00000008     # lwp has a single-step directive in effect
PR_ASLEEP = 0x00000010   # lwp is sleeping in a system call
PR_PCINVAL = 0x00000020  # contents of pr_instr undefined
PR_ASLWP = 0x00000040    # obsolete flag; never set
PR_AGENT = 0x00000080    # this lwp is the /proc agent lwp
PR_DETACH = 0x00000100   # this is a detached lwp
PR_DAEMON = 0x00000200   # this is a daemon lwp
# The following flags apply to the process, not to an individual lwp
PR_ISSYS = 0x00001000    # this is a system process
PR_VFORKP = 0x00002000   # process is the parent of a vfork()d child
PR_ORPHAN = 0x00004000   # process's process group is orphaned
# The following process flags are modes settable by PCSET/PCUNSET
PR_FORK = 0x00100000     # inherit-on-fork is in effect
PR_RLC = 0x00200000      # run-on-last-close is in effect
PR_KLC = 0x00400000      # kill-on-last-close is in effect
PR_ASYNC = 0x00800000    # asynchronous-stop is in effect
PR_MSACCT = 0x01000000   # micro-state usage accounting is in effect
PR_BPTADJ = 0x02000000   # breakpoint trap pc adjustment is in effect
PR_PTRACE = 0x04000000   # ptrace-compatibility mode is in effect
PR_MSFORK = 0x08000000   # micro-state accounting inherited on fork
PR_IDLE = 0x10000000     # lwp is a cpu's idle thread
# Permissions for mapped memory regions (per /proc map entries).
MA_READ = 0x04           # readable by the traced process
MA_WRITE = 0x02          # writable by the traced process
MA_EXEC = 0x01           # executable by the traced process
MA_SHARED = 0x08         # changes are shared by mapped object
MA_ANON = 0x40           # anonymous memory (e.g. /dev/zero)
MA_ISM = 0x80            # intimate shared mem (shared MMU resources)
MA_NORESERVE = 0x100     # mapped with MAP_NORESERVE
MA_SHM = 0x200           # System V shared memory
MA_RESERVED1 = 0x400     # reserved for future use
class SolarisMixin:
    """Mixin implementing /proc-based process control on Solaris.

    Incomplete (see module docstring); uses the procfs ctl-file protocol:
    control messages are struct-packed and written to /proc/<pid>/ctl.
    """
    def initMixin(self):
        #import sunprocfs
        # Handle to /proc/<pid>/ctl, opened by platformAttach().
        self.ctl = None
    def platformGetRegs(self):
        # NOTE(review): incomplete -- fetches the pid but reads no registers
        # and implicitly returns None.
        pid = self.getPid()
    #def platformGetThreads(self):
        #ret = []
        #for name in os.listdir("/proc/%d/lwp" % self.pid):
            #ret.append(int(name))
        #return ret
    def platformAttach(self, pid):
        self.ctl = file("/proc/%d/ctl" % pid, "ab")
        # NOTE(review): PRSTOP is a PCRUN *flag*, yet it is written here as a
        # bare control code -- possibly PCDSTOP/PCSTOP was intended; confirm
        # against the Solaris proc(4) protocol.
        self.ctl.write(struct.pack("<L", PRSTOP))
    def platformContinue(self):
        """
        Tell the process to continue running
        """
        # PCRUN takes a long flags operand (0 = no special flags).
        self.writeCtl(struct.pack("<LL", PCRUN, 0))
    def platformWait(self):
        """
        wait for the process to do something "interesting"
        """
        # PCWSTOP blocks until the process or lwp stops (no timeout).
        self.writeCtl(struct.pack("<L", PCWSTOP))
        bytes = file("/proc/%d/psinfo" % self.pid, "rb").read()
        return bytes
    def writeCtl(self, bytes):
        # Raw write of a packed control message to the ctl file descriptor.
        os.write(self.ctl.fileno(), bytes)
    def platformDetach(self):
        print "SOLARIS DETACH"
        self.ctl.close()
        self.ctl = None
class SolarisIntelMixin:
    """
    Handle register formats for the intel solaris stuff
    """
    def getRegisterFormat(self):
        # NOTE(review): stubbed out -- no register struct format yet.
        return ""
    def getRegisterNames(self):
        # NOTE(review): stubbed out -- no register names yet.
        return []
    def platformReadMemory(self, addr, size):
        # Allocate a local buffer and ask the kernel (via a PCREAD priovec)
        # to copy `size` bytes from the target's address space into it.
        a = array.array('c',"\x00" * size)
        baddr, blen = a.buffer_info()
        priovec = struct.pack("<4L",PCREAD, baddr, blen, addr)
        print repr(priovec)
        self.writeCtl(priovec)
        return a.tostring()
    def platformWriteMemory(self, addr, bytes):
        # Mirror of platformReadMemory: PCWRITE copies from our local buffer
        # into the target's address space at `addr`.
        a = array.array('c',bytes)
        baddr,blen = a.buffer_info()
        priovec = struct.pack("<LLLL", PCWRITE, baddr, blen, addr)
        self.writeCtl(priovec)
    def platformGetMaps(self):
        """Parse /proc/<pid>/map into a list of (addr, size, perms, '') tuples."""
        ret = []
        pid = self.getPid()
        mapdata = file("/proc/%d/map" % pid, "rb").read()
        while mapdata:
            # Each prmap entry appears to be consumed in 96-byte records:
            # addr/size at offset 0, flags at offset 80 -- TODO confirm
            # against the prmap_t layout for this Solaris version.
            addr,size = struct.unpack("<LL", mapdata[:8])
            perms, = struct.unpack("<L", mapdata[80:84])
            # Keep only the rwx bits (MA_READ|MA_WRITE|MA_EXEC).
            perms = perms & 0x7
            ret.append((addr,size, perms, ""))
            mapdata = mapdata[96:]
        return ret
| gpl-2.0 |
manashmndl/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# Brian Cheung
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.

    copy : boolean, optional, default: True
        Whether to copy vectors, or perform in-place normalization.

    max_svd_restarts : int, optional, default: 30
        Maximum number of attempts to restart SVD if convergence fails

    n_iter_max : int, optional, default: 20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of
        the rotation matrix

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----

    The eigenvector embedding is used to iteratively search for the
    closest discrete partition.  First, the eigenvector embedding is
    normalized to the space of partition matrices.  An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated.  Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated.  These two
    calculations are performed until convergence.  The discrete partition
    matrix is returned as the clustering solution.  Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.

    """

    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    random_state = check_random_state(random_state)

    vectors = as_float_array(vectors, copy=copy)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with
    # respect to the first element.  This may have to do with constraining
    # the eigenvectors to lie in a specific quadrant to make the
    # discretization search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize the rows of the eigenvectors.  Samples should lie on the
    # unit hypersphere centered at the origin.  This transforms the samples
    # in the embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:

        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            t_discrete = np.dot(vectors, rotation)

            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # BUG FIX: the restart counter used to be incremented on
                # the *success* path, so the restart budget was consumed
                # by ordinary iterations while an actual SVD failure never
                # counted toward it.  Count only genuine failures here.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            # Distance between the discrete partition and the continuous
            # embedding under the current rotation (normalized-cut value).
            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans'):
    """Cluster samples from a precomputed affinity matrix.

    The affinity graph is embedded into a low-dimensional space using the
    normalized graph Laplacian; labels are then assigned in that space,
    either with k-means or by searching for the closest discrete
    partition ('discretize').  Spectral clustering is particularly useful
    when cluster shapes are highly non-convex (e.g. nested circles) or
    when a center/spread summary does not describe the clusters well.  If
    ``affinity`` is a graph adjacency matrix, this computes normalized
    graph cuts.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    -----------
    affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
        Symmetric matrix describing sample-to-sample similarity, e.g. a
        graph adjacency matrix, a heat kernel of the pairwise distance
        matrix, or a symmetric k-nearest-neighbours connectivity matrix.

    n_clusters : integer, optional
        Number of clusters to extract.

    n_components : integer, optional, default is n_clusters
        Number of eigenvectors used for the spectral embedding.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        Eigenvalue decomposition strategy.  'amg' requires pyamg; it can
        be faster on very large sparse problems but may be unstable.

    random_state : int seed, RandomState instance, or None (default)
        Seeds the lobpcg decomposition (when eigen_solver == 'amg') and
        the k-means initialization.

    n_init : int, optional, default: 10
        Number of k-means runs with different centroid seeds; the best
        output (by inertia) is kept.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for the arpack eigendecomposition.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        Strategy for assigning labels in the embedding space.  k-means is
        the popular choice but is sensitive to initialization; the
        discretization approach (Yu & Shi, 2003) is less so.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi

    Notes
    ------
    The graph should contain only one connected component, otherwise the
    results make little sense.  For k=2 this solves the normalized-cut
    problem.
    """
    # Reject unknown strategies before doing any expensive work.
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    random_state = check_random_state(random_state)
    if n_components is None:
        n_components = n_clusters

    # drop_first=False: keep every requested eigenvector in the embedding.
    embedding = spectral_embedding(affinity, n_components=n_components,
                                   eigen_solver=eigen_solver,
                                   random_state=random_state,
                                   eigen_tol=eigen_tol, drop_first=False)

    if assign_labels == 'discretize':
        return discretize(embedding, random_state=random_state)

    _, labels, _ = k_means(embedding, n_clusters,
                           random_state=random_state, n_init=n_init)
    return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
    """Apply clustering to a projection to the normalized laplacian.

    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance when clusters are
    nested circles on the 2D plan.

    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    When calling ``fit``, an affinity matrix is constructed using either
    kernel function such the Gaussian (aka RBF) kernel of the euclidean
    distanced ``d(X, X)``::

            np.exp(-gamma * d(X,X) ** 2)

    or a k-nearest neighbors connectivity matrix.

    Alternatively, using ``precomputed``, a user-provided affinity
    matrix can be used.

    Read more in the :ref:`User Guide <spectral_clustering>`.

    Parameters
    -----------
    n_clusters : integer, optional
        The dimension of the projection subspace.

    affinity : string, array-like or callable, default 'rbf'
        If a string, this may be one of 'nearest_neighbors', 'precomputed',
        'rbf' or one of the kernels supported by
        `sklearn.metrics.pairwise_kernels`.
        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not checked
        by the clustering algorithm.

    gamma : float
        Scaling factor of RBF, polynomial, exponential chi^2 and
        sigmoid affinity kernel. Ignored for
        ``affinity='nearest_neighbors'``.

    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.

    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.

    n_neighbors : integer
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization
        of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
        and by the K-Means initialization.

    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

    eigen_tol : float, optional, default: 0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
        The strategy to use to assign labels in the embedding
        space. There are two ways to assign labels after the laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another approach
        which is less sensitive to random initialization.

    kernel_params : dictionary of string to any, optional
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.

    Attributes
    ----------
    affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only if after calling
        ``fit``.

    labels_ :
        Labels of each point

    Notes
    -----
    If you have an affinity matrix, such as a distance matrix,
    for which 0 means identical elements, and high values means
    very dissimilar elements, it can be transformed in a
    similarity matrix that is well suited for the algorithm by
    applying the Gaussian (RBF, heat) kernel::

        np.exp(- X ** 2 / (2. * delta ** 2))

    Another alternative is to take a symmetric version of the k
    nearest neighbors connectivity matrix of the points.

    If the pyamg package is installed, it is used: this greatly
    speeds up computation.

    References
    ----------

    - Normalized cuts and image segmentation, 2000
      Jianbo Shi, Jitendra Malik
      http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324

    - A Tutorial on Spectral Clustering, 2007
      Ulrike von Luxburg
      http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323

    - Multiclass spectral clustering, 2003
      Stella X. Yu, Jianbo Shi
      http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
    """

    def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
                 n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
                 eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
                 kernel_params=None):
        # Parameters are stored verbatim (sklearn convention): all
        # validation and work happens in ``fit``.
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params

    def fit(self, X, y=None):
        """Creates an affinity matrix for X using the selected affinity,
        then applies spectral clustering to this affinity matrix.

        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            OR, if affinity==`precomputed`, a precomputed affinity
            matrix of shape (n_samples, n_samples)
        """
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        # A square X is ambiguous: it could be raw data that happens to be
        # square, or a precomputed affinity.  Warn unless the user said so.
        # BUG FIX: the warning text previously rendered as
        # "``fit``now constructs" because the adjacent string literals were
        # concatenated without a separating space.
        if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
            warnings.warn("The spectral clustering API has changed. ``fit``"
                          " now constructs an affinity matrix from data. To"
                          " use a custom affinity matrix, "
                          "set ``affinity=precomputed``.")

        if self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True)
            # Symmetrize the (directed) kNN connectivity graph.
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                # Named kernels take their hyper-parameters from the
                # estimator; a callable kernel only gets kernel_params.
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                     filter_params=True,
                                                     **params)

        random_state = check_random_state(self.random_state)
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels)
        return self

    @property
    def _pairwise(self):
        # Signals (per sklearn convention) that X is a sample-by-sample
        # matrix when a precomputed affinity is used.
        return self.affinity == "precomputed"
| bsd-3-clause |
ashhher3/invenio | modules/miscutil/lib/upgrades/invenio_2013_03_28_idxINDEX_tokenizer.py | 18 | 2470 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.dbquery import run_sql
# Upgrade-engine ordering: this step runs after the html_markup column step.
depends_on = ['invenio_2013_03_25_idxINDEX_html_markup']
def info():
    """Return the one-line description shown by the upgrade engine."""
    return "Introduces new columns for idxINDEX table: tokenizer"
def do_upgrade():
    """Add the ``tokenizer`` column to idxINDEX and backfill it per index.

    Step 1 is guarded by inspecting SHOW CREATE TABLE so the upgrade is
    safe to re-run; step 2 assigns a tokenizer class name to every known
    index, with a final catch-all default.
    """
    #first step: change table
    stmt = run_sql('SHOW CREATE TABLE idxINDEX')[0][1]
    # Skip the ALTER if a previous (partial) run already added the column.
    if '`tokenizer` varchar(50)' not in stmt:
        run_sql("ALTER TABLE idxINDEX ADD COLUMN tokenizer varchar(50) NOT NULL default '' AFTER remove_latex_markup")
    #second step: update table
    run_sql("""UPDATE idxINDEX SET tokenizer='BibIndexDefaultTokenizer' WHERE name IN
               ('global', 'collection', 'abstract', 'keyword',
                'reference', 'reportnumber', 'title', 'collaboration',
                'affiliation', 'caption', 'exacttitle')""")
    run_sql("""UPDATE idxINDEX SET tokenizer='BibIndexAuthorTokenizer' WHERE name IN
               ('author', 'firstauthor')""")
    run_sql("""UPDATE idxINDEX SET tokenizer='BibIndexExactAuthorTokenizer' WHERE name IN
               ('exactauthor', 'exactfirstauthor')""")
    run_sql("""UPDATE idxINDEX SET tokenizer='BibIndexFulltextTokenizer' WHERE name='fulltext'""")
    run_sql("""UPDATE idxINDEX SET tokenizer='BibIndexAuthorCountTokenizer' WHERE name='authorcount'""")
    run_sql("""UPDATE idxINDEX SET tokenizer='BibIndexJournalTokenizer' WHERE name='journal'""")
    run_sql("""UPDATE idxINDEX SET tokenizer='BibIndexYearTokenizer' WHERE name='year'""")
    # Catch-all: any index not matched above falls back to the default.
    run_sql("""UPDATE idxINDEX SET tokenizer='BibIndexDefaultTokenizer' WHERE tokenizer = ''""")
def estimate():
    """Return a rough cost estimate for this upgrade step."""
    return 1
def pre_upgrade():
    """No pre-upgrade checks are required for this step."""
def post_upgrade():
    """No post-upgrade cleanup is required for this step."""
| gpl-2.0 |
suryakencana/niimanga | niimanga/views/cms.py | 1 | 26586 | """
# Copyright (c) 05 2015 | surya
# 05/05/15 nanang.ask@kubuskotak.com
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# cms.py
"""
import logging
import shutil
from niimanga.libs.crawend import CrawlEnd
from niimanga.libs.ziputils import extract_zip
from os import path, walk, makedirs
from niimanga.configs.view import ZHandler
from niimanga.libs import utils
from niimanga.libs.utils import FieldsGrid, ResponseHTTP, slugist
from niimanga.models.acl import Group
from niimanga.models.component import Menu, Slider, SliderImage
from niimanga.models.manga import Manga, Chapter
from niimanga.models.master import Season, ISOLang
from pyramid.view import view_config
LOG = logging.getLogger(__name__)
class CMSMain(ZHandler):
    """Entry point for the CMS backend landing page."""

    @view_config(route_name='cms_main',
                 renderer="layouts/cms.html")
    @CrawlEnd()
    def index(self):
        """Render the CMS dashboard shell."""
        request = self.R  # bound for parity with the other CMS views
        return {"project": "menu"}
class ChapterView(ZHandler):
    """CMS views for manga chapters (``Chapter`` rows).

    Routes are dispatched via the ``action`` match parameter:
    search / lang / src (grid data) / edit-able (inline edit) / save-new.
    """

    @view_config(route_name='cms_chapter', match_param='action=search',
                 renderer='json')
    def search_chapter(self):
        """Autocomplete: return (title, id) pairs for chapters matching ``q``."""
        _ = self.R
        rows = []
        with ResponseHTTP(_.response) as resp:
            _in = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            qry = Chapter.query
            q = _.params.get('q', None)
            if q is not None:
                # Case-insensitive substring match on the chapter title.
                data = qry.filter(Chapter.title.ilike('%{0}%'.format(q))).all()
                for row in data:
                    rows.append(dict(
                        label=row.title,
                        value=row.id
                    ))
                _in = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(_in,
                                code=code,
                                status=status, rows=rows)

    @view_config(route_name='cms_chapter', match_param='action=lang',
                 renderer='json')
    def iso_lang(self):
        """Return every ISO language as (name, iso) option pairs."""
        _ = self.R
        rows = []
        with ResponseHTTP(_.response) as resp:
            _in = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            qry = ISOLang.query
            data = qry.all()
            if data is not None and len(data) > 0:
                for row in data:
                    rows.append(dict(
                        label=row.name,
                        value=row.iso
                    ))
                _in = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(_in,
                                code=code,
                                status=status, rows=rows)

    @view_config(route_name='cms_chapter', match_param='action=src',
                 renderer='json', request_method="POST")
    def dataset(self):
        """Grid source: every chapter joined with its parent manga."""
        _ = self.R
        qry = Chapter.query
        chapters = qry.all()
        return dict(
            total=qry.count(),
            rows=[
                dict(
                    id=chapter.id,
                    mangaid=chapter.manga.id,
                    manga=chapter.manga.title,
                    title=chapter.title,
                    volume=chapter.volume,
                    chapter=chapter.chapter,
                    lang=chapter.lang.iso,
                    slug=chapter.slug
                ) for chapter in chapters]
        )

    @view_config(route_name='cms_chapter', match_param='action=edit-able',
                 renderer='json', request_method='POST')
    def editable_save(self):
        """Inline edit: update one chapter from the JSON ``row`` payload."""
        _ = self.R
        with ResponseHTTP(_.response) as resp:
            _in = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            qry = Chapter.query
            rdict = utils.loads(_.params.get("row", None))
            if rdict is not None and len(rdict) > 0:
                # manga = Manga.query.filter(Manga.id == rdict.get('mangaid', None)).first()
                # manga_title = "-".join([manga.type, manga.title])
                lang = ISOLang.query.filter(ISOLang.iso == rdict.get('lang', 'en')).first()
                chapter = qry.get(rdict.get('id', None))
                chapter.title = rdict.get('title', None)
                chapter.volume = rdict.get('volume', None)
                chapter.chapter = rdict.get('chapter', None)
                chapter.lang = lang
                _in = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(_in,
                                code=code,
                                status=status)

    @view_config(route_name='cms_chapter', match_param='action=save-new',
                 renderer='json', request_method='POST')
    def save_new(self):
        """Create a chapter and unpack its uploaded page archives.

        Zip files previously uploaded to ``temps/<uuid>`` in storage are
        extracted into ``<manga_id>/<chapter_id>``.
        """
        _ = self.R
        with ResponseHTTP(_.response) as resp:
            _in = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            # for key, value in _.params.iteritems():
            #     print(":".join([key, value]))
            if _.params.get('title', None) is not None:
                manga = Manga.query.filter(Manga.id == _.params.get('series', None)).first()
                manga_slug = "-".join([manga.type, manga.title])
                lang = ISOLang.query.filter(ISOLang.iso == _.params.get('lang', 'en')).first()
                v = _.params.get('volume', 0)
                c = _.params.get('chapter', 0)
                # NOTE(review): str.isdigit() rejects decimals like "1.5",
                # silently storing 0 -- confirm that is intended.
                chapter = Chapter(
                    _.params.get('title', None),
                    c if str(c).isdigit() else 0,
                    v if str(v).isdigit() else 0
                )
                slug_chapter = ' '.join([manga_slug, _.params.get('title', None)])
                manga.chapter_count += 1
                manga.updated_chapter()
                chapter.lang = lang
                chapter.updated = utils.datetime.now()
                chapter.manga = manga
                # s = 1000v + c
                # chapter.sortorder = (1000*float(v)) + float(c)
                # NOTE(review): float(...) raises if 'chapter' is absent or
                # non-numeric, unlike the guarded constructor args above.
                chapter.sortorder = float(_.params.get('chapter', None))
                chapter.slug = slug_chapter
                _.db.add(chapter)
                # Re-read the freshly added row by its slugified title.
                chp_tmp = Chapter.query.filter(Chapter.slug == slugist(slug_chapter)).first()
                temps_path = _.storage.path('/'.join(['temps', _.params.get('uuid', None)]))
                print(temps_path)
                for root, dirs, files in walk(temps_path):
                    LOG.info(files)
                    for f in files:
                        fpath = '/'.join([temps_path, f])
                        fdest = _.storage.path('/'.join([manga.id, chp_tmp.id]))
                        print(fpath)
                        print(fdest)
                        extract_zip(fpath, fdest)
                _in = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(_in,
                                code=code,
                                status=status)
class SeriesView(ZHandler):
    """CMS views for manga series (``Manga`` rows).

    Routes are dispatched via the ``action`` match parameter:
    search / src (grid data) / edit-able (inline edit) / save-new.
    """

    @view_config(route_name='cms_series', match_param='action=search',
                 renderer='json')
    def search_series(self):
        """Autocomplete: return (title, id) pairs for series matching ``q``."""
        _ = self.R
        rows = []
        with ResponseHTTP(_.response) as resp:
            _in = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            qry = Manga.query
            q = _.params.get('q', None)
            if q is not None:
                # Case-insensitive substring match on the series title.
                data = qry.filter(Manga.title.ilike('%{0}%'.format(q))).all()
                for row in data:
                    rows.append(dict(
                        label=row.title,
                        value=row.id
                    ))
                _in = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(_in,
                                code=code,
                                status=status, rows=rows)

    @view_config(route_name='cms_series', match_param='action=src',
                 renderer='json', request_method="POST")
    def dataset(self):
        """Grid source: every series with its display fields."""
        _ = self.R
        qry = Manga.query
        manga = qry.all()
        return dict(
            total=qry.count(),
            rows=[
                dict(
                    id=series.id,
                    title=series.title,
                    authors=series.get_authors(),
                    artist=series.get_artist(),
                    description=series.description,
                    category=series.category,
                    released=series.released,
                    status=series.status,
                    slug=series.slug
                ) for series in manga]
        )

    @view_config(route_name='cms_series', match_param='action=edit-able',
                 renderer='json', request_method='POST')
    def editable_save(self):
        """Inline edit: update one series from the JSON ``row`` payload."""
        _ = self.R
        with ResponseHTTP(_.response) as resp:
            _in = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            qry = Manga.query
            rdict = utils.loads(_.params.get("row", None))
            if rdict is not None and len(rdict) > 0:
                series = qry.get(rdict.get('id', None))
                series.title = rdict.get('title', 'no title')
                series.set_authors(rdict.get('authors', ''))
                series.set_artist(rdict.get('artist', ''))
                series.description = rdict.get('description', '')
                # NOTE(review): 'ja' looks like a language code used as the
                # default *category* -- confirm the intended default.
                series.category = rdict.get('category', 'ja')
                series.released = rdict.get('released', None)
                series.status = rdict.get('status', None)
                _in = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(_in,
                                code=code,
                                status=status)

    @view_config(route_name='cms_series', match_param='action=save-new',
                 renderer='json', request_method='POST')
    def save_new(self):
        """Create a new series and install its cover image.

        Files previously uploaded to ``temps/<uuid>`` in storage are
        scanned; the .jpg/.png ones are copied to ``<manga_id>/cover.<ext>``
        and the temp directory is removed afterwards.
        """
        _ = self.R
        with ResponseHTTP(_.response) as resp:
            _in = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            # for key, value in _.params.iteritems():
            #     print(":".join([key, value]))
            if _.params.get('title', None) is not None:
                manga = Manga(
                    _.params.get('type', 'kk'),
                    _.params.get('title', None),
                    _.params.get('released', None),
                    _.params.get('genres', None),
                    _.params.get('authors', None),
                    _.params.get('artist', None),
                    _.params.get('aka', None),
                    _.params.get('description', None),
                    _.params.get('status', None)
                )
                manga.category = _.params.get('category', 'ja')
                _.db.add(manga)
                # Re-read the freshly added row by its "<type>-<title>" slug.
                mng_tmp = Manga.query.filter(Manga.slug == slugist("-".join([_.params.get('type', 'kk'), _.params.get('title', None)]))).first()
                temps_path = _.storage.path('/'.join(['temps', _.params.get('uuid', None)]))
                for root, dirs, files in walk(temps_path):
                    LOG.info(files)
                    for f in files:
                        fpath = '/'.join([temps_path, f])
                        ext = str(f).split('.')[-1]
                        LOG.info(fpath)
                        fdest = _.storage.path('/'.join([mng_tmp.id]))
                        folder_zip = '/'.join([fdest, 'cover.{ext}'.format(ext=ext)])
                        if '.jpg' in folder_zip or '.png' in folder_zip:
                            # LOG.info(folder_zip)
                            if not path.exists(fdest):
                                makedirs(fdest)
                            shutil.copy(fpath, folder_zip)
                            mng_tmp.thumb = '.'.join(['cover', ext])
                shutil.rmtree(temps_path)
                _in = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(_in,
                                code=code,
                                status=status)
class MenuView(ZHandler):
    """CRUD views for the ``Menu`` master records."""

    @view_config(route_name='cms_menu', match_param='action=list',
                 renderer="cms/menu/index.html")
    def index(self):
        """Render the menu grid page."""
        request = self.R
        columns = dict(id="ID", label="Label Menu", name="Route Name", url="Route Url")
        page = dict(title="Master Menu")
        return dict(project="menu", cols=columns, data=page)

    @view_config(route_name='cms_menu', match_param='action=src',
                 renderer='json', request_method="POST")
    def dataset(self):
        """Return all menu rows for the data grid."""
        request = self.R
        query = Menu.query
        rows = [dict(id=item.id, label=item.label, name=item.name, url=item.url)
                for item in query.all()]
        return dict(total=query.count(), rows=rows)

    @view_config(route_name='cms_menu', match_param='action=edit-able',
                 renderer='json', request_method='POST')
    def editable_save(self):
        """Apply an inline-edit payload to an existing menu row."""
        request = self.R
        with ResponseHTTP(request.response) as resp:
            message = u'Success'
            code, status = ResponseHTTP.OK
            payload = utils.loads(request.params.get("row", None))
            record = Menu.query.get(payload.get('id', None))
            record.name = payload.get('name', '')
            record.slug = payload.get('name', '')
            record.label = payload.get('label', '')
            record.url = payload.get('url', '')
            return resp.to_json(message, code=code, status=status)

    @view_config(route_name='cms_menu', match_param='action=save-new',
                 renderer='json', request_method='POST')
    def save_new(self):
        """Create a new menu row from the submitted form parameters."""
        request = self.R
        with ResponseHTTP(request.response) as resp:
            message = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            if request.params.get('label', None) is not None:
                record = Menu(
                    request.params.get('label', None),
                    request.params.get('name', None),
                    request.params.get('url', None))
                request.db.add(record)
                message = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(message, code=code, status=status)
class GroupView(ZHandler):
    """CRUD views for the ``Group`` master records."""

    @view_config(route_name='cms_group', match_param='action=list',
                 renderer="cms/group/index.html")
    def index(self):
        """Render the group grid page."""
        request = self.R
        columns = dict(id="ID", name="Group Name", slug="Slug Name")
        page = dict(title="Master Groups")
        return dict(project="group", cols=columns, data=page)

    @view_config(route_name='cms_group', match_param='action=src',
                 renderer='json', request_method="POST")
    def dataset(self):
        """Return all group rows for the data grid."""
        request = self.R
        query = Group.query
        groups = query.all()
        for item in groups:
            print(item.id)  # debug trace kept from the original implementation
        return dict(
            total=query.count(),
            rows=[dict(id=item.id, name=item.name, slug=item.slug)
                  for item in groups]
        )

    @view_config(route_name='cms_group', match_param='action=edit-able',
                 renderer='json', request_method='POST')
    def editable_save(self):
        """Apply an inline-edit payload to an existing group row."""
        request = self.R
        with ResponseHTTP(request.response) as resp:
            message = u'Success'
            code, status = ResponseHTTP.OK
            payload = utils.loads(request.params.get("row", None))
            record = Group.query.get(payload.get('id', None))
            record.name = payload.get('name', '')
            record.slug = payload.get('name', '')
            return resp.to_json(message, code=code, status=status)

    @view_config(route_name='cms_group', match_param='action=save-new',
                 renderer='json', request_method='POST')
    def save_new(self):
        """Create a new group row from the submitted form parameters."""
        request = self.R
        print(request.params.get('name'))  # debug trace kept from the original
        with ResponseHTTP(request.response) as resp:
            message = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            if request.params.get('name', None) is not None:
                record = Group(request.params.get('name', None))
                request.db.add(record)
                message = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(message, code=code, status=status)
class SeasonView(ZHandler):
    """CRUD views for the ``Season`` master records."""

    @view_config(route_name='cms_season', match_param='action=list',
                 renderer="cms/season/index.html")
    def index(self):
        """Render the season grid page."""
        request = self.R
        columns = dict(id="ID", title="Season Title",
                       category="Category",
                       type="Season Type",
                       year="Season Years")
        page = dict(title="Master Season")
        return dict(project="season", cols=columns, data=page)

    @view_config(route_name='cms_season', match_param='action=src',
                 renderer='json', request_method="POST")
    def dataset(self):
        """Return all season rows for the data grid."""
        request = self.R
        query = Season.query
        rows = [dict(id=item.id,
                     title=item.title,
                     category=item.category, type=item.type, year=item.year)
                for item in query.all()]
        return dict(total=query.count(), rows=rows)

    @view_config(route_name='cms_season', match_param='action=edit-able',
                 renderer='json', request_method='POST')
    def editable_save(self):
        """Apply an inline-edit payload to an existing season row."""
        request = self.R
        with ResponseHTTP(request.response) as resp:
            message = u'Success'
            code, status = ResponseHTTP.OK
            payload = utils.loads(request.params.get("row", None))
            record = Season.query.get(payload.get('id', None))
            record.title = payload.get('title', '')
            record.slug = payload.get('title', '')
            record.category = payload.get('category', 'JD')
            record.type = payload.get('type', 'winter')
            record.year = payload.get('year', 0)
            return resp.to_json(message, code=code, status=status)

    @view_config(route_name='cms_season', match_param='action=save-new',
                 renderer='json', request_method='POST')
    def save_new(self):
        """Create a new season row from the submitted form parameters."""
        request = self.R
        with ResponseHTTP(request.response) as resp:
            message = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            if request.params.get('title', None) is not None:
                record = Season(
                    request.params.get('title', None),
                    request.params.get('category', 'JD'),
                    request.params.get('type', 'winter'),
                    request.params.get('year', 0))
                request.db.add(record)
                message = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(message, code=code, status=status)
class SliderView(ZHandler):
    """CMS views for the ``Slider`` master records (image sliders)."""

    @view_config(route_name='cms_slider', match_param='action=list',
                 renderer="cms/slider/index.html")
    def index(self):
        """Render the slider grid; this grid uses the richer ``FieldsGrid``
        column spec, serialized to JSON for the client."""
        _ = self.R
        cols_name = [FieldsGrid('id', 'ID', visible=False).to_dict(),
                     FieldsGrid('name', 'Slider Name', editable=True).to_dict(),
                     # Category and position are edited via select boxes.
                     FieldsGrid('category', 'Slider Category', editable=dict(
                         type='select',
                         source=[{'value': 'JD', 'text': 'Dorama'},
                                 {'value': 'AN', 'text': 'Anime'},
                                 {'value': 'KD', 'text': 'KDrama'}])).to_dict(),
                     FieldsGrid('type', 'Slider Position', editable=dict(
                         type='select',
                         source=[{'value': 'HR', 'text': 'Header'},
                                 {'value': 'BT', 'text': 'Bottom'},
                                 {'value': 'MD', 'text': 'Middle'}])).to_dict(),
                     FieldsGrid('detail', 'Detail', width=50, align='left',
                                valign='middle',
                                actionable=dict(iconcls='fa fa-list-alt')).to_dict(),
                     FieldsGrid('delete', 'Delete', width=50, align='left',
                                valign='middle',
                                actionable=dict(iconcls='fa fa-trash-o')).to_dict()]
        data = dict(title="Master Images Slider")
        # print(utils.dumps(cols_name))
        return dict(project="slider", cols=utils.dumps(cols_name), data=data)

    @view_config(route_name='cms_slider', match_param='action=src',
                 renderer='json', request_method="POST")
    def dataset(self):
        """Grid source: every slider row."""
        _ = self.R
        qry = Slider.query
        slider = qry.all()
        return dict(
            total=qry.count(),
            rows=[dict(id=sld.id,
                       name=sld.name,
                       category=sld.category, type=sld.type) for sld in slider]
        )

    @view_config(route_name='cms_slider', match_param='action=edit-able',
                 renderer='json', request_method='POST')
    def editable_save(self):
        """Inline edit: update one slider from the JSON ``row`` payload."""
        _ = self.R
        with ResponseHTTP(_.response) as resp:
            _in = u'Success'
            code, status = ResponseHTTP.OK
            qry = Slider.query
            rdict = utils.loads(_.params.get("row", None))
            slider = qry.get(rdict.get('id', None))
            slider.name = rdict.get('name', '')
            # The slug is fed from the same 'name' field as the name itself.
            slider.slug = rdict.get('name', '')
            slider.category = rdict.get('category', 'JD')
            slider.type = rdict.get('type', 'HR')
            return resp.to_json(_in,
                                code=code,
                                status=status)

    @view_config(route_name='cms_slider', match_param='action=save-new',
                 renderer='json', request_method='POST')
    def save_new(self):
        """Create a new slider from the submitted form parameters."""
        _ = self.R
        with ResponseHTTP(_.response) as resp:
            _in = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            if _.params.get('name', None) is not None:
                slider = Slider(
                    _.params.get('name', None),
                    _.params.get('category', 'JD'),
                    _.params.get('type', 'HR'))
                _.db.add(slider)
                _in = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(_in,
                                code=code,
                                status=status)
class SliderDetailView(ZHandler):
    """CMS views for the images attached to a master slider."""

    @view_config(route_name='cms_slider_detail', match_param='action=list',
                 renderer="cms/slider/detail.html")
    def index(self):
        """Render the slider-image grid with its column definitions."""
        _ = self.R
        cols_name = [FieldsGrid('id', 'ID', visible=False).to_dict(),
                     # master slider
                     FieldsGrid('pid', 'Slider Name', editable=dict(
                         type='select',
                         source=[dict(value=slide.id, text=slide.name)
                                 for slide in Slider.query.all()])).to_dict(),
                     FieldsGrid('image', 'Image', editable=True).to_dict(),
                     FieldsGrid('url', 'Link Url', editable=True).to_dict(),
                     FieldsGrid('detail', 'Detail', width=50, align='left',
                                valign='middle',
                                actionable=dict(iconcls='fa fa-list-alt')).to_dict(),
                     FieldsGrid('delete', 'Delete', width=50, align='left',
                                valign='middle',
                                actionable=dict(iconcls='fa fa-trash-o')).to_dict()]
        sliders = [(slide.id, slide.name) for slide in Slider.query.all()]
        data = dict(title="List Images Slider", sliders=sliders)
        return dict(project="slider", cols=utils.dumps(cols_name), data=data)

    @view_config(route_name='cms_slider_detail', match_param='action=src',
                 renderer='json', request_method="POST")
    def dataset(self):
        """Return all slider images as JSON rows for the grid."""
        _ = self.R
        qry = SliderImage.query
        slider = qry.all()
        return dict(
            total=qry.count(),
            rows=[dict(id=sld.id,
                       name=sld.sliders.name,
                       image=sld.image, url=sld.url) for sld in slider]
        )

    @view_config(route_name='cms_slider_detail', match_param='action=edit-able',
                 renderer='json', request_method='POST')
    def editable_save(self):
        """Persist an inline grid edit of a slider image.

        BUG FIX: the image and url columns were both read from the 'name'
        key (copy-paste from SliderView.editable_save), which wiped both
        fields on every edit. They now read their own 'image'/'url' keys,
        matching the column names declared in index().
        """
        _ = self.R
        with ResponseHTTP(_.response) as resp:
            _in = u'Success'
            code, status = ResponseHTTP.OK
            qry = SliderImage.query
            rdict = utils.loads(_.params.get("row", None))
            slide = Slider.query.get(rdict.get("pid", None))
            sliderimg = qry.get(rdict.get('id', None))
            sliderimg.image = rdict.get('image', '')
            sliderimg.url = rdict.get('url', '')
            sliderimg.sliders = slide
            return resp.to_json(_in,
                                code=code,
                                status=status)

    @view_config(route_name='cms_slider_detail', match_param='action=save-new',
                 renderer='json', request_method='POST')
    def save_new(self):
        """Create a new SliderImage from POST parameters.

        BUG FIX: removed the leftover debug print of _.POST['image'], which
        raised KeyError before the graceful missing-image check below could
        run whenever no image was posted.
        """
        _ = self.R
        with ResponseHTTP(_.response) as resp:
            _in = u'Failed'
            code, status = ResponseHTTP.BAD_REQUEST
            if _.params.get('image', None) is not None:
                # NOTE(review): the 'HR' / 'JD' fallback values look like
                # copy-paste leftovers from SliderView/SeasonView; kept for
                # backward compatibility -- confirm intended defaults.
                slider = Slider.query.get(_.params.get('pid', 'HR'))
                slimage = SliderImage(
                    _.params.get('image', None),
                    _.params.get('url', 'JD'))
                slimage.sliders = slider
                _.db.add(slimage)
                _in = u'Success'
                code, status = ResponseHTTP.OK
            return resp.to_json(_in,
                                code=code,
                                status=status)
Victoralin10/ACMSolutions | IEEExtreme2017/elem3.py | 1 | 1302 | import requests
# Shared HTTP session used to talk to the CS Academy grader endpoint.
session = requests.Session()
# NOTE(review): the session id, CSRF token and cookies below are credentials
# captured from a live browser session; they expire and should not be kept in
# version control. The Content-Type boundary here must match the boundary
# hand-built in getAnswer() below.
session.headers.update({'Cookie': 'crossSessionId=u2lwcda374udixopbw6hfunlq8onoowh; csrftoken=oFo0r2bfk3CEMQBFUC10cOBpBl08xf3qK4cxLjP4oJilCNxXt7vEsBB7e2i70wHB',
                        'Origin': 'https://csacademy.com',
                        'Accept-Encoding': 'gzip, deflate, br',
                        'Accept-Language': 'es-419,es;q=0.8,en-US;q=0.6,en;q=0.4',
                        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
                        'Content-Type': 'multipart/form-data; boundary=----WebKitFormBoundaryDSBJTdTASRxZUc9i',
                        'Accept': '*/*',
                        'Referer': 'https://csacademy.com/ieeextreme11/task/c787255968c972c653677a52599ecc2f/',
                        'x-requested-with': 'XMLHttpRequest',
                        'Connection': 'keep-alive',
                        'x-csrftoken': 'oFo0r2bfk3CEMQBFUC10cOBpBl08xf3qK4cxLjP4oJilCNxXt7vEsBB7e2i70wHB',
                        'DNT': '1'})
def getAnswer(entrada):
    """Submit *entrada* to the grader's test-evaluation endpoint and return
    the parsed JSON response.

    The request body is a hand-built multipart/form-data payload whose
    boundary must match the Content-Type header set on the session above.
    NOTE(review): there is no CRLF between the interpolated input and the
    closing boundary -- this only works because the caller passes input whose
    newlines were converted to CRLF (so it ends with one); confirm before
    reusing with other inputs.
    """
    data = '------WebKitFormBoundaryDSBJTdTASRxZUc9i\r\nContent-Disposition: form-data; name="type"\r\n\r\nelementary\r\n------WebKitFormBoundaryDSBJTdTASRxZUc9i\r\nContent-Disposition: form-data; name="input"\r\n\r\n{0}------WebKitFormBoundaryDSBJTdTASRxZUc9i--\r\n'.format(entrada)
    return session.post('https://csacademy.com/eval/input_server_test/', data=data).json()
# Feed the whole input file to the grader (CRLF line endings required by the
# multipart payload) and print the returned output. A context manager is used
# so the file handle is closed; the original open() leaked it.
with open('in.txt') as infile:
    print(getAnswer(infile.read().replace('\n', '\r\n'))['output'])
| mit |
Towhidn/django-boilerplate | account/modules/functions.py | 1 | 1077 | import itertools
from django.template.defaultfilters import slugify
from django.utils.encoding import smart_bytes
from hashlib import sha1
import random
def get_slug(obj, title, group):
    """
    Assign a unique slug to ``obj``, derived from ``title``.

    Only acts on unsaved objects (``obj.pk is None``); appends ``-1``,
    ``-2``, ... until no other row of ``group`` holds the slug.

    :param obj: Model Object
    :param title: Title to create slug from
    :param group: Model Class used for the uniqueness lookup
    :return: Model object with unique slug
    """
    if obj.pk is not None:
        return obj
    base = slugify(title)
    obj.slug = base
    for suffix in itertools.count(1):
        taken = group.objects.filter(slug=obj.slug).exists()
        if not taken and obj.slug is not None:
            return obj
        obj.slug = '%s-%d' % (base, suffix)
def generate_sha1(string, salt=None):
    """
    Hash ``string`` with SHA-1 after prefixing it with ``salt``.

    :param string: The string that needs to be encrypted.
    :param salt: Optionally define your own salt. If none is supplied, will use a random string of 5 characters.
    :return: Tuple containing the salt and hash.
    """
    text = str(string)
    if not salt:
        # Random 5-character salt derived from a SHA-1 of random().
        salt = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
    digest = sha1(smart_bytes(salt) + smart_bytes(text)).hexdigest()
    return salt, digest
| mit |
odahoda/noisicaa | noisicaa/builtin_nodes/control_track/processor_messages.py | 1 | 1650 | #!/usr/bin/python3
# @begin:license
#
# Copyright (c) 2015-2019, Benjamin Niemann <pink@odahoda.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @end:license
from noisicaa import audioproc
from noisicaa.builtin_nodes import processor_message_registry_pb2
def add_control_point(
        node_id: str,
        id: int,  # pylint: disable=redefined-builtin
        time: audioproc.MusicalTime,
        value: float
) -> audioproc.ProcessorMessage:
    """Build a ProcessorMessage telling the cvgenerator processor of
    ``node_id`` to add a control point ``(time, value)`` with the given id."""
    message = audioproc.ProcessorMessage(node_id=node_id)
    extension = processor_message_registry_pb2.cvgenerator_add_control_point
    payload = message.Extensions[extension]
    payload.id = id
    payload.value = value
    payload.time.CopyFrom(time.to_proto())
    return message
def remove_control_point(
        node_id: str,
        id: int  # pylint: disable=redefined-builtin
) -> audioproc.ProcessorMessage:
    """Build a ProcessorMessage telling the cvgenerator processor of
    ``node_id`` to remove the control point with the given id."""
    message = audioproc.ProcessorMessage(node_id=node_id)
    extension = processor_message_registry_pb2.cvgenerator_remove_control_point
    message.Extensions[extension].id = id
    return message
| gpl-2.0 |
anilmuthineni/tensorflow | tensorflow/python/debug/cli/analyzer_cli_test.py | 16 | 61279 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the Analyzer CLI Backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug import debug_data
from tensorflow.python.debug import debug_utils
from tensorflow.python.debug.cli import analyzer_cli
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
def parse_op_and_node(line):
  """Parse a line containing an op node followed by a node name.

  For example, if the line is
    "  [Variable] hidden/weights",
  this function will return ("Variable", "hidden/weights")

  Args:
    line: The line to be parsed, as a str.

  Returns:
    Name of the parsed op type.
    Name of the parsed node.
  """
  # split() with no argument collapses runs of whitespace, so extra padding
  # between the op type and the node name does not yield empty tokens the
  # way split(" ") did.
  fields = line.strip().split()
  op_type = fields[0].replace("[", "").replace("]", "")

  # Not using [-1], to tolerate any other items that might be present behind
  # the node name.
  node_name = fields[1]

  return op_type, node_name
def assert_column_header_command_shortcut(tst,
                                          command,
                                          reverse,
                                          node_name_regex,
                                          op_type_regex,
                                          tensor_filter_name):
  """Check a column-header sort-shortcut command for stale flags.

  Each assertion verifies the *absence* of a flag that should not appear:
  "-r" must be absent when the listing is already reversed (clicking toggles
  it off), and each filter flag with a None value (rendered as e.g.
  "-n None") must be absent when that filter was not supplied.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    command: (str) the shortcut command attached to the column header.
    reverse: (bool) whether the current listing is reverse-sorted.
    node_name_regex: (str or None) active node-name filter.
    op_type_regex: (str or None) active op-type filter.
    tensor_filter_name: (str or None) active tensor filter.
  """
  tst.assertFalse(reverse and "-r" in command)
  tst.assertFalse(not(op_type_regex) and ("-t %s" % op_type_regex) in command)
  # BUG FIX: the node-name filter is passed with the -n flag and the tensor
  # filter with the -f flag; both checks previously tested "-t ..." (copy-
  # paste from the op-type check) and so could never catch a stale flag.
  tst.assertFalse(
      not(node_name_regex) and ("-n %s" % node_name_regex) in command)
  tst.assertFalse(
      not(tensor_filter_name) and ("-f %s" % tensor_filter_name) in command)
def assert_listed_tensors(tst,
                          out,
                          expected_tensor_names,
                          expected_op_types,
                          node_name_regex=None,
                          op_type_regex=None,
                          tensor_filter_name=None,
                          sort_by="timestamp",
                          reverse=False):
  """Check RichTextLines output for list_tensors commands.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    out: The RichTextLines object to be checked.
    expected_tensor_names: (list of str) Expected tensor names in the list.
    expected_op_types: (list of str) Expected op types of the tensors, in the
      same order as the expected_tensor_names.
    node_name_regex: Optional: node name regex filter.
    op_type_regex: Optional: op type regex filter.
    tensor_filter_name: Optional: name of the tensor filter.
    sort_by: (str) (timestamp | op_type | tensor_name) the field by which the
      tensors in the list are sorted.
    reverse: (bool) whether the sorting is in reverse (i.e., descending) order.
  """

  line_iter = iter(out.lines)
  attr_segs = out.font_attr_segs
  # Tracks the index of the current line so that font-attribute segments can
  # be looked up for the header row below.
  line_counter = 0

  num_tensors = len(expected_tensor_names)

  # First line: tensor count (and the active tensor filter, if any).
  if tensor_filter_name is None:
    tst.assertEqual("%d dumped tensor(s):" % num_tensors, next(line_iter))
  else:
    tst.assertEqual("%d dumped tensor(s) passing filter \"%s\":" %
                    (num_tensors, tensor_filter_name), next(line_iter))
  line_counter += 1

  # Filter banners appear only when the corresponding filter was supplied.
  if op_type_regex is not None:
    tst.assertEqual("Op type regex filter: \"%s\"" % op_type_regex,
                    next(line_iter))
    line_counter += 1

  if node_name_regex is not None:
    tst.assertEqual("Node name regex filter: \"%s\"" % node_name_regex,
                    next(line_iter))
    line_counter += 1

  tst.assertEqual("", next(line_iter))
  line_counter += 1

  # Verify the column heads "t (ms)", "Op type" and "Tensor name" are present.
  line = next(line_iter)
  tst.assertIn("t (ms)", line)
  tst.assertIn("Op type", line)
  tst.assertIn("Tensor name", line)

  # Verify the command shortcuts in the top row.
  # Each column header carries a clickable sort-shortcut: attr_seg is a
  # (begin, end, (MenuItem, "bold")) tuple whose MenuItem.content is the
  # list_tensors command that re-sorts by that column.
  attr_segs = out.font_attr_segs[line_counter]
  attr_seg = attr_segs[0]
  tst.assertEqual(0, attr_seg[0])
  tst.assertEqual(len("t (ms)"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s timestamp", command)
  assert_column_header_command_shortcut(
      tst, command, reverse, node_name_regex, op_type_regex,
      tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])

  idx0 = line.index("Size")
  attr_seg = attr_segs[1]
  tst.assertEqual(idx0, attr_seg[0])
  tst.assertEqual(idx0 + len("Size"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s dump_size", command)
  assert_column_header_command_shortcut(tst, command, reverse, node_name_regex,
                                        op_type_regex, tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])

  idx0 = line.index("Op type")
  attr_seg = attr_segs[2]
  tst.assertEqual(idx0, attr_seg[0])
  tst.assertEqual(idx0 + len("Op type"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s op_type", command)
  assert_column_header_command_shortcut(
      tst, command, reverse, node_name_regex, op_type_regex,
      tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])

  idx0 = line.index("Tensor name")
  attr_seg = attr_segs[3]
  tst.assertEqual(idx0, attr_seg[0])
  tst.assertEqual(idx0 + len("Tensor name"), attr_seg[1])
  command = attr_seg[2][0].content
  tst.assertIn("-s tensor_name", command)
  assert_column_header_command_shortcut(
      tst, command, reverse, node_name_regex, op_type_regex,
      tensor_filter_name)
  tst.assertEqual("bold", attr_seg[2][1])

  # Verify the listed tensors and their timestamps.
  tensor_timestamps = []
  dump_sizes_bytes = []
  op_types = []
  tensor_names = []
  for line in line_iter:
    # Each data row is "[<rel time>] <size> <op type> <tensor name>"; the
    # filter drops the empty tokens produced by padding spaces.
    items = line.split(" ")
    items = [item for item in items if item]

    # items[0] is "[<rel time>]"; strip the brackets before parsing.
    rel_time = float(items[0][1:-1])
    tst.assertGreaterEqual(rel_time, 0.0)

    tensor_timestamps.append(rel_time)
    dump_sizes_bytes.append(command_parser.parse_readable_size_str(items[1]))
    op_types.append(items[2])
    tensor_names.append(items[3])

  # Verify that the tensors should be listed in ascending order of their
  # timestamps.
  if sort_by == "timestamp":
    sorted_timestamps = sorted(tensor_timestamps)
    if reverse:
      sorted_timestamps.reverse()
    tst.assertEqual(sorted_timestamps, tensor_timestamps)
  elif sort_by == "dump_size":
    sorted_dump_sizes_bytes = sorted(dump_sizes_bytes)
    if reverse:
      sorted_dump_sizes_bytes.reverse()
    tst.assertEqual(sorted_dump_sizes_bytes, dump_sizes_bytes)
  elif sort_by == "op_type":
    sorted_op_types = sorted(op_types)
    if reverse:
      sorted_op_types.reverse()
    tst.assertEqual(sorted_op_types, op_types)
  elif sort_by == "tensor_name":
    sorted_tensor_names = sorted(tensor_names)
    if reverse:
      sorted_tensor_names.reverse()
    tst.assertEqual(sorted_tensor_names, tensor_names)
  else:
    tst.fail("Invalid value in sort_by: %s" % sort_by)

  # Verify that the tensors are all listed.
  for tensor_name, op_type in zip(expected_tensor_names, expected_op_types):
    tst.assertIn(tensor_name, tensor_names)
    index = tensor_names.index(tensor_name)
    tst.assertEqual(op_type, op_types[index])
def assert_node_attribute_lines(tst,
                                out,
                                node_name,
                                op_type,
                                device,
                                input_op_type_node_name_pairs,
                                ctrl_input_op_type_node_name_pairs,
                                recipient_op_type_node_name_pairs,
                                ctrl_recipient_op_type_node_name_pairs,
                                attr_key_val_pairs=None,
                                num_dumped_tensors=None,
                                show_stack_trace=False,
                                stack_trace_available=False):
  """Check RichTextLines output for node_info commands.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    out: The RichTextLines object to be checked.
    node_name: Name of the node.
    op_type: Op type of the node, as a str.
    device: Name of the device on which the node resides.
    input_op_type_node_name_pairs: A list of 2-tuples of op type and node name,
      for the (non-control) inputs to the node.
    ctrl_input_op_type_node_name_pairs: A list of 2-tuples of op type and node
      name, for the control inputs to the node.
    recipient_op_type_node_name_pairs: A list of 2-tuples of op type and node
      name, for the (non-control) output recipients to the node.
    ctrl_recipient_op_type_node_name_pairs: A list of 2-tuples of op type and
      node name, for the control output recipients to the node.
    attr_key_val_pairs: Optional: attribute key-value pairs of the node, as a
      list of 2-tuples.
    num_dumped_tensors: Optional: number of tensor dumps from the node.
    show_stack_trace: (bool) whether the stack trace of the node's
      construction is asserted to be present.
    stack_trace_available: (bool) whether Python stack trace is available.
  """

  # The checks below consume out.lines strictly in order, mirroring the
  # exact layout that the node_info command emits.
  line_iter = iter(out.lines)

  tst.assertEqual("Node %s" % node_name, next(line_iter))
  tst.assertEqual("", next(line_iter))
  tst.assertEqual(" Op: %s" % op_type, next(line_iter))
  tst.assertEqual(" Device: %s" % device, next(line_iter))
  tst.assertEqual("", next(line_iter))
  tst.assertEqual(" %d input(s) + %d control input(s):" %
                  (len(input_op_type_node_name_pairs),
                   len(ctrl_input_op_type_node_name_pairs)), next(line_iter))

  # Check inputs.
  tst.assertEqual(" %d input(s):" % len(input_op_type_node_name_pairs),
                  next(line_iter))
  for op_type, node_name in input_op_type_node_name_pairs:
    tst.assertEqual(" [%s] %s" % (op_type, node_name), next(line_iter))

  tst.assertEqual("", next(line_iter))

  # Check control inputs.
  if ctrl_input_op_type_node_name_pairs:
    tst.assertEqual(" %d control input(s):" %
                    len(ctrl_input_op_type_node_name_pairs), next(line_iter))
    for op_type, node_name in ctrl_input_op_type_node_name_pairs:
      tst.assertEqual(" [%s] %s" % (op_type, node_name), next(line_iter))

    tst.assertEqual("", next(line_iter))

  tst.assertEqual(" %d recipient(s) + %d control recipient(s):" %
                  (len(recipient_op_type_node_name_pairs),
                   len(ctrl_recipient_op_type_node_name_pairs)),
                  next(line_iter))

  # Check recipients, the order of which is not deterministic.
  tst.assertEqual(" %d recipient(s):" %
                  len(recipient_op_type_node_name_pairs), next(line_iter))
  t_recs = []
  for _ in recipient_op_type_node_name_pairs:
    line = next(line_iter)

    op_type, node_name = parse_op_and_node(line)
    t_recs.append((op_type, node_name))

  # assertItemsEqual: order-insensitive comparison.
  tst.assertItemsEqual(recipient_op_type_node_name_pairs, t_recs)

  # Check control recipients, the order of which is not deterministic.
  if ctrl_recipient_op_type_node_name_pairs:
    tst.assertEqual("", next(line_iter))

    tst.assertEqual(" %d control recipient(s):" %
                    len(ctrl_recipient_op_type_node_name_pairs),
                    next(line_iter))

    t_ctrl_recs = []
    for _ in ctrl_recipient_op_type_node_name_pairs:
      line = next(line_iter)

      op_type, node_name = parse_op_and_node(line)
      t_ctrl_recs.append((op_type, node_name))

    tst.assertItemsEqual(ctrl_recipient_op_type_node_name_pairs, t_ctrl_recs)

  # The order of multiple attributes can be non-deterministic.
  if attr_key_val_pairs:
    tst.assertEqual("", next(line_iter))

    tst.assertEqual("Node attributes:", next(line_iter))

    kv_pairs = []
    for key, val in attr_key_val_pairs:
      # Each attribute occupies two lines ("key:" then the value) followed
      # by a blank line; the loop variables are deliberately overwritten
      # with the parsed values.
      key = next(line_iter).strip().replace(":", "")

      val = next(line_iter).strip()

      kv_pairs.append((key, val))

      tst.assertEqual("", next(line_iter))

    tst.assertItemsEqual(attr_key_val_pairs, kv_pairs)

  if num_dumped_tensors is not None:
    tst.assertEqual("%d dumped tensor(s):" % num_dumped_tensors,
                    next(line_iter))
    tst.assertEqual("", next(line_iter))

    dump_timestamps_ms = []
    for _ in xrange(num_dumped_tensors):
      line = next(line_iter)

      tst.assertStartsWith(line.strip(), "Slot 0 @ DebugIdentity @")
      tst.assertTrue(line.strip().endswith(" ms"))

      dump_timestamp_ms = float(line.strip().split(" @ ")[-1].replace("ms", ""))
      tst.assertGreaterEqual(dump_timestamp_ms, 0.0)
      dump_timestamps_ms.append(dump_timestamp_ms)

    # Dumps must be listed in chronological order.
    tst.assertEqual(sorted(dump_timestamps_ms), dump_timestamps_ms)

  if show_stack_trace:
    tst.assertEqual("", next(line_iter))
    tst.assertEqual("", next(line_iter))
    tst.assertEqual("Traceback of node construction:", next(line_iter))
    if stack_trace_available:
      try:
        depth_counter = 0
        while True:
          # Each stack frame is rendered as 5 lines: depth, Line, Function,
          # Text, then a blank separator; iterate until lines run out.
          for i in range(5):
            line = next(line_iter)
            if i == 0:
              tst.assertEqual(depth_counter, int(line.split(":")[0]))
            elif i == 1:
              tst.assertStartsWith(line, " Line:")
            elif i == 2:
              tst.assertStartsWith(line, " Function:")
            elif i == 3:
              tst.assertStartsWith(line, " Text:")
            elif i == 4:
              tst.assertEqual("", line)

          depth_counter += 1
      except StopIteration:
        # The iterator must be exhausted exactly at a frame boundary
        # (i.e. right after a complete 5-line frame).
        tst.assertEqual(0, i)
    else:
      tst.assertEqual("(Unavailable because no Python graph has been loaded)",
                      next(line_iter))
def check_syntax_error_output(tst, out, command_prefix):
  """Check RichTextLines output for valid command prefix but invalid syntax."""

  expected_lines = [
      "Syntax error for command: %s" % command_prefix,
      "For help, do \"help %s\"" % command_prefix,
  ]
  tst.assertEqual(expected_lines, out.lines)
def check_error_output(tst, out, command_prefix, args):
  """Check RichTextLines output from invalid/erroneous commands.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    out: The RichTextLines object to be checked.
    command_prefix: The command prefix of the command that caused the error.
    args: The arguments (excluding prefix) of the command that caused the error.
  """

  full_command = "%s %s" % (command_prefix, " ".join(args))
  # An error report has the header line plus at least a traceback.
  tst.assertGreater(len(out.lines), 2)
  tst.assertStartsWith(
      out.lines[0],
      "Error occurred during handling of command: " + full_command)
def _check_node_bound_menu_item(tst, menu, caption, node_name):
  """Verify one menu item whose enabled state is tied to a node name.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    menu: The Menu object extracted from the output's annotations.
    caption: (str) caption of the menu item to check.
    node_name: (str or None) if truthy, the item must be enabled and its
      command must end with this node name; otherwise it must be disabled.
  """
  menu_item = menu.caption_to_item(caption)
  if node_name:
    tst.assertTrue(menu_item.is_enabled())
    tst.assertTrue(menu_item.content.endswith(node_name))
  else:
    tst.assertFalse(menu_item.is_enabled())


def check_main_menu(tst,
                    out,
                    list_tensors_enabled=False,
                    node_info_node_name=None,
                    print_tensor_node_name=None,
                    list_inputs_node_name=None,
                    list_outputs_node_name=None):
  """Check the main menu annotation of an output.

  Args:
    tst: A test_util.TensorFlowTestCase instance.
    out: The RichTextLines object whose main-menu annotation is checked.
    list_tensors_enabled: (bool) expected enabled state of list_tensors.
    node_info_node_name: (str or None) node name expected on the node_info
      item, or None if the item should be disabled.
    print_tensor_node_name: (str or None) same, for the print_tensor item.
    list_inputs_node_name: (str or None) same, for the list_inputs item.
    list_outputs_node_name: (str or None) same, for the list_outputs item.
  """
  tst.assertIn(debugger_cli_common.MAIN_MENU_KEY, out.annotations)

  menu = out.annotations[debugger_cli_common.MAIN_MENU_KEY]
  tst.assertEqual(list_tensors_enabled,
                  menu.caption_to_item("list_tensors").is_enabled())

  # The four node-bound items share the same enabled/endswith contract;
  # the previously copy-pasted checks are factored into a single helper.
  _check_node_bound_menu_item(tst, menu, "node_info", node_info_node_name)
  _check_node_bound_menu_item(tst, menu, "print_tensor", print_tensor_node_name)
  _check_node_bound_menu_item(tst, menu, "list_inputs", list_inputs_node_name)
  _check_node_bound_menu_item(tst, menu, "list_outputs",
                              list_outputs_node_name)

  # run_info and help are always available.
  tst.assertTrue(menu.caption_to_item("run_info").is_enabled())
  tst.assertTrue(menu.caption_to_item("help").is_enabled())
def check_menu_item(tst, out, line_index, expected_begin, expected_end,
                    expected_command):
  """Assert the first MenuItem on a line has the expected span and command.

  Scans the font-attribute segments of out.lines[line_index]; the first
  segment carrying a MenuItem (either as a bare attribute or inside a list
  of attributes) must cover [expected_begin, expected_end) and hold
  expected_command. Fails if no MenuItem is found on the line.
  """
  found = False
  for begin, end, attribute in out.font_attr_segs[line_index]:
    candidates = attribute if isinstance(attribute, list) else [attribute]
    menu_items = [attr for attr in candidates
                  if isinstance(attr, debugger_cli_common.MenuItem)]
    if not menu_items:
      continue
    tst.assertEqual(expected_begin, begin)
    tst.assertEqual(expected_end, end)
    tst.assertEqual(expected_command, menu_items[0].content)
    found = True
    break

  tst.assertTrue(found)
class AnalyzerCLISimpleMulAddTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._dump_root = tempfile.mkdtemp()
cls._is_gpu_available = test.is_gpu_available()
if cls._is_gpu_available:
cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
else:
cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"
cls._sess = session.Session()
with cls._sess as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
u_name = "simple_mul_add/u"
v_name = "simple_mul_add/v"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.Variable(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variables.Variable(v_init, name=v_name)
w = math_ops.matmul(u, v, name="simple_mul_add/matmul")
x = math_ops.add(w, w, name="simple_mul_add/add")
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls="file://%s" % cls._dump_root)
# Invoke Session.run().
run_metadata = config_pb2.RunMetadata()
sess.run(x, options=run_options, run_metadata=run_metadata)
cls._debug_dump = debug_data.DebugDumpDir(
cls._dump_root, partition_graphs=run_metadata.partition_graphs)
# Construct the analyzer.
cls._analyzer = analyzer_cli.DebugAnalyzer(cls._debug_dump)
# Construct the handler registry.
cls._registry = debugger_cli_common.CommandHandlerRegistry()
# Register command handlers.
cls._registry.register_command_handler(
"list_tensors",
cls._analyzer.list_tensors,
cls._analyzer.get_help("list_tensors"),
prefix_aliases=["lt"])
cls._registry.register_command_handler(
"node_info",
cls._analyzer.node_info,
cls._analyzer.get_help("node_info"),
prefix_aliases=["ni"])
cls._registry.register_command_handler(
"print_tensor",
cls._analyzer.print_tensor,
cls._analyzer.get_help("print_tensor"),
prefix_aliases=["pt"])
@classmethod
def tearDownClass(cls):
# Tear down temporary dump directory.
shutil.rmtree(cls._dump_root)
def testListTensors(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", [])
assert_listed_tensors(self, out, [
"simple_mul_add/u:0", "simple_mul_add/v:0", "simple_mul_add/u/read:0",
"simple_mul_add/v/read:0", "simple_mul_add/matmul:0",
"simple_mul_add/add:0"
], ["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"])
# Check the main menu.
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseTimeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "timestamp", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="timestamp",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInDumpSizeOrderWorks(self):
out = self._registry.dispatch_command("lt", ["-s", "dump_size"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="dump_size")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseDumpSizeOrderWorks(self):
out = self._registry.dispatch_command("lt", ["-s", "dump_size", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="dump_size",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsWithInvalidSortByFieldGivesError(self):
out = self._registry.dispatch_command("lt", ["-s", "foobar"])
self.assertIn("ValueError: Unsupported key to sort tensors by: foobar",
out.lines)
def testListTensorsInOpTypeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "op_type"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="op_type",
reverse=False)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseOpTypeOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "op_type", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="op_type",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInTensorNameOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "tensor_name"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="tensor_name",
reverse=False)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInReverseTensorNameOrderWorks(self):
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-s", "tensor_name", "-r"])
assert_listed_tensors(
self,
out, [
"simple_mul_add/u:0", "simple_mul_add/v:0",
"simple_mul_add/u/read:0", "simple_mul_add/v/read:0",
"simple_mul_add/matmul:0", "simple_mul_add/add:0"
],
["VariableV2", "VariableV2", "Identity", "Identity", "MatMul", "Add"],
sort_by="tensor_name",
reverse=True)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsFilterByNodeNameRegex(self):
out = self._registry.dispatch_command("list_tensors",
["--node_name_filter", ".*read.*"])
assert_listed_tensors(
self,
out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
["Identity", "Identity"],
node_name_regex=".*read.*")
out = self._registry.dispatch_command("list_tensors", ["-n", "^read"])
assert_listed_tensors(self, out, [], [], node_name_regex="^read")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorFilterByOpTypeRegex(self):
out = self._registry.dispatch_command("list_tensors",
["--op_type_filter", "Identity"])
assert_listed_tensors(
self,
out, ["simple_mul_add/u/read:0", "simple_mul_add/v/read:0"],
["Identity", "Identity"],
op_type_regex="Identity")
out = self._registry.dispatch_command("list_tensors",
["-t", "(Add|MatMul)"])
assert_listed_tensors(
self,
out, ["simple_mul_add/add:0", "simple_mul_add/matmul:0"],
["Add", "MatMul"],
op_type_regex="(Add|MatMul)")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorFilterByNodeNameRegexAndOpTypeRegex(self):
out = self._registry.dispatch_command(
"list_tensors", ["-t", "(Add|MatMul)", "-n", ".*add$"])
assert_listed_tensors(
self,
out, ["simple_mul_add/add:0"], ["Add"],
node_name_regex=".*add$",
op_type_regex="(Add|MatMul)")
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsFilterNanOrInf(self):
"""Test register and invoke a tensor filter."""
# First, register the filter.
self._analyzer.add_tensor_filter("has_inf_or_nan",
debug_data.has_inf_or_nan)
# Use shorthand alias for the command prefix.
out = self._registry.dispatch_command("lt", ["-f", "has_inf_or_nan"])
# This TF graph run did not generate any bad numerical values.
assert_listed_tensors(
self, out, [], [], tensor_filter_name="has_inf_or_nan")
# TODO(cais): A test with some actual bad numerical values.
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorNonexistentFilter(self):
"""Test attempt to use a nonexistent tensor filter."""
out = self._registry.dispatch_command("lt", ["-f", "foo_filter"])
self.assertEqual(["ERROR: There is no tensor filter named \"foo_filter\"."],
out.lines)
check_main_menu(self, out, list_tensors_enabled=False)
def testListTensorsInvalidOptions(self):
out = self._registry.dispatch_command("list_tensors", ["--bar"])
check_syntax_error_output(self, out, "list_tensors")
def testNodeInfoByNodeName(self):
  """node_info on a node name shows device, inputs and recipients."""
  node_name = "simple_mul_add/matmul"
  out = self._registry.dispatch_command("node_info", [node_name])

  # The matmul result feeds the add node twice (add(x, x) pattern).
  recipients = [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")]

  assert_node_attribute_lines(self, out, node_name, "MatMul",
                              self._main_device,
                              [("Identity", "simple_mul_add/u/read"),
                               ("Identity", "simple_mul_add/v/read")], [],
                              recipients, [])
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      list_inputs_node_name=node_name,
      print_tensor_node_name=node_name,
      list_outputs_node_name=node_name)

  # Verify that the node name is bold in the first line.
  self.assertEqual(
      [(len(out.lines[0]) - len(node_name), len(out.lines[0]), "bold")],
      out.font_attr_segs[0])
def testNodeInfoShowAttributes(self):
  """node_info -a additionally lists the node's attribute key/value pairs."""
  node_name = "simple_mul_add/matmul"
  out = self._registry.dispatch_command("node_info", ["-a", node_name])

  assert_node_attribute_lines(
      self,
      out,
      node_name,
      "MatMul",
      self._main_device, [("Identity", "simple_mul_add/u/read"),
                          ("Identity", "simple_mul_add/v/read")], [],
      [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
      # MatMul's proto attributes as rendered by the CLI.
      attr_key_val_pairs=[("transpose_a", "b: false"),
                          ("transpose_b", "b: false"),
                          ("T", "type: DT_DOUBLE")])
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      list_inputs_node_name=node_name,
      print_tensor_node_name=node_name,
      list_outputs_node_name=node_name)
def testNodeInfoShowDumps(self):
  """node_info -d lists dumped tensors with print_tensor menu shortcuts."""
  node_name = "simple_mul_add/matmul"
  out = self._registry.dispatch_command("node_info", ["-d", node_name])

  assert_node_attribute_lines(
      self,
      out,
      node_name,
      "MatMul",
      self._main_device, [("Identity", "simple_mul_add/u/read"),
                          ("Identity", "simple_mul_add/v/read")], [],
      [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
      num_dumped_tensors=1)
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      list_inputs_node_name=node_name,
      print_tensor_node_name=node_name,
      list_outputs_node_name=node_name)
  # Line 16 is expected to carry a clickable "pt <tensor> -n 0" shortcut;
  # the span starts after the line's leading whitespace.
  check_menu_item(self, out, 16,
                  len(out.lines[16]) - len(out.lines[16].strip()),
                  len(out.lines[16]), "pt %s:0 -n 0" % node_name)
def testNodeInfoShowStackTraceUnavailableIsIndicated(self):
  """node_info -t indicates when no Python stack trace is available."""
  # Detach the Python graph to simulate a dump without trace information.
  self._debug_dump.set_python_graph(None)

  node_name = "simple_mul_add/matmul"
  out = self._registry.dispatch_command("node_info", ["-t", node_name])

  assert_node_attribute_lines(
      self,
      out,
      node_name,
      "MatMul",
      self._main_device, [("Identity", "simple_mul_add/u/read"),
                          ("Identity", "simple_mul_add/v/read")], [],
      [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
      show_stack_trace=True, stack_trace_available=False)
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      list_inputs_node_name=node_name,
      print_tensor_node_name=node_name,
      list_outputs_node_name=node_name)
def testNodeInfoShowStackTraceAvailableWorks(self):
  """node_info -t renders the stack trace when a Python graph is attached."""
  # Attach the session's graph so trace information can be resolved.
  self._debug_dump.set_python_graph(self._sess.graph)

  node_name = "simple_mul_add/matmul"
  out = self._registry.dispatch_command("node_info", ["-t", node_name])

  assert_node_attribute_lines(
      self,
      out,
      node_name,
      "MatMul",
      self._main_device, [("Identity", "simple_mul_add/u/read"),
                          ("Identity", "simple_mul_add/v/read")], [],
      [("Add", "simple_mul_add/add"), ("Add", "simple_mul_add/add")], [],
      show_stack_trace=True, stack_trace_available=True)
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      list_inputs_node_name=node_name,
      print_tensor_node_name=node_name,
      list_outputs_node_name=node_name)
def testNodeInfoByTensorName(self):
  """node_info accepts a tensor name and resolves it to the owning node."""
  node_name = "simple_mul_add/u/read"
  tensor_name = node_name + ":0"
  out = self._registry.dispatch_command("node_info", [tensor_name])

  assert_node_attribute_lines(self, out, node_name, "Identity",
                              self._main_device,
                              [("VariableV2", "simple_mul_add/u")], [],
                              [("MatMul", "simple_mul_add/matmul")], [])
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      list_inputs_node_name=node_name,
      print_tensor_node_name=node_name,
      list_outputs_node_name=node_name)
def testNodeInfoNonexistentNodeName(self):
  """node_info on an unknown node emits a single red error line."""
  out = self._registry.dispatch_command("node_info", ["bar"])
  self.assertEqual(
      ["ERROR: There is no node named \"bar\" in the partition graphs"],
      out.lines)
  # Check color indicating error.
  self.assertEqual({0: [(0, 59, "red")]}, out.font_attr_segs)
  check_main_menu(self, out, list_tensors_enabled=True)
def testPrintTensor(self):
  """print_tensor renders dtype, shape and value of a dumped tensor."""
  node_name = "simple_mul_add/matmul"
  tensor_name = node_name + ":0"
  out = self._registry.dispatch_command(
      "print_tensor", [tensor_name], screen_info={"cols": 80})

  # NOTE(review): leading whitespace inside the expected-output literals
  # below may have been lost in this copy — verify against upstream.
  self.assertEqual([
      "Tensor \"%s:DebugIdentity\":" % tensor_name,
      " dtype: float64",
      " shape: (2, 1)",
      "",
      "array([[ 7.],",
      " [-2.]])",
  ], out.lines)

  # Lines 4-5 (the array body) carry tensor metadata annotations.
  self.assertIn("tensor_metadata", out.annotations)
  self.assertIn(4, out.annotations)
  self.assertIn(5, out.annotations)
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      node_info_node_name=node_name,
      list_inputs_node_name=node_name,
      list_outputs_node_name=node_name)
def testPrintTensorHighlightingRanges(self):
  """print_tensor --ranges highlights (bolds) elements inside the ranges."""
  node_name = "simple_mul_add/matmul"
  tensor_name = node_name + ":0"
  out = self._registry.dispatch_command(
      "print_tensor", [tensor_name, "--ranges", "[-inf, 0.0]"],
      screen_info={"cols": 80})

  # NOTE(review): leading whitespace inside the expected-output literals
  # may have been lost in this copy — verify against upstream.
  self.assertEqual([
      "Tensor \"%s:DebugIdentity\": " % tensor_name +
      "Highlighted([-inf, 0.0]): 1 of 2 element(s) (50.00%)",
      " dtype: float64",
      " shape: (2, 1)",
      "",
      "array([[ 7.],",
      " [-2.]])",
  ], out.lines)

  self.assertIn("tensor_metadata", out.annotations)
  self.assertIn(4, out.annotations)
  self.assertIn(5, out.annotations)
  # -2.0 falls within [-inf, 0.0] and is bolded on line 5.
  self.assertEqual([(8, 11, "bold")], out.font_attr_segs[5])

  # A list of ranges can also be given.
  out = self._registry.dispatch_command(
      "print_tensor", [tensor_name, "--ranges", "[[-inf, -5.5], [5.5, inf]]"],
      screen_info={"cols": 80})

  self.assertEqual([
      "Tensor \"%s:DebugIdentity\": " % tensor_name +
      "Highlighted([[-inf, -5.5], [5.5, inf]]): "
      "1 of 2 element(s) (50.00%)",
      " dtype: float64",
      " shape: (2, 1)",
      "",
      "array([[ 7.],",
      " [-2.]])",
  ], out.lines)

  self.assertIn("tensor_metadata", out.annotations)
  self.assertIn(4, out.annotations)
  self.assertIn(5, out.annotations)
  # Now only 7.0 matches, so the bold span is on line 4, not line 5.
  self.assertEqual([(9, 11, "bold")], out.font_attr_segs[4])
  self.assertNotIn(5, out.font_attr_segs)

  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      node_info_node_name=node_name,
      list_inputs_node_name=node_name,
      list_outputs_node_name=node_name)
def testPrintTensorWithSlicing(self):
  """print_tensor supports numpy-style slicing appended to the tensor name."""
  node_name = "simple_mul_add/matmul"
  tensor_name = node_name + ":0"
  out = self._registry.dispatch_command(
      "print_tensor", [tensor_name + "[1, :]"], screen_info={"cols": 80})

  # NOTE(review): leading whitespace inside the expected-output literals
  # may have been lost in this copy — verify against upstream.
  self.assertEqual([
      "Tensor \"%s:DebugIdentity[1, :]\":" % tensor_name, " dtype: float64",
      " shape: (1,)", "", "array([-2.])"
  ], out.lines)

  self.assertIn("tensor_metadata", out.annotations)
  self.assertIn(4, out.annotations)
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      node_info_node_name=node_name,
      list_inputs_node_name=node_name,
      list_outputs_node_name=node_name)
def testPrintTensorInvalidSlicingString(self):
  """A malformed slicing expression surfaces a ValueError in the output."""
  node_name = "simple_mul_add/matmul"
  tensor_name = node_name + ":0"
  out = self._registry.dispatch_command(
      "print_tensor", [tensor_name + "[1, foo()]"], screen_info={"cols": 80})

  # The first line names the failing command; the second-to-last line
  # carries the exception type and message.
  self.assertEqual("Error occurred during handling of command: print_tensor "
                   + tensor_name + "[1, foo()]:", out.lines[0])
  self.assertEqual("ValueError: Invalid tensor-slicing string.",
                   out.lines[-2])
def testPrintTensorValidExplicitNumber(self):
  """print_tensor -n 0 selects the (only) dump of the tensor explicitly."""
  node_name = "simple_mul_add/matmul"
  tensor_name = node_name + ":0"
  out = self._registry.dispatch_command(
      "print_tensor", [tensor_name, "-n", "0"], screen_info={"cols": 80})

  # NOTE(review): leading whitespace inside the expected-output literals
  # may have been lost in this copy — verify against upstream.
  self.assertEqual([
      "Tensor \"%s:DebugIdentity\":" % tensor_name,
      " dtype: float64",
      " shape: (2, 1)",
      "",
      "array([[ 7.],",
      " [-2.]])",
  ], out.lines)

  self.assertIn("tensor_metadata", out.annotations)
  self.assertIn(4, out.annotations)
  self.assertIn(5, out.annotations)
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      node_info_node_name=node_name,
      list_inputs_node_name=node_name,
      list_outputs_node_name=node_name)
def testPrintTensorInvalidExplicitNumber(self):
  """print_tensor -n with an out-of-range dump index reports an error."""
  node_name = "simple_mul_add/matmul"
  tensor_name = node_name + ":0"
  out = self._registry.dispatch_command(
      "print_tensor", [tensor_name, "-n", "1"], screen_info={"cols": 80})

  self.assertEqual([
      "ERROR: Invalid number (1) for tensor simple_mul_add/matmul:0, "
      "which generated one dump."
  ], out.lines)

  # No tensor value was printed, so no metadata annotation is attached.
  self.assertNotIn("tensor_metadata", out.annotations)

  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      node_info_node_name=node_name,
      list_inputs_node_name=node_name,
      list_outputs_node_name=node_name)
def testPrintTensorMissingOutputSlotLeadsToOnlyDumpedTensorPrinted(self):
  """Omitting the output slot defaults to the node's only dumped tensor."""
  node_name = "simple_mul_add/matmul"
  out = self._registry.dispatch_command("print_tensor", [node_name])

  # NOTE(review): leading whitespace inside the expected-output literals
  # may have been lost in this copy — verify against upstream.
  self.assertEqual([
      "Tensor \"%s:0:DebugIdentity\":" % node_name, " dtype: float64",
      " shape: (2, 1)", "", "array([[ 7.],", " [-2.]])"
  ], out.lines)
  check_main_menu(
      self,
      out,
      list_tensors_enabled=True,
      node_info_node_name=node_name,
      list_inputs_node_name=node_name,
      list_outputs_node_name=node_name)
def testPrintTensorNonexistentNodeName(self):
  """print_tensor on an unknown node name reports an error."""
  out = self._registry.dispatch_command(
      "print_tensor", ["simple_mul_add/matmul/foo:0"])
  self.assertEqual([
      "ERROR: Node \"simple_mul_add/matmul/foo\" does not exist in partition "
      "graphs"
  ], out.lines)
  check_main_menu(self, out, list_tensors_enabled=True)
def testAddGetTensorFilterLambda(self):
  """A lambda registered as a tensor filter can be retrieved and invoked."""
  debug_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
  debug_analyzer.add_tensor_filter("foo_filter", lambda x, y: True)
  retrieved_filter = debug_analyzer.get_tensor_filter("foo_filter")
  self.assertTrue(retrieved_filter(None, None))
def testAddGetTensorFilterNestedFunction(self):
  """A locally-defined function can be registered and retrieved as a filter."""
  debug_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)

  def foo_filter(unused_arg_0, unused_arg_1):
    return True

  debug_analyzer.add_tensor_filter("foo_filter", foo_filter)
  retrieved_filter = debug_analyzer.get_tensor_filter("foo_filter")
  self.assertTrue(retrieved_filter(None, None))
def testAddTensorFilterEmptyName(self):
  """Registering a filter under an empty name raises ValueError."""
  debug_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
  accept_all = lambda datum, tensor: True

  with self.assertRaisesRegexp(
      ValueError, "Input argument filter_name cannot be empty."):
    debug_analyzer.add_tensor_filter("", accept_all)
def testAddTensorFilterNonStrName(self):
  """Registering a filter under a non-string name raises TypeError."""
  debug_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
  accept_all = lambda datum, tensor: True

  expected_message = (
      "Input argument filter_name is expected to be str, but is not")
  with self.assertRaisesRegexp(TypeError, expected_message):
    debug_analyzer.add_tensor_filter(1, accept_all)
def testAddGetTensorFilterNonCallable(self):
  """Registering a non-callable as a filter raises TypeError."""
  debug_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)

  expected_message = (
      "Input argument filter_callable is expected to be callable, "
      "but is not.")
  with self.assertRaisesRegexp(TypeError, expected_message):
    debug_analyzer.add_tensor_filter("foo_filter", "bar")
def testGetNonexistentTensorFilter(self):
  """Looking up an unregistered filter name raises ValueError."""
  debug_analyzer = analyzer_cli.DebugAnalyzer(self._debug_dump)
  debug_analyzer.add_tensor_filter("foo_filter", lambda datum, tensor: True)

  with self.assertRaisesRegexp(
      ValueError, "There is no tensor filter named \"bar\""):
    debug_analyzer.get_tensor_filter("bar")
class AnalyzerCLIPrintLargeTensorTest(test_util.TensorFlowTestCase):
  """Tests print_tensor's ellipsis behavior on tensors above the threshold."""

  @classmethod
  def setUpClass(cls):
    # Temporary directory receiving the debug dumps for the whole class.
    cls._dump_root = tempfile.mkdtemp()

    with session.Session() as sess:
      # 2400 elements should exceed the default threshold (2000).
      x = constant_op.constant(np.zeros([300, 8]), name="large_tensors/x")

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls="file://%s" % cls._dump_root)

      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run(x, options=run_options, run_metadata=run_metadata)

    cls._debug_dump = debug_data.DebugDumpDir(
        cls._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Construct the analyzer.
    cls._analyzer = analyzer_cli.DebugAnalyzer(cls._debug_dump)

    # Construct the handler registry.
    cls._registry = debugger_cli_common.CommandHandlerRegistry()

    # Register command handler.
    cls._registry.register_command_handler(
        "print_tensor",
        cls._analyzer.print_tensor,
        cls._analyzer.get_help("print_tensor"),
        prefix_aliases=["pt"])

  @classmethod
  def tearDownClass(cls):
    # Tear down temporary dump directory.
    shutil.rmtree(cls._dump_root)

  def testPrintLargeTensorWithoutAllOption(self):
    """Without -a, large tensor printouts are abbreviated with ellipses."""
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0"], screen_info={"cols": 80})

    # Assert that ellipses are present in the tensor value printout.
    self.assertIn("...,", out.lines[4])

    # 2100 still exceeds 2000.
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0[:, 0:7]"],
        screen_info={"cols": 80})

    self.assertIn("...,", out.lines[4])

  def testPrintLargeTensorWithAllOption(self):
    """With -a/--all, the full value is printed without ellipses."""
    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0", "-a"],
        screen_info={"cols": 80})

    # Assert that ellipses are not present in the tensor value printout.
    self.assertNotIn("...,", out.lines[4])

    out = self._registry.dispatch_command(
        "print_tensor", ["large_tensors/x:0[:, 0:7]", "--all"],
        screen_info={"cols": 80})

    self.assertNotIn("...,", out.lines[4])
class AnalyzerCLIControlDepTest(test_util.TensorFlowTestCase):
  """Tests how node_info / list_inputs / list_outputs render control deps.

  NOTE(review): leading whitespace inside the expected-output string
  literals in this class may have been lost in this copy — verify against
  upstream before relying on exact column positions.
  """

  @classmethod
  def setUpClass(cls):
    cls._dump_root = tempfile.mkdtemp()

    cls._is_gpu_available = test.is_gpu_available()
    if cls._is_gpu_available:
      cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
    else:
      cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"

    with session.Session() as sess:
      x_init_val = np.array([5.0, 3.0])
      x_init = constant_op.constant(x_init_val, shape=[2])
      x = variables.Variable(x_init, name="control_deps/x")

      y = math_ops.add(x, x, name="control_deps/y")
      # ctrl_dep_y is an Identity of y with a control dependency on x.
      y = control_flow_ops.with_dependencies(
          [x], y, name="control_deps/ctrl_dep_y")

      z = math_ops.multiply(x, y, name="control_deps/z")
      # ctrl_dep_z adds control dependencies on both x and y.
      z = control_flow_ops.with_dependencies(
          [x, y], z, name="control_deps/ctrl_dep_z")

      x.initializer.run()

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity"],
          debug_urls="file://%s" % cls._dump_root)

      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run(z, options=run_options, run_metadata=run_metadata)

    debug_dump = debug_data.DebugDumpDir(
        cls._dump_root, partition_graphs=run_metadata.partition_graphs)

    # Construct the analyzer.
    analyzer = analyzer_cli.DebugAnalyzer(debug_dump)

    # Construct the handler registry.
    cls._registry = debugger_cli_common.CommandHandlerRegistry()

    # Register command handlers.
    cls._registry.register_command_handler(
        "node_info",
        analyzer.node_info,
        analyzer.get_help("node_info"),
        prefix_aliases=["ni"])
    cls._registry.register_command_handler(
        "list_inputs",
        analyzer.list_inputs,
        analyzer.get_help("list_inputs"),
        prefix_aliases=["li"])
    cls._registry.register_command_handler(
        "list_outputs",
        analyzer.list_outputs,
        analyzer.get_help("list_outputs"),
        prefix_aliases=["lo"])

  @classmethod
  def tearDownClass(cls):
    # Tear down temporary dump directory.
    shutil.rmtree(cls._dump_root)

  def testNodeInfoWithControlDependencies(self):
    """node_info lists control inputs and control recipients of a node."""
    # Call node_info on a node with control inputs.
    out = self._registry.dispatch_command("node_info",
                                          ["control_deps/ctrl_dep_y"])

    assert_node_attribute_lines(
        self, out, "control_deps/ctrl_dep_y", "Identity",
        self._main_device, [("Add", "control_deps/y")],
        [("VariableV2", "control_deps/x")],
        [("Mul", "control_deps/z")],
        [("Identity", "control_deps/ctrl_dep_z")])

    # Call node info on a node with control recipients.
    out = self._registry.dispatch_command("ni", ["control_deps/x"])

    assert_node_attribute_lines(self, out, "control_deps/x", "VariableV2",
                                self._main_device, [], [],
                                [("Identity", "control_deps/x/read")],
                                [("Identity", "control_deps/ctrl_dep_y"),
                                 ("Identity", "control_deps/ctrl_dep_z")])

    # Verify the menu items (command shortcuts) in the output.
    check_menu_item(self, out, 10,
                    len(out.lines[10]) - len("control_deps/x/read"),
                    len(out.lines[10]), "ni -a -d control_deps/x/read")
    # The ordering of the two control recipients is not guaranteed;
    # determine which line holds which node before checking the menus.
    if out.lines[13].endswith("control_deps/ctrl_dep_y"):
      y_line = 13
      z_line = 14
    else:
      y_line = 14
      z_line = 13
    check_menu_item(self, out, y_line,
                    len(out.lines[y_line]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[y_line]), "ni -a -d control_deps/ctrl_dep_y")
    check_menu_item(self, out, z_line,
                    len(out.lines[z_line]) - len("control_deps/ctrl_dep_z"),
                    len(out.lines[z_line]), "ni -a -d control_deps/ctrl_dep_z")

  def testListInputsNonRecursiveNoControl(self):
    """List inputs non-recursively, without any control inputs."""
    # Do not include node op types.
    node_name = "control_deps/z"
    out = self._registry.dispatch_command("list_inputs", [node_name])

    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
        "|- (1) control_deps/x/read", "| |- ...",
        "|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
        " (d): recursion depth = d."
    ], out.lines)

    # Include node op types.
    out = self._registry.dispatch_command("li", ["-t", node_name])

    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
        "|- (1) [Identity] control_deps/x/read", "| |- ...",
        "|- (1) [Identity] control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
        " (d): recursion depth = d.", " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)

    # Verify that the node name has bold attribute.
    self.assertEqual([(16, 16 + len(node_name), "bold")], out.font_attr_segs[0])

    # Verify the menu items (command shortcuts) in the output.
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/x/read"),
                    len(out.lines[1]), "li -c -r control_deps/x/read")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")

  def testListInputsNonRecursiveNoControlUsingTensorName(self):
    """List inputs using the name of an output tensor of the node."""
    # Do not include node op types.
    node_name = "control_deps/z"
    tensor_name = node_name + ":0"
    out = self._registry.dispatch_command("list_inputs", [tensor_name])

    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1):" % node_name,
        "|- (1) control_deps/x/read", "| |- ...",
        "|- (1) control_deps/ctrl_dep_y", " |- ...", "", "Legend:",
        " (d): recursion depth = d."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/x/read"),
                    len(out.lines[1]), "li -c -r control_deps/x/read")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")

  def testListInputsNonRecursiveWithControls(self):
    """List inputs non-recursively, with control inputs."""
    node_name = "control_deps/ctrl_dep_z"
    out = self._registry.dispatch_command("li", ["-t", node_name, "-c"])

    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 1, " % node_name +
        "control inputs included):", "|- (1) [Mul] control_deps/z", "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y", "| |- ...",
        "|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/z"),
                    len(out.lines[1]), "li -c -r control_deps/z")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "li -c -r control_deps/ctrl_dep_y")
    check_menu_item(self, out, 5,
                    len(out.lines[5]) - len("control_deps/x"),
                    len(out.lines[5]), "li -c -r control_deps/x")

  def testListInputsRecursiveWithControls(self):
    """List inputs recursively, with control inputs."""
    node_name = "control_deps/ctrl_dep_z"
    out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])

    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 20, " % node_name +
        "control inputs included):", "|- (1) [Mul] control_deps/z",
        "| |- (2) [Identity] control_deps/x/read",
        "| | |- (3) [VariableV2] control_deps/x",
        "| |- (2) [Identity] control_deps/ctrl_dep_y",
        "| |- (3) [Add] control_deps/y",
        "| | |- (4) [Identity] control_deps/x/read",
        "| | | |- (5) [VariableV2] control_deps/x",
        "| | |- (4) [Identity] control_deps/x/read",
        "| | |- (5) [VariableV2] control_deps/x",
        "| |- (3) (Ctrl) [VariableV2] control_deps/x",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
        "| |- (2) [Add] control_deps/y",
        "| | |- (3) [Identity] control_deps/x/read",
        "| | | |- (4) [VariableV2] control_deps/x",
        "| | |- (3) [Identity] control_deps/x/read",
        "| | |- (4) [VariableV2] control_deps/x",
        "| |- (2) (Ctrl) [VariableV2] control_deps/x",
        "|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/z"),
                    len(out.lines[1]), "li -c -r control_deps/z")
    check_menu_item(self, out, 11,
                    len(out.lines[11]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[11]), "li -c -r control_deps/ctrl_dep_y")
    check_menu_item(self, out, 18,
                    len(out.lines[18]) - len("control_deps/x"),
                    len(out.lines[18]), "li -c -r control_deps/x")

  def testListInputsRecursiveWithControlsWithDepthLimit(self):
    """List inputs recursively, with control inputs and a depth limit."""
    node_name = "control_deps/ctrl_dep_z"
    out = self._registry.dispatch_command(
        "li", ["-c", "-r", "-t", "-d", "2", node_name])

    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 2, " % node_name +
        "control inputs included):", "|- (1) [Mul] control_deps/z",
        "| |- (2) [Identity] control_deps/x/read", "| | |- ...",
        "| |- (2) [Identity] control_deps/ctrl_dep_y", "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
        "| |- (2) [Add] control_deps/y", "| | |- ...",
        "| |- (2) (Ctrl) [VariableV2] control_deps/x",
        "|- (1) (Ctrl) [VariableV2] control_deps/x", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/z"),
                    len(out.lines[1]), "li -c -r control_deps/z")
    check_menu_item(self, out, 10,
                    len(out.lines[10]) - len("control_deps/x"),
                    len(out.lines[10]), "li -c -r control_deps/x")

  def testListInputsNodeWithoutInputs(self):
    """List the inputs to a node without any input."""
    node_name = "control_deps/x"
    out = self._registry.dispatch_command("li", ["-c", "-r", "-t", node_name])

    self.assertEqual([
        "Inputs to node \"%s\" (Depth limit = 20, control " % node_name +
        "inputs included):", " [None]", "", "Legend:",
        " (d): recursion depth = d.", " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."
    ], out.lines)
    check_main_menu(
        self,
        out,
        list_tensors_enabled=True,
        node_info_node_name=node_name,
        print_tensor_node_name=node_name,
        list_outputs_node_name=node_name)

  def testListInputsNonexistentNode(self):
    """list_inputs on an unknown node name reports an error."""
    out = self._registry.dispatch_command(
        "list_inputs", ["control_deps/z/foo"])
    self.assertEqual([
        "ERROR: There is no node named \"control_deps/z/foo\" in the "
        "partition graphs"], out.lines)

  def testListRecipientsRecursiveWithControlsWithDepthLimit(self):
    """List recipients recursively, with control inputs and a depth limit."""
    out = self._registry.dispatch_command(
        "lo", ["-c", "-r", "-t", "-d", "1", "control_deps/x"])

    self.assertEqual([
        "Recipients of node \"control_deps/x\" (Depth limit = 1, control "
        "recipients included):",
        "|- (1) [Identity] control_deps/x/read",
        "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_y",
        "| |- ...",
        "|- (1) (Ctrl) [Identity] control_deps/ctrl_dep_z",
        "", "Legend:", " (d): recursion depth = d.",
        " (Ctrl): Control input.",
        " [Op]: Input node has op type Op."], out.lines)
    check_menu_item(self, out, 1,
                    len(out.lines[1]) - len("control_deps/x/read"),
                    len(out.lines[1]), "lo -c -r control_deps/x/read")
    check_menu_item(self, out, 3,
                    len(out.lines[3]) - len("control_deps/ctrl_dep_y"),
                    len(out.lines[3]), "lo -c -r control_deps/ctrl_dep_y")
    check_menu_item(self, out, 5,
                    len(out.lines[5]) - len("control_deps/ctrl_dep_z"),
                    len(out.lines[5]), "lo -c -r control_deps/ctrl_dep_z")

    # Verify the bold attribute of the node name.
    self.assertEqual([(20, 20 + len("control_deps/x"), "bold")],
                     out.font_attr_segs[0])
class AnalyzerCLIWhileLoopTest(test_util.TensorFlowTestCase):
  """Tests CLI behavior when one tensor produces multiple dumps (a loop).

  NOTE(review): leading whitespace inside the expected-output string
  literals in this class may have been lost in this copy — verify against
  upstream.
  """

  @classmethod
  def setUpClass(cls):
    cls._dump_root = tempfile.mkdtemp()

    with session.Session() as sess:
      # A 10-iteration while loop; each iteration dumps while/Identity once.
      loop_var = constant_op.constant(0, name="while_loop_test/loop_var")
      cond = lambda loop_var: math_ops.less(loop_var, 10)
      body = lambda loop_var: math_ops.add(loop_var, 1)
      while_loop = control_flow_ops.while_loop(
          cond, body, [loop_var], parallel_iterations=1)

      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_url = "file://%s" % cls._dump_root

      watch_opts = run_options.debug_options.debug_tensor_watch_opts

      # Add debug tensor watch for "while/Identity".
      watch = watch_opts.add()
      watch.node_name = "while/Identity"
      watch.output_slot = 0
      watch.debug_ops.append("DebugIdentity")
      watch.debug_urls.append(debug_url)

      # Invoke Session.run().
      run_metadata = config_pb2.RunMetadata()
      sess.run(while_loop, options=run_options, run_metadata=run_metadata)

    cls._debug_dump = debug_data.DebugDumpDir(
        cls._dump_root, partition_graphs=run_metadata.partition_graphs)

    cls._analyzer = analyzer_cli.DebugAnalyzer(cls._debug_dump)
    cls._registry = debugger_cli_common.CommandHandlerRegistry()
    cls._registry.register_command_handler(
        "list_tensors",
        cls._analyzer.list_tensors,
        cls._analyzer.get_help("list_tensors"),
        prefix_aliases=["lt"])
    cls._registry.register_command_handler(
        "print_tensor",
        cls._analyzer.print_tensor,
        cls._analyzer.get_help("print_tensor"),
        prefix_aliases=["pt"])

  @classmethod
  def tearDownClass(cls):
    # Tear down temporary dump directory.
    shutil.rmtree(cls._dump_root)

  def testMultipleDumpsPrintTensorNoNumber(self):
    """Without -n, pt lists all dumps of the multiply-dumped tensor."""
    output = self._registry.dispatch_command("pt", ["while/Identity:0"])

    self.assertEqual("Tensor \"while/Identity:0\" generated 10 dumps:",
                     output.lines[0])

    # One numbered entry per loop iteration, each ending with the debug op.
    for i in xrange(10):
      self.assertTrue(output.lines[i + 1].startswith("#%d" % i))
      self.assertTrue(output.lines[i + 1].endswith(
          " ms] while/Identity:0:DebugIdentity"))

    self.assertEqual(
        "You can use the -n (--number) flag to specify which dump to print.",
        output.lines[-3])
    self.assertEqual("For example:", output.lines[-2])
    self.assertEqual(" print_tensor while/Identity:0 -n 0", output.lines[-1])

  def testMultipleDumpsPrintTensorWithNumber(self):
    """pt -n <i> prints exactly the i-th dump of the tensor."""
    for i in xrange(5):
      output = self._registry.dispatch_command(
          "pt", ["while/Identity:0", "-n", "%d" % i])

      self.assertEqual("Tensor \"while/Identity:0:DebugIdentity (dump #%d)\":" %
                       i, output.lines[0])
      self.assertEqual(" dtype: int32", output.lines[1])
      self.assertEqual(" shape: ()", output.lines[2])
      self.assertEqual("", output.lines[3])
      # The loop counter value equals the dump number.
      self.assertTrue(output.lines[4].startswith("array(%d" % i))
      self.assertTrue(output.lines[4].endswith(")"))

  def testMultipleDumpsPrintTensorInvalidNumber(self):
    """pt -n with an out-of-range dump index reports an error."""
    output = self._registry.dispatch_command("pt",
                                             ["while/Identity:0", "-n", "10"])
    self.assertEqual([
        "ERROR: Specified number (10) exceeds the number of available dumps "
        "(10) for tensor while/Identity:0"
    ], output.lines)
# Standard test entry point.
if __name__ == "__main__":
  googletest.main()
# License: apache-2.0
# Source: kernel-sanders/arsenic-mobile, Dependencies/Twisted-13.0.0/twisted/test/test_unix.py
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorUNIX} and L{IReactorUNIXDatagram}.
"""
import stat, os, sys, types
import socket
from twisted.internet import interfaces, reactor, protocol, error, address, defer, utils
from twisted.python import lockfile
from twisted.trial import unittest
from twisted.test.test_tcp import MyServerFactory, MyClientFactory
class FailedConnectionClientFactory(protocol.ClientFactory):
    """
    Client factory which errbacks a Deferred with the failure reason when
    the connection attempt fails.
    """
    def __init__(self, onFail):
        # Deferred fired (errback) with the connection-failure reason.
        self.onFail = onFail

    def clientConnectionFailed(self, connector, reason):
        self.onFail.errback(reason)
class UnixSocketTestCase(unittest.TestCase):
"""
Test unix sockets.
"""
def test_peerBind(self):
    """
    The address passed to the server factory's C{buildProtocol} method and
    the address returned by the connected protocol's transport's C{getPeer}
    method match the address the client socket is bound to.
    """
    filename = self.mktemp()
    peername = self.mktemp()
    serverFactory = MyServerFactory()
    connMade = serverFactory.protocolConnectionMade = defer.Deferred()
    unixPort = reactor.listenUNIX(filename, serverFactory)
    self.addCleanup(unixPort.stopListening)
    unixSocket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    self.addCleanup(unixSocket.close)
    # Bind the client socket to an explicit path so the server can observe
    # it as the peer address.
    unixSocket.bind(peername)
    unixSocket.connect(filename)
    def cbConnMade(proto):
        expected = address.UNIXAddress(peername)
        self.assertEqual(serverFactory.peerAddresses, [expected])
        self.assertEqual(proto.transport.getPeer(), expected)
    connMade.addCallback(cbConnMade)
    return connMade
def test_dumber(self):
"""
L{IReactorUNIX.connectUNIX} can be used to connect a client to a server
started with L{IReactorUNIX.listenUNIX}.
"""
filename = self.mktemp()
serverFactory = MyServerFactory()
serverConnMade = defer.Deferred()
serverFactory.protocolConnectionMade = serverConnMade
unixPort = reactor.listenUNIX(filename, serverFactory)
self.addCleanup(unixPort.stopListening)
clientFactory = MyClientFactory()
clientConnMade = defer.Deferred()
clientFactory.protocolConnectionMade = clientConnMade
c = reactor.connectUNIX(filename, clientFactory)
d = defer.gatherResults([serverConnMade, clientConnMade])
def allConnected((serverProtocol, clientProtocol)):
# Incidental assertion which may or may not be redundant with some
# other test. This probably deserves its own test method.
self.assertEqual(clientFactory.peerAddresses,
[address.UNIXAddress(filename)])
clientProtocol.transport.loseConnection()
serverProtocol.transport.loseConnection()
d.addCallback(allConnected)
return d
def test_pidFile(self):
"""
A lockfile is created and locked when L{IReactorUNIX.listenUNIX} is
called and released when the Deferred returned by the L{IListeningPort}
provider's C{stopListening} method is called back.
"""
filename = self.mktemp()
serverFactory = MyServerFactory()
serverConnMade = defer.Deferred()
serverFactory.protocolConnectionMade = serverConnMade
unixPort = reactor.listenUNIX(filename, serverFactory, wantPID=True)
self.assertTrue(lockfile.isLocked(filename + ".lock"))
# XXX This part would test something about the checkPID parameter, but
# it doesn't actually. It should be rewritten to test the several
# different possible behaviors. -exarkun
clientFactory = MyClientFactory()
clientConnMade = defer.Deferred()
clientFactory.protocolConnectionMade = clientConnMade
c = reactor.connectUNIX(filename, clientFactory, checkPID=1)
d = defer.gatherResults([serverConnMade, clientConnMade])
def _portStuff((serverProtocol, clientProto)):
# Incidental assertion which may or may not be redundant with some
# other test. This probably deserves its own test method.
self.assertEqual(clientFactory.peerAddresses,
[address.UNIXAddress(filename)])
clientProto.transport.loseConnection()
serverProtocol.transport.loseConnection()
return unixPort.stopListening()
d.addCallback(_portStuff)
def _check(ignored):
self.failIf(lockfile.isLocked(filename + ".lock"), 'locked')
d.addCallback(_check)
return d
def test_socketLocking(self):
    """
    L{IReactorUNIX.listenUNIX} raises L{error.CannotListenError} if passed
    the name of a file on which a server is already listening.
    """
    filename = self.mktemp()
    serverFactory = MyServerFactory()
    unixPort = reactor.listenUNIX(filename, serverFactory, wantPID=True)

    # While the first port holds the lock, a second listen must fail.
    self.assertRaises(
        error.CannotListenError,
        reactor.listenUNIX, filename, serverFactory, wantPID=True)

    def stoppedListening(ign):
        # Once the first port has released the lock, listening on the same
        # path succeeds again (the inner name deliberately shadows the
        # outer unixPort; the new port is stopped before returning).
        unixPort = reactor.listenUNIX(filename, serverFactory, wantPID=True)
        return unixPort.stopListening()

    return unixPort.stopListening().addCallback(stoppedListening)
def _uncleanSocketTest(self, callback):
    """
    Run a child process which listens on a UNIX socket (with C{wantPID})
    and then exits without cleaning up, then fire C{callback} so the
    caller can make assertions about the leftover socket and lock state.
    """
    self.filename = self.mktemp()
    source = ("from twisted.internet import protocol, reactor\n"
              "reactor.listenUNIX(%r, protocol.ServerFactory(), wantPID=True)\n") % (self.filename,)
    environment = {'PYTHONPATH': os.pathsep.join(sys.path)}
    result = utils.getProcessValue(
        sys.executable, ("-u", "-c", source), env=environment)
    result.addCallback(callback)
    return result
def test_uncleanServerSocketLocking(self):
    """
    If passed C{True} for the C{wantPID} parameter, a server can be started
    listening with L{IReactorUNIX.listenUNIX} when passed the name of a
    file on which a previous server which has not exited cleanly has been
    listening using the C{wantPID} option.
    """
    def cbChildExited(ignored):
        # If this next call succeeds, our lock handling is correct.
        port = reactor.listenUNIX(
            self.filename, MyServerFactory(), wantPID=True)
        return port.stopListening()
    return self._uncleanSocketTest(cbChildExited)
def test_connectToUncleanServer(self):
    """
    If passed C{True} for the C{checkPID} parameter, a client connection
    attempt made with L{IReactorUNIX.connectUNIX} fails with
    L{error.BadFileError}.
    """
    def cbChildExited(ignored):
        connectionFailure = defer.Deferred()
        clientFactory = FailedConnectionClientFactory(connectionFailure)
        reactor.connectUNIX(self.filename, clientFactory, checkPID=True)
        return self.assertFailure(connectionFailure, error.BadFileError)
    return self._uncleanSocketTest(cbChildExited)
def _reprTest(self, serverFactory, factoryName):
    """
    Test the C{__str__} and C{__repr__} implementations of a UNIX port when
    used with the given factory.
    """
    path = self.mktemp()
    port = reactor.listenUNIX(path, serverFactory)
    # While listening, both string forms mention the factory name and the
    # socket path.
    listeningString = "<%s on %r>" % (factoryName, path)
    self.assertEqual(repr(port), listeningString)
    self.assertEqual(str(port), listeningString)
    deferred = defer.maybeDeferred(port.stopListening)
    def cbStopped(ignored):
        # After shutdown, both string forms report the port as idle.
        stoppedString = "<%s (not listening)>" % (factoryName,)
        self.assertEqual(repr(port), stoppedString)
        self.assertEqual(str(port), stoppedString)
    deferred.addCallback(cbStopped)
    return deferred
def test_reprWithClassicFactory(self):
    """
    The two string representations of the L{IListeningPort} returned by
    L{IReactorUNIX.listenUNIX} contains the name of the classic factory
    class being used and the filename on which the port is listening or
    indicates that the port is not listening.
    """
    # A minimal old-style (classic) class providing just the factory
    # lifecycle hooks the port calls.  Its qualified name must match the
    # string passed to _reprTest below.
    class ClassicFactory:
        def doStart(self):
            pass
        def doStop(self):
            pass
    # Sanity check: confirm this really is an old-style class (Python 2
    # only; types.ClassType does not exist on Python 3).
    self.assertIsInstance(ClassicFactory, types.ClassType)
    return self._reprTest(
        ClassicFactory(), "twisted.test.test_unix.ClassicFactory")
def test_reprWithNewStyleFactory(self):
    """
    The two string representations of the L{IListeningPort} returned by
    L{IReactorUNIX.listenUNIX} contains the name of the new-style factory
    class being used and the filename on which the port is listening or
    indicates that the port is not listening.
    """
    # A minimal new-style class providing just the factory lifecycle hooks
    # the port calls.  Its qualified name must match the string passed to
    # _reprTest below.
    class NewStyleFactory(object):
        def doStart(self):
            pass
        def doStop(self):
            pass
    # Sanity check: confirm this really is a new-style class.
    self.assertIsInstance(NewStyleFactory, type)
    return self._reprTest(
        NewStyleFactory(), "twisted.test.test_unix.NewStyleFactory")
class ClientProto(protocol.ConnectedDatagramProtocol):
    """
    A connected datagram protocol which records its lifecycle and the data
    it receives, exposing Deferreds which fire on startup and on the first
    received datagram.
    """
    started = False
    stopped = False
    gotback = None

    def __init__(self):
        self.deferredStarted = defer.Deferred()
        self.deferredGotBack = defer.Deferred()

    def startProtocol(self):
        # Record startup and let waiters proceed.
        self.started = True
        self.deferredStarted.callback(None)

    def stopProtocol(self):
        self.stopped = True

    def datagramReceived(self, data):
        # Remember the payload, then notify waiters.
        self.gotback = data
        self.deferredGotBack.callback(None)
class ServerProto(protocol.DatagramProtocol):
    """
    A datagram protocol which records its lifecycle, remembers the first
    datagram (and sender address) it receives, and replies with "hi back".
    """
    started = False
    stopped = False
    gotwhat = None
    gotfrom = None

    def __init__(self):
        self.deferredStarted = defer.Deferred()
        self.deferredGotWhat = defer.Deferred()

    def startProtocol(self):
        # Record startup and let waiters proceed.
        self.started = True
        self.deferredStarted.callback(None)

    def stopProtocol(self):
        self.stopped = True

    def datagramReceived(self, data, addr):
        # Remember the sender, echo a reply, then record the payload and
        # notify waiters (preserving the original statement order).
        self.gotfrom = addr
        self.transport.write("hi back", addr)
        self.gotwhat = data
        self.deferredGotWhat.callback(None)
class DatagramUnixSocketTestCase(unittest.TestCase):
    """
    Test datagram UNIX sockets.
    """
    def test_exchange(self):
        """
        Test that a datagram can be sent to and received by a server and vice
        versa.
        """
        clientaddr = self.mktemp()
        serveraddr = self.mktemp()
        sp = ServerProto()
        cp = ClientProto()
        s = reactor.listenUNIXDatagram(serveraddr, sp)
        self.addCleanup(s.stopListening)
        c = reactor.connectUNIXDatagram(serveraddr, cp, bindAddress=clientaddr)
        self.addCleanup(c.stopListening)
        # Wait until both protocols have started before sending anything.
        d = defer.gatherResults([sp.deferredStarted, cp.deferredStarted])
        def write(ignored):
            # Send a datagram, then wait for the server to receive it and
            # for the client to receive the reply.
            cp.transport.write("hi")
            return defer.gatherResults([sp.deferredGotWhat,
                                        cp.deferredGotBack])
        def _cbTestExchange(ignored):
            self.assertEqual("hi", sp.gotwhat)
            self.assertEqual(clientaddr, sp.gotfrom)
            self.assertEqual("hi back", cp.gotback)
        d.addCallback(write)
        d.addCallback(_cbTestExchange)
        return d

    def test_cannotListen(self):
        """
        L{IReactorUNIXDatagram.listenUNIXDatagram} raises
        L{error.CannotListenError} if the unix socket specified is already in
        use.
        """
        addr = self.mktemp()
        p = ServerProto()
        s = reactor.listenUNIXDatagram(addr, p)
        # A second listen on the same address must fail while the first
        # port is active.
        self.failUnlessRaises(error.CannotListenError, reactor.listenUNIXDatagram, addr, p)
        s.stopListening()
        os.unlink(addr)
    # test connecting to bound and connected (somewhere else) address

    def _reprTest(self, serverProto, protocolName):
        """
        Test the C{__str__} and C{__repr__} implementations of a UNIX datagram
        port when used with the given protocol.
        """
        filename = self.mktemp()
        unixPort = reactor.listenUNIXDatagram(filename, serverProto)
        # While listening, both string forms mention the protocol name and
        # the socket path.
        connectedString = "<%s on %r>" % (protocolName, filename)
        self.assertEqual(repr(unixPort), connectedString)
        self.assertEqual(str(unixPort), connectedString)
        stopDeferred = defer.maybeDeferred(unixPort.stopListening)
        def stoppedListening(ign):
            # After shutdown, both string forms report the port as idle.
            unconnectedString = "<%s (not listening)>" % (protocolName,)
            self.assertEqual(repr(unixPort), unconnectedString)
            self.assertEqual(str(unixPort), unconnectedString)
        stopDeferred.addCallback(stoppedListening)
        return stopDeferred

    def test_reprWithClassicProtocol(self):
        """
        The two string representations of the L{IListeningPort} returned by
        L{IReactorUNIXDatagram.listenUNIXDatagram} contains the name of the
        classic protocol class being used and the filename on which the port is
        listening or indicates that the port is not listening.
        """
        # Minimal old-style (classic) protocol; its qualified name must
        # match the string passed to _reprTest below.
        class ClassicProtocol:
            def makeConnection(self, transport):
                pass
            def doStop(self):
                pass
        # Sanity check (Python 2 only; types.ClassType does not exist on
        # Python 3).
        self.assertIsInstance(ClassicProtocol, types.ClassType)
        return self._reprTest(
            ClassicProtocol(), "twisted.test.test_unix.ClassicProtocol")

    def test_reprWithNewStyleProtocol(self):
        """
        The two string representations of the L{IListeningPort} returned by
        L{IReactorUNIXDatagram.listenUNIXDatagram} contains the name of the
        new-style protocol class being used and the filename on which the port
        is listening or indicates that the port is not listening.
        """
        # Minimal new-style protocol; its qualified name must match the
        # string passed to _reprTest below.
        class NewStyleProtocol(object):
            def makeConnection(self, transport):
                pass
            def doStop(self):
                pass
        # Sanity check
        self.assertIsInstance(NewStyleProtocol, type)
        return self._reprTest(
            NewStyleProtocol(), "twisted.test.test_unix.NewStyleProtocol")
# Skip the whole suites on reactors which do not provide the corresponding
# UNIX-domain socket interfaces (trial honors the class-level "skip"
# attribute).
if not interfaces.IReactorUNIX(reactor, None):
    UnixSocketTestCase.skip = "This reactor does not support UNIX domain sockets"
if not interfaces.IReactorUNIXDatagram(reactor, None):
    DatagramUnixSocketTestCase.skip = "This reactor does not support UNIX datagram sockets"
| gpl-3.0 |
andeplane/lammps | tools/moltemplate/src/ltemplify.py | 5 | 156261 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2012, Regents of the University of California
# All rights reserved.
"""
ltemplify.py
The "ltemplify.py" script can be used to convert existing LAMMPS
input script and data files into a single .lt file
(which includes both topology and force-field information
for a single molecule in your system).
Example:
ltemplify.py -name Mol file.in file.data > mol.lt
This creates a template for a new type of molecule (named "Mol"),
consisting of all the atoms in the lammps files you included,
and saves this data in a single ttree file ("mol.lt").
This file can be used with moltemplate (ttree) to
define large systems containing this molecule.
"""
import sys
from ttree_lex import *
from lttree_styles import *
def Intify(s):
    """
    Convert a string such as '14', 'id14', or 'type14' into the integer 14.
    Strings matching none of these forms are returned unchanged.
    """
    if s.isdigit():
        return int(s)
    if s.startswith('id'):
        return int(s[2:])
    if s.startswith('type'):
        return int(s[4:])
    return s
def IsNumber(s):
    """
    Return True if s can be interpreted as a floating point number,
    False otherwise (including for non-string, non-numeric arguments
    such as None).

    Bug fix: the original read "except ValueError, TypeError:", which in
    Python 2 catches only ValueError (binding the exception to the name
    TypeError), so IsNumber(None) raised an uncaught TypeError.  Catching
    the tuple fixes that and is also valid Python 3 syntax.
    """
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        return False
def StringToInterval(sel_str, slice_delim='*'):
    """
    Parse a LAMMPS-style range token into a tuple of ints (or None).

    Returns a pair when at most one delimiter is present:
      '7'    -> (7, 7)
      '1*4'  -> (1, 4)
      '*4'   -> (None, 4)      (None means "unbounded on that side")
      '2*'   -> (2, None)
      '*'    -> (None, None)
    and a triple when two delimiters are present:
      '10*50*5' -> (10, 50, 5)   (start, stop, increment)
    A trailing empty increment field ('1*2*') collapses to the pair form,
    matching the original hand-rolled parser.

    Raises InputError if any non-empty token is not a non-negative integer.

    (Rewritten from the original find()-based parser, which its own
    comment called "a mess"; behavior is unchanged.)
    """
    def _token_to_bound(token):
        # '' means "unbounded" (None); digit strings become ints; anything
        # else is a malformed selection string.
        if token == '':
            return None
        if token.isdigit():
            return int(token)
        raise InputError('Error: invalid selection string \"'+
                         sel_str+'\"\n')

    # maxsplit=2 reproduces the original behavior for 3+ delimiters: the
    # third field keeps any remaining delimiters and fails validation.
    tokens = sel_str.split(slice_delim, 2)
    bounds = [_token_to_bound(tok) for tok in tokens]

    if len(bounds) == 1:
        # No delimiter at all: a single value denotes the interval [v, v].
        return (bounds[0], bounds[0])
    a = bounds[0]
    b = bounds[1]
    incr = bounds[2] if len(bounds) == 3 else None
    if incr is None:
        # Plain closed interval (also covers an empty increment field).
        return (a, b)
    return (a, b, incr)
# Selections are simply lists of 2-tuples (pairs)
def LammpsSelectToIntervals(sel_str, slice_delim='*', or_delim=', '):
    """
    Convert a selection string such as "1*4 6 9*12 50*70*10" into a list
    of closed intervals, for example:
    [(1,4), (6,6), (9,12), (50,50), (60,60), (70,70)]

    The result has the form [(a1,b1), (a2,b2), (a3,b3), ...].  An atom is
    considered to belong to the selection if it lies within [a,b] for any
    pair in the list.  An a or b equal to None imposes no bound on that
    side (like -infinity / +infinity: if a is None, being <= b suffices).
    """
    intervals = []
    # str.split(or_delim) would be wrong when len(or_delim) > 1, so use
    # the lexer's delimiter-aware splitter instead.
    for token in LineLex.TextBlock2Lines(sel_str, or_delim, keep_delim=False):
        interval = StringToInterval(token.strip(), slice_delim)
        if len(interval) == 2:
            # The common case: a plain (a, b) pair.
            intervals.append(interval)
        else:
            assert(len(interval) == 3)
            # "1000*2000*10" notation: expand into the single-point
            # intervals 1000, 1010, 1020, ..., 1990, 2000.
            lo, hi, incr = interval
            value = lo
            while value <= hi:
                intervals.append((value, value))
                value += incr
    return intervals
def IntervalListToMinMax(interval_list):
    """
    Return (min_a, max_b): the smallest lower bound and largest upper
    bound appearing in a list of (a, b) interval pairs.

    If any endpoint is not an int, (None, None) is returned immediately
    (only integer min/max bounds are meaningful here).  An empty list also
    yields (None, None).
    """
    min_a = None
    max_b = None
    for (a, b) in interval_list:
        # Keep the exact type test from the original (excludes bool and,
        # on Python 2, long); only integer min/max makes sense here.
        if ((not (type(a) is int)) or (not (type(b) is int))):
            return None, None
        if (min_a is None) or (a < min_a):
            min_a = a
        if (max_b is None) or (b > max_b):
            max_b = b
    return min_a, max_b
def MergeIntervals(interval_list):
    """
    Crudely merge consecutive overlapping (or abutting) intervals,
    modifying interval_list in place.  Whenever entry i-1 reaches entry i,
    the two are fused into (start of i-1, end of i).  Non-consecutive
    entries are never compared, exactly as in the original.
    """
    i = 1
    while i < len(interval_list):
        prev_end = interval_list[i - 1][1]
        # prev_end of None means "unbounded above", which always overlaps.
        if (prev_end is None) or (prev_end + 1 >= interval_list[i][0]):
            interval_list[i - 1] = (interval_list[i - 1][0],
                                    interval_list[i][1])
            del interval_list[i]
        else:
            i += 1
def BelongsToSel(i, sel):
    """
    Return True if the id i satisfies the selection sel, a list of (a, b)
    interval pairs such as produced by LammpsSelectToIntervals().

    An empty or None selection accepts everything (the user specified no
    selection for this category).  Non-numeric string ids are also always
    accepted, since they cannot be compared against integer intervals.
    Otherwise i belongs if a <= i <= b for some pair, where an endpoint of
    None leaves that side of the interval unbounded.
    """
    if (i is None) or (sel is None) or (len(sel) == 0):
        # No selection specified for this category: accept everything.
        return True
    if isinstance(i, str):
        if i.isdigit():
            i = int(i)
        else:
            # Non-numeric names can't be range-tested; accept them.
            return True
    for interval in sel:
        assert(len(interval) == 2)
        lo, hi = interval
        # NOTE: these are truthiness tests (kept from the original code),
        # so an endpoint of 0 behaves like "no bound", not the number 0.
        if lo:
            if (i >= lo) and ((hi is None) or (i <= hi)):
                return True
        elif hi:
            if i <= hi:
                return True
        else:
            # The user entered something like "*", which covers all
            # possible numbers.
            return True
    return False
try:
g_program_name = __file__.split('/')[-1] # = 'ltemplify.py'
g_version_str = '0.51'
g_date_str = '2015-10-27'
sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+'\n')
non_empty_output = False
no_warnings = True
indent = 2
cindent = 0
atomid_selection = []
atomtype_selection = []
molid_selection = []
mol_name = ''
min_sel_atomid = None
min_sel_atomtype = None
min_sel_bondid = None
min_sel_bondtype = None
min_sel_angleid = None
min_sel_angletype = None
min_sel_dihedralid = None
min_sel_dihedraltype = None
min_sel_improperid = None
min_sel_impropertype = None
max_sel_atomid = None
max_sel_atomtype = None
max_sel_bondid = None
max_sel_bondtype = None
max_sel_angleid = None
max_sel_angletype = None
max_sel_dihedralid = None
max_sel_dihedraltype = None
max_sel_improperid = None
max_sel_impropertype = None
needed_atomids = set([])
needed_atomtypes = set([])
needed_molids = set([])
needed_bondids = set([])
needed_bondtypes = set([])
needed_angleids = set([])
needed_angletypes = set([])
needed_dihedralids = set([])
needed_dihedraltypes = set([])
needed_improperids = set([])
needed_impropertypes = set([])
min_needed_atomtype = None
max_needed_atomtype = None
min_needed_bondtype = None
max_needed_bondtype = None
min_needed_angletype = None
max_needed_angletype = None
min_needed_dihedraltype = None
max_needed_dihedraltype = None
min_needed_impropertype = None
max_needed_impropertype = None
min_needed_atomid = None
max_needed_atomid = None
min_needed_molid = None
max_needed_molid = None
min_needed_bondid = None
max_needed_bondid = None
min_needed_angleid = None
max_needed_angleid = None
min_needed_dihedralid = None
max_needed_dihedralid = None
min_needed_improperid = None
max_needed_improperid = None
# To process the selections, we need to know the atom style:
atom_style_undefined = True
i_atomid = None
i_atomtype = None
i_molid = None
i_x = None
i_y = None
i_z = None
l_in_init = []
l_in_settings = []
l_in_masses = []
l_in_pair_coeffs = []
l_in_bond_coeffs = []
l_in_angle_coeffs = []
l_in_dihedral_coeffs = []
l_in_improper_coeffs = []
l_in_group = []
l_in_fix_shake = []
l_in_fix_rigid = []
l_in_fix_poems = []
l_in_fix_qeq = []
l_in_fix_qmmm = []
l_data_masses = []
l_data_bond_coeffs = []
l_data_angle_coeffs = []
l_data_dihedral_coeffs = []
l_data_improper_coeffs = []
l_data_pair_coeffs = []
l_data_pairij_coeffs = []
l_data_atoms = []
l_data_velocities = []
l_data_bonds = []
l_data_angles = []
l_data_dihedrals = []
l_data_impropers = []
# class2 force fields
#l_in_bondbond_coeffs = [] <--not needed, included in l_in_angle_coeff
#l_in_bondangle_coeffs = [] <--not needed, included in l_in_angle_coeff
#l_in_middlebondtorsion_coeffs = [] not needed, included in l_in_dihedral_coeff
#l_in_endbondtorsion_coeffs = [] <--not needed, included in l_in_dihedral_coeff
#l_in_angletorsion_coeffs = [] <--not needed, included in l_in_dihedral_coeff
#l_in_angleangletorsion_coeffs = [] not needed, included in l_in_dihedral_coeff
#l_in_bondbond13_coeffs = [] <--not needed, included in l_in_dihedral_coeff
#l_in_angleangle_coeffs = [] <--not needed, included in l_in_improper_coeff
l_data_bondbond_coeffs = []
l_data_bondangle_coeffs = []
l_data_middlebondtorsion_coeffs = []
l_data_endbondtorsion_coeffs = []
l_data_angletorsion_coeffs = []
l_data_angleangletorsion_coeffs = []
l_data_bondbond13_coeffs = []
l_data_angleangle_coeffs = []
# non-point-like particles:
l_data_ellipsoids = []
l_data_lines = []
l_data_triangles = []
# automatic generation of bonded interactions by type:
l_data_angles_by_type = []
l_data_dihedrals_by_type = []
l_data_impropers_by_type = []
atoms_already_read = False
some_pair_coeffs_read = False
complained_atom_style_mismatch = False
infer_types_from_comments = False
remove_coeffs_from_data_file = True
argv = [arg for arg in sys.argv]
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-columns':
if i+1 >= len(argv):
raise InputError('Error: the \"'+argv[i]+'\" argument should be followed by a quoted\n'
' string which contains a space-delimited list of the names of\n'
' of columns in the \"Atoms\" section of the LAMMPS data file.\n'
' If the list contains the symbols:\n'
' \"atom-ID\" or \"atomid\", they are interpreted\n'
' as unique atom ID numbers, and columns named\n'
' \"atom-type\" or \"atomtype\" are interpreted\n'
' as atom types. Finally, columns named\n'
' \"molecule-ID\", \"molecule\", or \"mol-ID\", or \"mol\"\n'
' are interpreted as unique molecule id numbers.\n'
'Example:\n'
' '+argv[i]+' \'atom-ID atom-type q polarizability molecule-ID x y z\'\n'
' defines a custom atom_style containing the properties\n'
' atom-ID atom-type q polarizability molecule-ID x y z\n'
' Make sure you enclose the entire list in quotes.\n');
column_names = argv[i+1].strip('\"\'').strip().split()
del argv[i:i+2]
elif (argv[i] == '-ignore-comments'):
infer_types_from_comments = False
del argv[i:i+1]
elif (argv[i] == '-infer-comments'):
infer_types_from_comments = True
del argv[i:i+1]
elif ((argv[i] == '-name') or
(argv[i] == '-molname') or
(argv[i] == '-molecule-name') or
(argv[i] == '-molecule_name')):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a a molecule type name.\n')
cindent = 2
indent += cindent
mol_name = argv[i+1]
del argv[i:i+2]
elif ((argv[i].lower() == '-atomstyle') or
(argv[i].lower() == '-atom_style') or
(argv[i].lower() == '-atom-style')):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a an atom_style name.\n'
' (or single quoted string which includes a space-separated\n'
' list of column names).\n')
atom_style_undefined = False
column_names = AtomStyle2ColNames(argv[i+1])
if (argv[i+1].strip().split()[0] in g_style_map):
l_in_init.append((' '*indent) + 'atom_style ' + argv[i+1] + '\n')
sys.stderr.write('\n \"Atoms\" column format:\n')
sys.stderr.write(' '+(' '.join(column_names))+'\n')
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
# Which columns contain the coordinates?
ii_coords = ColNames2Coords(column_names)
assert(len(ii_coords) == 1)
i_x = ii_coords[0][0]
i_y = ii_coords[0][1]
i_z = ii_coords[0][2]
if i_molid:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+', i_molid='+str(i_molid+1)+')\n\n')
else:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+')\n')
del argv[i:i+2]
elif ((argv[i].lower() == '-id') or
#(argv[i].lower() == '-a') or
#(argv[i].lower() == '-atoms') or
(argv[i].lower() == '-atomid') or
#(argv[i].lower() == '-atomids') or
(argv[i].lower() == '-atom-id')
#(argv[i].lower() == '-atom-ids') or
#(argv[i].lower() == '-$atom') or
#(argv[i].lower() == '-$atoms')
):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a list of integers\n'
' (or strings). These identify the group of atoms you want to\n'
' to include in the template you are creating.\n')
atomid_selection += LammpsSelectToIntervals(argv[i+1])
min_sel_atomid, max_sel_atomid = IntervalListToMinMax(atomid_selection)
del argv[i:i+2]
elif ((argv[i].lower() == '-datacoeffs') or
(argv[i].lower() == '-datacoeff') or
(argv[i].lower() == '-Coeff') or
(argv[i].lower() == '-Coeffs')):
remove_coeffs_from_data_file = False
del argv[i:i+1]
elif ((argv[i].lower() == '-type') or
#(argv[i].lower() == '-t') or
(argv[i].lower() == '-atomtype') or
(argv[i].lower() == '-atom-type')
#(argv[i].lower() == '-atomtypes') or
#(argv[i].lower() == '-atom-types') or
#(argv[i].lower() == '-@atom') or
#(argv[i].lower() == '-@atoms') or
#(argv[i].lower() == '-@atomtype') or
#(argv[i].lower() == '-@atomtypes')
):
if i+1 >= len(argv):
raise InputError('Error: '+argv[i]+' flag should be followed by a list of integers.\n'
' (or strings). These identify the group of atom types you want to\n'
' to include in the template you are creating.\n')
atomtype_selection += LammpsSelectToIntervals(argv[i+1])
min_sel_atomtype, max_sel_atomtype = IntervalListToMinMax(atomtype_selection)
del argv[i:i+2]
elif ((argv[i].lower() == '-mol') or
#(argv[i].lower() == '-m') or
(argv[i].lower() == '-molid') or
#(argv[i].lower() == '-molids') or
(argv[i].lower() == '-mol-id') or
#(argv[i].lower() == '-mol-ids') or
#(argv[i].lower() == '-molecule') or
(argv[i].lower() == '-moleculeid') or
(argv[i].lower() == '-molecule-id')
#(argv[i].lower() == '-molecules') or
#(argv[i].lower() == '-molecule-ids') or
#(argv[i].lower() == '-$mol') or
#(argv[i].lower() == '-$molecule')
):
if i+1 >= len(argv):
sys.stderr.write('Error: '+argv[i]+' flag should be followed by a list of integers.\n'
' (or strings). These identify the group of molecules you want to\n'
' include in the template you are creating.\n')
molid_selection += LammpsSelectToIntervals(argv[i+1])
del argv[i:i+2]
else:
i += 1
# We might need to parse the simulation boundary-box.
# If so, use these variables. (None means uninitialized.)
boundary_xlo = None
boundary_xhi = None
boundary_ylo = None
boundary_yhi = None
boundary_zlo = None
boundary_zhi = None
boundary_xy = None
boundary_yz = None
boundary_xz = None
# atom type names
atomtypes_name2int = {}
atomtypes_int2name = {}
#atomids_name2int = {} not needed
atomids_int2name = {}
atomids_by_type = {}
if atom_style_undefined:
# The default atom_style is "full"
column_names = AtomStyle2ColNames('full')
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
# Which columns contain the coordinates?
ii_coords = ColNames2Coords(column_names)
assert(len(ii_coords) == 1)
i_x = ii_coords[0][0]
i_y = ii_coords[0][1]
i_z = ii_coords[0][2]
#---------------------------------------------------------
#-- The remaining arguments are files that the user wants
#-- us to read and convert. It is typical to have
#-- multiple input files, because LAMMPS users often
#-- store their force field parameters in either the LAMMPS
#-- data files and input script files, or both.
#-- We want to search all of the LAMMPS input files in
#-- order to make sure we extracted all the force field
#-- parameters (coeff commands).
#---------------------------------------------------------
for i_arg in range(1,len(argv)):
fname = argv[i_arg]
try:
lammps_file = open(fname, 'r')
except IOError:
raise InputError('Error: unrecognized argument (\"'+fname+'\"),\n'
' OR unable to open file:\n'
'\n'
' \"'+fname+'\"\n'
' for reading.\n'
'\n'
' (If you were not trying to open a file with this name,\n'
' then there is a problem in your argument list.)\n')
sys.stderr.write('reading file \"'+fname+'\"\n')
atomid2type = {}
atomid2mol = {}
data_file_header_names = set(['LAMMPS Description',
'Atoms', 'Masses', 'Velocities', 'Bonds',
'Angles', 'Dihedrals', 'Impropers',
'Pair Coeffs',
'Bond Coeffs', 'Angle Coeffs',
'Dihedral Coeffs', 'Improper Coeffs',
#class2 force fields:
'BondBond Coeffs', 'BondAngle Coeffs',
'MiddleBondTorsion Coeffs', 'EndBondTorsion Coeffs',
'AngleTorsion Coeffs', 'AngleAngleTorsion Coeffs',
'BondBond13 Coeffs',
'AngleAngle Coeffs',
# non-point-like particles:
'Ellipsoids', 'Triangles', 'Lines',
#specifying bonded interactions by type:
'Angles By Type', 'Dihedrals By Type', 'Impropers By Type'
])
lex=LineLex(lammps_file, fname)
lex.source_triggers = set(['include','import'])
# set up lex to accept most characters in file names:
lex.wordterminators = '(){}' + lex.whitespace
# set up lex to understand the "include" statement:
lex.source = 'include'
lex.escape = '\\'
while lex:
infile = lex.infile
lineno = lex.lineno
line = lex.ReadLine()
if (lex.infile != infile):
infile = lex.infile
lineno = lex.lineno
#sys.stderr.write(' processing \"'+line.strip()+'\", (\"'+infile+'\":'+str(lineno)+')\n')
if line == '':
break
tokens = line.strip().split()
if (len(tokens) > 0):
if ((tokens[0] == 'atom_style') and
atom_style_undefined):
sys.stderr.write(' Atom Style found. Processing: \"'+line.strip()+'\"\n')
if atoms_already_read:
raise InputError('Error: The file containing the \"atom_style\" command must\n'
' come before the data file in the argument list.\n'
' (The templify program needs to know the atom style before reading\n'
' the data file. Either change the order of arguments so that the\n'
' LAMMPS input script file is processed before the data file, or use\n'
' the \"-atom_style\" command line argument to specify the atom_style.)\n')
column_names = AtomStyle2ColNames(line.split()[1])
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
# Which columns contain the coordinates?
ii_coords = ColNames2Coords(column_names)
assert(len(ii_coords) == 1)
i_x = ii_coords[0][0]
i_y = ii_coords[0][1]
i_z = ii_coords[0][2]
sys.stderr.write('\n \"Atoms\" column format:\n')
sys.stderr.write(' '+(' '.join(column_names))+'\n')
if i_molid:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+', i_molid='+str(i_molid+1)+')\n\n')
else:
sys.stderr.write(' (i_atomid='+str(i_atomid+1)+', i_atomtype='+str(i_atomtype+1)+')\n\n')
l_in_init.append((' '*indent)+line.lstrip())
elif (tokens[0] in set(['units',
'angle_style',
'bond_style',
'dihedral_style',
'improper_style',
'min_style',
'pair_style',
'pair_modify',
'special_bonds',
'kspace_style',
'kspace_modify'])):
l_in_init.append((' '*indent)+line.lstrip())
#if (line.strip() == 'LAMMPS Description'):
# sys.stderr.write(' reading \"'+line.strip()+'\"\n')
# # skip over this section
# while lex:
# line = lex.ReadLine()
# if line.strip() in data_file_header_names:
# lex.push_raw_text(line) # <- Save line for later
# break
elif (line.strip() == 'Atoms'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
atoms_already_read = True
# Before attempting to read atomic coordinates, first find
# the lattice vectors of the simulation's boundary box:
# Why do we care about the Simulation Boundary?
# Some LAMMPS data files store atomic coordinates in a
# complex format with 6 numbers, 3 floats, and 3 integers.
# The 3 floats are x,y,z coordinates. Any additional numbers
# following these are integers which tell LAMMPS which cell
# the particle belongs to, (in case it has wandered out of
# the original periodic boundary box). In order to find
# the true location of the particle, we need to offset that
# particle's position with the unit-cell lattice vectors:
# avec, bvec, cvec (or multiples thereof)
# avec, bvec, cvec are the axis of the parallelepiped which
# define the simulation's boundary. They are described here:
#http://lammps.sandia.gov/doc/Section_howto.html#howto-12
if ((boundary_xlo==None) or (boundary_xhi==None) or
(boundary_ylo==None) or (boundary_yhi==None) or
(boundary_zlo==None) or (boundary_zhi==None)):
raise InputError('Error: Either DATA file lacks a boundary-box header, or it is in the wrong\n'
' place. At the beginning of the file, you need to specify the box size:\n'
' xlo xhi ylo yhi zlo zhi (and xy xz yz if triclinic)\n'
' These numbers should appear BEFORE the other sections in the data file\n'
' (such as the \"Atoms\", \"Masses\", \"Bonds\", \"Pair Coeffs\" sections)\n'
'\n'
' Use this format (example):\n'
' -100.0 100.0 xhi xlo\n'
' 0.0 200.0 yhi ylo\n'
' -25.0 50.0 zhi zlo\n'
'\n'
'For details, see http://lammps.sandia.gov/doc/read_data.html\n'
'\n'
' (NOTE: If the atom coordinates are NOT followed by integers, then\n'
' these numbers are all ignored, however you must still specify\n'
' xlo, xhi, ylo, yhi, zlo, zhi. You can set them all to 0.0.)\n')
if not (boundary_xy and boundary_yz and boundary_xz):
# Then use a simple rectangular boundary box:
avec = (boundary_xhi-boundary_xlo, 0.0, 0.0)
bvec = (0.0, boundary_yhi-boundary_ylo, 0.0)
cvec = (0.0, 0.0, boundary_zhi-boundary_zlo)
else:
# Triclinic geometry in LAMMPS is explained here:
# http://lammps.sandia.gov/doc/Section_howto.html#howto-12
# http://lammps.sandia.gov/doc/read_data.html
avec = (boundary_xhi-boundary_xlo, 0.0, 0.0)
bvec = (boundary_xy, boundary_yhi-boundary_ylo, 0.0)
cvec = (boundary_xz, boundary_yz, boundary_zhi-boundary_zlo)
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if ((len(tokens) <= i_atomid) or
(len(tokens) <= i_atomtype) or
((i_molid != None) and
(len(tokens) <= i_molid))):
raise InputError('Error: The number of columns in the \"Atoms\" section does\n'
' not match the atom_style (see column name list above).\n')
elif ((len(tokens) != len(column_names)) and
(len(tokens) != len(column_names)+3) and
(not complained_atom_style_mismatch)):
complained_atom_style_mismatch = True
sys.stderr.write('Warning: The number of columns in the \"Atoms\" section does\n'
' not match the atom_style (see column name list above).\n')
# this is not a very serious warning.
#no_warnings = False <--no need. commenting out
atomid = Intify(tokens[i_atomid])
atomtype = Intify(tokens[i_atomtype])
molid = None
if i_molid:
molid = Intify(tokens[i_molid])
atomid2type[atomid] = atomtype
if i_molid:
atomid2mol[atomid] = molid
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[i_atomid] = '$atom:id'+tokens[i_atomid]
#tokens[i_atomid] = '$atom:'+atomids_int2name[atomid]
# fill atomtype_int2str[] with a default name (change later):
#tokens[i_atomtype] = '@atom:type'+tokens[i_atomtype]
atomtype_name = 'type'+tokens[i_atomtype]
atomtypes_int2name[atomtype] = atomtype_name
tokens[i_atomtype] = '@atom:'+atomtype_name
# Interpreting unit-cell counters
# If present, then unit-cell "flags" must be
# added to the x,y,z coordinates.
#
# For more details on unit-cell "flags", see:
# http://lammps.sandia.gov/doc/read_data.html
# "In the data file, atom lines (all lines or
# none of them) can optionally list 3 trailing
# integer values (nx,ny,nz), which are used to
# initialize the atom’s image flags.
# If nx,ny,nz values are not listed in the
# data file, LAMMPS initializes them to 0.
# Note that the image flags are immediately
# updated if an atom’s coordinates need to
# wrapped back into the simulation box."
if (len(tokens) == len(column_names)+3):
nx = int(tokens[-3])
ny = int(tokens[-2])
nz = int(tokens[-1])
x = float(tokens[i_x]) + nx*avec[0]+ny*bvec[0]+nz*cvec[0]
y = float(tokens[i_y]) + nx*avec[1]+ny*bvec[1]+nz*cvec[1]
z = float(tokens[i_z]) + nx*avec[2]+ny*bvec[2]+nz*cvec[2]
tokens[i_x] = str(x)
tokens[i_y] = str(y)
tokens[i_z] = str(z)
# Now get rid of them:
del tokens[-3:]
# I can't use atomids_int2name or atomtypes_int2name yet
# because they probably have not been defined yet.
# (Instead assign these names in a later pass.)
if i_molid:
tokens[i_molid] = '$mol:id'+tokens[i_molid]
l_data_atoms.append((' '*indent)+(' '.join(tokens)+'\n'))
needed_atomids.add(atomid)
needed_atomtypes.add(atomtype)
# Not all atom_styles have molids.
# Check for this before adding.
if molid != None:
needed_molids.add(molid)
for atomtype in needed_atomtypes:
assert(type(atomtype) is int)
if ((min_needed_atomtype == None) or
(min_needed_atomtype > atomtype)):
min_needed_atomtype = atomtype
if ((max_needed_atomtype == None) or
(max_needed_atomtype < atomtype)):
max_needed_atomtype = atomtype
for atomid in needed_atomids:
assert(type(atomid) is int)
if ((min_needed_atomid == None) or
(min_needed_atomid > atomid)):
min_needed_atomid = atomid
if ((max_needed_atomid == None) or
(max_needed_atomid < atomid)):
max_needed_atomid = atomid
for molid in needed_molids:
assert(type(molid) is int)
if ((min_needed_molid == None) or
(min_needed_molid > molid)):
min_needed_molid = molid
if ((max_needed_molid == None) or
(max_needed_molid < molid)):
max_needed_molid = molid
elif (line.strip() == 'Masses'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
# Read the next line of text but don't skip comments
comment_char_backup = lex.commenters
lex.commenters = ''
line = lex.ReadLine()
lex.commenters = comment_char_backup
comment_text = ''
ic = line.find('#')
if ic != -1:
line = line[:ic]
comment_text = line[ic+1:].strip()
line = line.rstrip()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomtype = Intify(tokens[0])
atomtype_name = str(atomtype)
if comment_text != '':
comment_tokens = comment_text.split()
# Assume the first word after the # is the atom type name
atomtype_name = comment_tokens[0]
if BelongsToSel(atomtype, atomtype_selection):
#tokens[0] = '@atom:type'+tokens[0]
l_data_masses.append((' '*indent)+(' '.join(tokens)+'\n'))
# infer atom type names from comment strings?
if infer_types_from_comments:
if atomtype_name in atomtypes_name2int:
raise InputError('Error: duplicate atom type names in mass section: \"'+atomtype_name+'\"\n'
' (By default '+g_program_name+' attempts to infer atom type names from\n'
' comments which appear in the \"Masses\" section of your data file.)\n'
' You can avoid this error by adding the \"-ignore-comments\" argument.\n')
atomtypes_name2int[atomtype_name] = atomtype
atomtypes_int2name[atomtype] = atomtype_name
else:
atomtypes_int2name[atomtype] = 'type'+str(atomtype)
elif (line.strip() == 'Velocities'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
moldid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
#tokens[0] = '$atom:'+atomids_int2name[atomid]
#NOTE:I can't use "atomids_int2name" yet because
# they probably have not been defined yet.
# (Instead assign these names in a later pass.)
l_data_velocities.append((' '*indent)+(' '.join(tokens)+'\n'))
# non-point-like-particles:
elif (line.strip() == 'Ellipsoids'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
moldid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
#tokens[0] = '$atom:'+atomids_int2name[atomid]
#NOTE:I can't use "atomids_int2name" yet because
# they probably have not been defined yet.
# (Instead assign these names in a later pass.)
l_data_ellipsoids.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Lines'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
moldid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
#tokens[0] = '$atom:'+atomids_int2name[atomid]
#NOTE:I can't use "atomids_int2name" yet because
# they probably have not been defined yet.
# (Instead assign these names in a later pass.)
l_data_lines.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Triangles'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
atomid = Intify(tokens[0])
atomtype = None
if atomid in atomid2type:
atomtype = atomid2type[atomid]
moldid = None
if atomid in atomid2mol:
molid = atomid2mol[atomid]
if (BelongsToSel(atomid, atomid_selection) and
BelongsToSel(atomtype, atomtype_selection) and
BelongsToSel(molid, molid_selection)):
tokens[0] = '$atom:id'+tokens[0]
#tokens[0] = '$atom:'+atomids_int2name[atomid]
#NOTE:I can't use "atomids_int2name" yet because
# they probably have not been defined yet.
# (Instead assign these names in a later pass.)
l_data_triangles.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Bonds'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 4):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Bonds section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$bond:id'+tokens[0]
#tokens[1] = '@bond:type'+tokens[1]
atomids = [None, None]
atomtypes = [None, None]
molids = [None, None]
in_selections = True
some_in_selection = False
for n in range(0,2):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
#tokens[2+n] = '$atom:'+atomids_int2name[atomids[n]]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_bonds.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS BONDS\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,2):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected are bonded\n'
' to other atoms you didn\'t select.\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Angles'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line == '':
break
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 5):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Angles section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$angle:id'+tokens[0]
#tokens[1] = '@angle:type'+tokens[1]
atomids = [None, None, None]
atomtypes = [None, None, None]
molids = [None, None, None]
in_selections = True
some_in_selection = False
for n in range(0,3):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
#tokens[2+n] = '$atom:'+atomids_int2name[atomids[n]]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_angles.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS ANGLES\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,3):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 3-body \"Angle\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Dihedrals'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 6):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Dihedrals section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$dihedral:id'+tokens[0]
#tokens[1] = '@dihedral:type'+tokens[1]
atomids = [None, None, None, None]
atomtypes = [None, None, None, None]
molids = [None, None, None, None]
in_selections = True
some_in_selection = False
for n in range(0,4):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
#tokens[2+n] = '$atom:'+atomids_int2name[atomids[n]]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_dihedrals.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS DIHEDRALS\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,4):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 4-body \"Dihedral\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Impropers'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 6):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Impropers section:\n'
' \"'+line.strip()+'\"\n')
#tokens[0] = '$improper:id'+tokens[0]
#tokens[1] = '@improper:type'+tokens[1]
atomids = [None, None, None, None]
atomtypes = [None, None, None, None]
molids = [None, None, None, None]
in_selections = True
some_in_selection = False
for n in range(0,4):
atomids[n] = Intify(tokens[2+n])
if atomids[n] in atomid2type:
atomtypes[n] = atomid2type[atomids[n]]
if atomids[n] in atomid2mol:
molids[n] = atomid2mol[atomids[n]]
if (BelongsToSel(atomids[n], atomid_selection) and
BelongsToSel(atomtypes[n], atomtype_selection) and
BelongsToSel(molids[n], molid_selection)):
#tokens[2+n] = '$atom:id'+tokens[2+n]
#tokens[2+n] = '$atom:'+atomids_int2name[atomids[n]]
some_in_selection = True
else:
in_selections = False
if in_selections:
l_data_impropers.append((' '*indent)+(' '.join(tokens)+'\n'))
elif some_in_selection:
sys.stderr.write('WARNING: SELECTION BREAKS IMPROPERS\n')
sys.stderr.write(' (between atom ids: ')
for n in range(0,4):
sys.stderr.write(str(atomids[n])+' ')
sys.stderr.write(')\n'
' The atoms you selected participate in 4-body \"Improper\"\n'
' interactions with other atoms you didn\'t select.\n'
' (They will be ignored.)\n'
' Are you sure you selected the correct atoms?\n')
no_warnings = False
elif (line.strip() == 'Bond Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@bond:type'+tokens[0]
l_data_bond_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Angle Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@angle:type'+tokens[0]
l_data_angle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Dihedral Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_dihedral_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Improper Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@improper:type'+tokens[0]
l_data_improper_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Pair Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
some_pair_coeffs_read = True
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Pair Coeffs section:\n'
' \"'+line.strip()+'\"\n')
atomtype_i_str = tokens[0]
if '*' in atomtype_i_str:
raise InputError('PROBLEM near or before '+ErrorLeader(infile, lineno)+'\n'
' As of 2015-8, moltemplate forbids use of the "\*\" wildcard\n'
' character in the \"Pair Coeffs\" section.\n')
else:
i = int(atomtype_i_str)
if ((not i) or
BelongsToSel(i, atomtype_selection)):
i_str = '@atom:type'+str(i)
tokens[0] = i_str
l_data_pair_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'PairIJ Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
some_pair_coeffs_read = True
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical line in Pair Coeffs section:\n'
' \"'+line.strip()+'\"\n')
atomtype_i_str = tokens[0]
atomtype_j_str = tokens[1]
if (('*' in atomtype_i_str) or ('*' in atomtype_j_str)):
raise InputError('PROBLEM near or before '+ErrorLeader(infile, lineno)+'\n'
' As of 2015-8, moltemplate forbids use of the "\*\" wildcard\n'
' character in the \"PairIJ Coeffs\" section.\n')
else:
i = int(atomtype_i_str)
j = int(atomtype_j_str)
if (((not i) or BelongsToSel(i, atomtype_selection)) and
((not j) or BelongsToSel(j, atomtype_selection))):
i_str = '@atom:type'+str(i)
j_str = '@atom:type'+str(j)
tokens[0] = i_str
tokens[1] = j_str
l_data_pair_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'pair_coeff'):
some_pair_coeffs_read = True
if (len(tokens) < 3):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical pair_coeff command:\n'
' \"'+line.strip()+'\"\n')
l_in_pair_coeffs.append(' '*indent+line.strip())
elif (tokens[0] == 'mass'):
some_pair_coeffs_read = True
if (len(tokens) < 3):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical \"mass\" command:\n'
' \"'+line.strip()+'\"\n')
l_in_masses.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'bond_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical bond_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@bond:type'+tokens[1]
l_in_bond_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'angle_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical angle_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@angle:type'+tokens[1]
l_in_angle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'dihedral_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical dihedral_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@dihedral:type'+tokens[1]
l_in_dihedral_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[0] == 'improper_coeff'):
if (len(tokens) < 2):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical improper_coeff command:\n'
' \"'+line.strip()+'\"\n')
#tokens[1] = '@improper:type'+tokens[1]
l_in_improper_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
# -- class2 force fields --
elif (line.strip() == 'BondBond Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@angle:type'+tokens[0]
l_data_bondbond_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'BondAngle Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@angle:type'+tokens[0]
l_data_bondangle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'MiddleBondTorsion Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_middlebondtorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'EndBondTorsion Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_endbondtorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'AngleTorsion Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_angletorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'AngleAngleTorsion Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_angleangletorsion_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'BondBond13 Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@dihedral:type'+tokens[0]
l_data_bondbond13_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'AngleAngle Coeffs'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
#tokens[0] = '@improper:type'+tokens[0]
l_data_angleangle_coeffs.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Angles By Type'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@angle:type'+tokens[0]
l_data_angles_by_type.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Dihedrals By Type'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@dihedral:type'+tokens[0]
l_data_dihedrals_by_type.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (line.strip() == 'Impropers By Type'):
sys.stderr.write(' reading \"'+line.strip()+'\"\n')
while lex:
line = lex.ReadLine()
if line.strip() in data_file_header_names:
lex.push_raw_text(line) # <- Save line for later
break
tokens = line.strip().split()
if len(tokens) > 0:
tokens[0] = '@improper:type'+tokens[0]
l_data_impropers_by_type.append((' '*indent)+(' '.join(tokens)+'\n'))
        # Figure out the size of the simulation box boundary:
        # These branches match the LAMMPS data-file header lines of the form
        #   "<lo> <hi> xlo xhi"   (and likewise for y and z), plus the
        #   "<xy> <xz> <yz> xy xz yz" line (presumably the triclinic tilt
        #   factors -- see the LAMMPS read_data documentation to confirm).
        elif ((len(tokens)==4) and
              (tokens[2] == 'xlo') and
              (tokens[3] == 'xhi') and
              IsNumber(tokens[0]) and
              IsNumber(tokens[1])):
            boundary_xlo = float(tokens[0])
            boundary_xhi = float(tokens[1])
        elif ((len(tokens)==4) and
              (tokens[2] == 'ylo') and
              (tokens[3] == 'yhi') and
              IsNumber(tokens[0]) and
              IsNumber(tokens[1])):
            boundary_ylo = float(tokens[0])
            boundary_yhi = float(tokens[1])
        elif ((len(tokens)==4) and
              (tokens[2] == 'zlo') and
              (tokens[3] == 'zhi') and
              IsNumber(tokens[0]) and
              IsNumber(tokens[1])):
            boundary_zlo = float(tokens[0])
            boundary_zhi = float(tokens[1])
        elif ((len(tokens)==6) and
              (tokens[3] == 'xy') and
              (tokens[4] == 'xz') and
              (tokens[5] == 'yz') and
              IsNumber(tokens[0]) and
              IsNumber(tokens[1]) and
              IsNumber(tokens[2])):
            boundary_xy = float(tokens[0])
            boundary_xz = float(tokens[1])
            boundary_yz = float(tokens[2])
elif (tokens[0] == 'group'):
if (len(tokens) < 3):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical group command:\n'
' \"'+line.strip()+'\"\n')
l_in_group.append((' '*indent)+(' '.join(tokens)+'\n'))
elif ((tokens[0] == 'fix') and (len(tokens) >= 4)):
if (tokens[3].find('rigid') == 0):
if (len(tokens) < 6):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical '+tokens[0]+' '+tokens[3]+' command:\n'
' \"'+line.strip()+'\"\n')
l_in_fix_rigid.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[3].find('shake') == 0):
if (len(tokens) < 7):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical '+tokens[0]+' '+tokens[3]+' command:\n'
' \"'+line.strip()+'\"\n')
l_in_fix_shake.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[3].find('poems') == 0):
if (len(tokens) < 4):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical '+tokens[0]+' '+tokens[3]+' command:\n'
' \"'+line.strip()+'\"\n')
l_in_fix_poems.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[3].find('qeq') == 0):
if (len(tokens) < 8):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical '+tokens[0]+' '+tokens[3]+' command:\n'
' \"'+line.strip()+'\"\n')
l_in_fix_qeq.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[3].find('qmmm') == 0):
if (len(tokens) < 8):
raise InputError('Error: near or before '+ErrorLeader(infile, lineno)+'\n'
' Nonsensical '+tokens[0]+' '+tokens[3]+' command:\n'
' \"'+line.strip()+'\"\n')
l_in_fix_qmmm.append((' '*indent)+(' '.join(tokens)+'\n'))
elif (tokens[3].find('restrain') == 0):
sys.stderr('WARNING: fix \"'+tokens[3]+'\" commands are NOT understood by '+g_program_name+'.\n'
' If you need restraints, add them to your final .LT file (eg. \"system.lt\"),\n'
' (And be sure to use unique (full, long) moltemplate names for each $atom:.)\n'
' Ignoring line \"'+line.strip()+'\"\n')
else:
sys.stderr.write(' Ignoring line \"'+line.strip()+'\"\n')
sys.stderr.write('\n\n')
sys.stderr.write(' processing \"Atoms\" section (')
# post-processing:
if len(l_data_masses) == 0:
infer_types_from_comments = False
# Pass 1 through l_data_atoms:
# Now do a second-pass throught the "l_data_atoms" section, and
# finish dealing with "infer_types_from_comments".
# During this pass, peplace the atomtype names and atomid names with
# atom type names which were inferred from comments read earlier.
sys.stderr.write('pass1')
for i in range(0, len(l_data_atoms)):
tokens = l_data_atoms[i].split()
atomid = tokens[i_atomid]
if atomid.find('$atom:') == 0:
atomid = atomid[6:]
# convert to an integer
atomid = Intify(atomid)
if infer_types_from_comments:
atomtype = tokens[i_atomtype]
# remove the "@atom:" prefix (we will put it back later)
if atomtype.find('@atom:') == 0:
atomtype = atomtype[6:]
# convert to an integer
atomtype = Intify(atomtype)
atomtype_name = atomtypes_int2name[atomtype]
if atomtype in atomids_by_type:
l_atomids = atomids_by_type[atomtype]
prev_count = len(l_atomids)
# lookup the most recently added atom of this type:
#prev_atomid_name = l_atomids[-1]
#ic = prev_atomid_name.rfind('_')
#prev_count = int(prev_atomid_name[ic+1:])
atomid_name = atomtype_name+'_'+str(prev_count+1)
atomids_by_type[atomtype].append(atomid)
else:
atomids_by_type[atomtype] = [atomid]
atomid_name = atomtype_name+'_1'
atomids_int2name[atomid] = atomid_name
#atomids_name2str[atomid_name] = atomid
else:
atomids_int2name[atomid] = 'id'+str(atomid)
sys.stderr.write(', pass2')
# Pass 2: If any atom types only appear once, simplify their atomid names.
for i in range(0, len(l_data_atoms)):
tokens = l_data_atoms[i].split()
# remove the "@atom:" prefix (we will put it back later)
atomtype = tokens[i_atomtype]
if atomtype.find('@atom:') == 0:
atomtype = atomtype[6:]
atomtype = Intify(atomtype)
if infer_types_from_comments:
if len(atomids_by_type[atomtype]) == 1:
atomid = tokens[i_atomid]
if atomid.find('$atom:') == 0:
atomid = atomid[6:]
atomid = Intify(atomid)
atomtype_name = atomtypes_int2name[atomtype]
atomids_int2name[atomid] = atomtype_name
sys.stderr.write(', pass3')
# Pass 3: substitute the atomid names and atom type names into l_data_atoms
for i in range(0, len(l_data_atoms)):
tokens = l_data_atoms[i].split()
atomid = tokens[i_atomid]
if atomid.find('$atom:') == 0:
atomid = atomid[6:]
# convert to an integer
atomid = Intify(atomid)
atomtype = tokens[i_atomtype]
if atomtype.find('@atom:') == 0:
atomtype = atomtype[6:]
atomtype = Intify(atomtype)
tokens = l_data_atoms[i].split()
tokens[i_atomid] = '$atom:'+atomids_int2name[atomid]
tokens[i_atomtype] = '@atom:'+atomtypes_int2name[atomtype]
l_data_atoms[i] = (' '*indent)+(' '.join(tokens)+'\n')
sys.stderr.write(')\n')
if len(l_data_atoms) == 0:
raise InputError('Error('+g_program_name+'): You have no atoms in you selection!\n'
'\n'
' Either you have chosen a set of atoms, molecules, or atom types which\n'
' does not exist, or there is a problem with (the format of) your\n'
' arguments. Check the documentation and examples.\n')
# --- Now delete items that were not selected from the other lists ---
# --- MASSES ---
# delete masses for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_data_masses):
line = l_data_masses[i_line]
tokens = line.strip().split()
atomtype = Intify(tokens[0])
if ((not (atomtype in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype, atomtype_selection)))):
del l_data_masses[i_line]
else:
atomtype_name = atomtypes_int2name[atomtype]
tokens[0] = '@atom:'+atomtype_name
l_data_masses[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- PAIR COEFFS ---
# delete data_pair_coeffs for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_data_pair_coeffs):
line = l_data_pair_coeffs[i_line]
tokens = line.strip().split()
assert(len(tokens) > 0)
split_colon = tokens[0].split(':')
assert(len(split_colon) == 2)
atomtype = Intify(split_colon[1])
if ((not (atomtype in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype, atomtype_selection)))):
del l_data_pair_coeffs[i_line]
else:
i_line += 1
# delete data_pairij_coeffs for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_data_pairij_coeffs):
line = l_data_pairij_coeffs[i_line]
tokens = line.strip().split()
assert(len(tokens) > 0)
split_colon_I = tokens[0].split(':')
assert(len(split_colon_I) == 2)
atomtype_I = Intify(split_colon_I[1])
split_colon_J = tokens[1].split(':')
assert(len(split_colon_J) == 2)
atomtype_J = Intify(split_colon_J[1])
if (((not (atomtype_I in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype_I, atomtype_selection))))
or
((not (atomtype_J in needed_atomtypes)) and
(not ((len(atomtype_selection) > 0) and
BelongsToSel(atomtype_J, atomtype_selection))))):
del l_data_pairij_coeffs[i_line]
else:
i_line += 1
# delete in_pair_coeffs for atom we don't care about anymore:
i_line = 0
while i_line < len(l_in_pair_coeffs):
line = l_in_pair_coeffs[i_line]
tokens = line.strip().split()
atomtype_i_str = tokens[1]
atomtype_j_str = tokens[2]
#if (('*' in atomtype_i_str) or
# ('*' in atomtype_j_str)):
# sys.stderr.write('WARNING: near or before '+ErrorLeader(infile, lineno)+'\n'
# ' pair_coeff command contains a \"*\" character.\n'
# ' Keep in mind that using moltemplate.sh you can manually change the\n'
# ' numbers assigned to each atom type (when using -a or -b). Make sure\n'
# ' nor to accidentally change the order of atom types in one of these\n'
# ' pair_coeff commands. For example, commands like\n'
# ' pair_coeff 10*4 20*10 0.15 3.6\n'
# ' can be generated by moltemplate.sh, however\n'
# ' they may be rejected by LAMMPS (because LAMMPS prefers this\n'
# ' pair_coeff 4*10 10*20 0.15 3.6)\n'
# ' Later on, you may want to check to make sure moltemplate.sh\n'
# ' is not doing this. (Fortunately you never have to worry unless\n'
# ' you are using the -a or -b arguments with moltemplate.sh)\n')
if ('*' in atomtype_i_str):
atomtype_i_tokens = atomtype_i_str.split('*')
if atomtype_i_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
i_a = min_sel_atomtype
else:
i_a = min_needed_atomtype
else:
i_a = Intify(atomtype_i_tokens[0])
if atomtype_i_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
i_b = max_sel_atomtype
else:
i_b = max_needed_atomtype
else:
i_b = Intify(atomtype_i_tokens[1])
else:
i_a = i_b = Intify(atomtype_i_str)
i_a_final = None
i_b_final = None
for i in range(i_a, i_b+1):
if ((i in needed_atomtypes) or (min_sel_atomtype <= i)):
i_a_final = i
break
for i in reversed(range(i_a, i_b+1)):
if ((i in needed_atomtypes) or (max_sel_atomtype >= i)):
i_b_final = i
break
#if i_a_final and i_b_final:
# if i_a_final == i_b_final:
# i_str = '@atom:type'+str(i_a_final)
# tokens[1] = i_str
# else:
# i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
if ('*' in atomtype_j_str):
atomtype_j_tokens = atomtype_j_str.split('*')
if atomtype_j_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
j_a = min_sel_atomtype
else:
j_a = min_needed_atomtype
else:
j_a = Intify(atomtype_j_tokens[0])
if atomtype_j_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
j_b = max_sel_atomtype
else:
j_b = max_needed_atomtype
else:
j_b = Intify(atomtype_j_tokens[1])
else:
j_a = j_b = Intify(atomtype_j_str)
j_a_final = None
j_b_final = None
for j in range(j_a, j_b+1):
if ((j in needed_atomtypes) or (min_sel_atomtype <= j)):
j_a_final = j
break
for j in reversed(range(j_a, j_b+1)):
if ((j in needed_atomtypes) or (max_sel_atomtype >= j)):
j_b_final = j
break
#if j_a_final and j_b_final:
# if j_a_final == j_b_final:
# j_str = '@atom:type'+str(j_a_final)
# tokens[1] = j_str
# else:
# j_str = '@{atom:type'+str(j_a_final)+'}*@{atom:type'+str(j_b_final)+'}'
if not (i_a_final and i_b_final and j_a_final and j_b_final):
del l_in_pair_coeffs[i_line]
elif (('*' in atomtype_i_str) or ('*' in atomtype_j_str)):
del l_in_pair_coeffs[i_line]
for i in range(i_a_final, i_b_final+1):
for j in range(j_a_final, j_b_final+1):
if j >= i:
#tokens[1] = '@atom:type'+str(i)
#tokens[2] = '@atom:type'+str(j)
tokens[1] = '@atom:'+atomtypes_int2name[i]
tokens[2] = '@atom:'+atomtypes_int2name[j]
l_in_pair_coeffs.insert(i_line,
(' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
#tokens[1] = '@atom:type'+tokens[1]
#tokens[2] = '@atom:type'+tokens[2]
tokens[1] = '@atom:'+atomtypes_int2name[int(tokens[1])]
tokens[2] = '@atom:'+atomtypes_int2name[int(tokens[2])]
l_in_pair_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete mass commands for atom types we don't care about anymore:
i_line = 0
while i_line < len(l_in_masses):
line = l_in_masses[i_line]
tokens = line.strip().split()
atomtype_i_str = tokens[1]
#if (('*' in atomtype_i_str) or
# ('*' in atomtype_j_str)):
# sys.stderr.write('WARNING: near or before '+ErrorLeader(infile, lineno)+'\n'
# ' pair_coeff command contains a \"*\" character.\n'
# ' Keep in mind that using moltemplate.sh you can manually change the\n'
# ' numbers assigned to each atom type (when using -a or -b). Make sure\n'
# ' nor to accidentally change the order of atom types in one of these\n'
# ' pair_coeff commands. For example, commands like\n'
# ' pair_coeff 10*4 20*10 0.15 3.6\n'
# ' can be generated by moltemplate.sh, however\n'
# ' they may be rejected by LAMMPS (because LAMMPS prefers this\n'
# ' pair_coeff 4*10 10*20 0.15 3.6)\n'
# ' Later on, you may want to check to make sure moltemplate.sh\n'
# ' is not doing this. (Fortunately you never have to worry unless\n'
# ' you are using the -a or -b arguments with moltemplate.sh)\n')
if ('*' in atomtype_i_str):
atomtype_i_tokens = atomtype_i_str.split('*')
if atomtype_i_tokens[0] == '':
if (min_sel_atomtype and
(min_sel_atomtype < min_needed_atomtype)):
i_a = min_sel_atomtype
else:
i_a = min_needed_atomtype
else:
i_a = Intify(atomtype_i_tokens[0])
if atomtype_i_tokens[1] == '':
if (max_sel_atomtype and
(max_sel_atomtype > max_needed_atomtype)):
i_b = max_sel_atomtype
else:
i_b = max_needed_atomtype
else:
i_b = Intify(atomtype_i_tokens[1])
else:
i_a = i_b = Intify(atomtype_i_str)
i_a_final = None
i_b_final = None
for i in range(i_a, i_b+1):
if ((i in needed_atomtypes) or (min_sel_atomtype <= i)):
i_a_final = i
break
for i in reversed(range(i_a, i_b+1)):
if ((i in needed_atomtypes) or (max_sel_atomtype >= i)):
i_b_final = i
break
#if i_a_final and i_b_final:
# if i_a_final == i_b_final:
# i_str = '@atom:type'+str(i_a_final)
# tokens[1] = i_str
# else:
# i_str = '@{atom:type'+str(i_a_final)+'}*@{atom:type'+str(i_b_final)+'}'
if not (i_a_final and i_b_final and j_a_final and j_b_final):
del l_in_masses[i_line]
elif ('*' in atomtype_i_str):
del l_in_masses[i_line]
for i in range(i_a_final, i_b_final+1):
#tokens[1] = '@atom:type'+str(i)
tokens[1] = '@atom:'+atomtypes_int2name[i]
# CONTINUEHERE: CHECK THAT THIS IS WORKING
l_in_masses.insert(i_line, (' '*indent)+(' '.join(tokens)+'\n'))
i_line += 1
else:
assert(i_a == i_b)
#tokens[1] = '@atom:type'+str(i_a)
tokens[1] = '@atom:'+atomtypes_int2name[i_a]
# CONTINUEHERE: CHECK THAT THIS IS WORKING
l_in_masses[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- BONDS AND BOND COEFFS ---
# delete lines from data_bonds if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_bonds):
line = l_data_bonds[i_line]
tokens = line.strip().split()
assert(len(tokens) == 4)
bondid = Intify(tokens[0])
bondtype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$bond:id'+str(bondid)
tokens[1] = '@bond:type'+str(bondtype)
#tokens[2] = '$atom:id'+str(atomid1)
#tokens[3] = '$atom:id'+str(atomid2)
tokens[2] = '$atom:'+atomids_int2name[atomid1]
tokens[3] = '$atom:'+atomids_int2name[atomid2]
needed_bondids.add(bondid)
needed_bondtypes.add(bondtype)
l_data_bonds[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del l_data_bonds[i_line]
# delete data_bond_coeffs for bondtypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_bond_coeffs):
line = l_data_bond_coeffs[i_line]
tokens = line.strip().split()
bondtype = Intify(tokens[0])
if (not (bondtype in needed_bondtypes)):
del l_data_bond_coeffs[i_line]
else:
tokens[0] = '@bond:type'+str(bondtype)
l_data_bond_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete in_bond_coeffs for bondtypes we don't care about anymore:
for bondtype in needed_bondtypes:
assert(type(bondtype) is int)
if ((min_needed_bondtype == None) or
(min_needed_bondtype > bondtype)):
min_needed_bondtype = bondtype
if ((max_needed_bondtype == None) or
(max_needed_bondtype < bondtype)):
max_needed_bondtype = bondtype
for bondid in needed_bondids:
assert(type(bondid) is int)
if ((min_needed_bondid == None) or
(min_needed_bondid > bondid)):
min_needed_bondid = bondid
if ((max_needed_bondid == None) or
(max_needed_bondid < bondid)):
max_needed_bondid = bondid
        # Rewrite (and possibly expand) the input script's bond_coeff
        # commands.  "a*b" wildcard type ranges are expanded into one explicit
        # command per surviving bond type; commands whose entire type range
        # was pruned are deleted.
        i_line = 0
        while i_line < len(l_in_bond_coeffs):
            line = l_in_bond_coeffs[i_line]
            tokens = line.strip().split()
            bondtype_str = tokens[1]
            if ('*' in bondtype_str):
                bondtype_tokens = bondtype_str.split('*')
                if bondtype_tokens[0] == '':
                    i_a = min_needed_bondtype
                else:
                    i_a = Intify(bondtype_tokens[0])
                if bondtype_tokens[1] == '':
                    i_b = max_needed_bondtype
                else:
                    i_b = Intify(bondtype_tokens[1])
            else:
                i_a = Intify(bondtype_str)
                i_b = i_a
            # clip [i_a, i_b] to the range of bond types still in use
            if i_a < min_needed_bondtype:
                i_a = min_needed_bondtype
            if i_b > max_needed_bondtype:
                i_b = max_needed_bondtype
            #if i_a == i_b:
            #    i_str = '@bond:type'+str(i_a)
            #    tokens[1] = i_str
            #else:
            #    i_str = '@{bond:type'+str(j_a)+'}*@{bond:type'+str(j_b)+'}'
            if ('*' in bondtype_str):
                # replace the wildcard with one command per surviving type
                del l_in_bond_coeffs[i_line]
                for i in range(i_a, i_b+1):
                    if (i in needed_bondtypes):
                        tokens[1] = '@bond:type'+str(i)
                        l_in_bond_coeffs.insert(i_line,
                                                (' '*indent)+(' '.join(tokens)+'\n'))
                        i_line += 1
            else:
                if i_a < i_b:
                    raise InputError('Error: number of bond types in data file is not consistent with the\n'
                                     '       number of bond types you have define bond_coeffs for.\n')
                if (i_a == i_b) and (i_a in needed_bondtypes):
                    tokens[1] = '@bond:type'+str(i_a)
                    l_in_bond_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
                    i_line += 1
                else:
                    del l_in_bond_coeffs[i_line]
# --- ANGLES AND ANGLE COEFFS ---
# delete lines from data_angles if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_angles):
line = l_data_angles[i_line]
tokens = line.strip().split()
assert(len(tokens) == 5)
angleid = Intify(tokens[0])
angletype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$angle:id'+str(angleid)
tokens[1] = '@angle:type'+str(angletype)
#tokens[2] = '$atom:id'+str(atomid1)
#tokens[3] = '$atom:id'+str(atomid2)
#tokens[4] = '$atom:id'+str(atomid3)
tokens[2] = '$atom:'+atomids_int2name[atomid1]
tokens[3] = '$atom:'+atomids_int2name[atomid2]
tokens[4] = '$atom:'+atomids_int2name[atomid3]
needed_angleids.add(angleid)
needed_angletypes.add(angletype)
l_data_angles[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del l_data_angles[i_line]
# delete data_angle_coeffs for angletypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_angle_coeffs):
line = l_data_angle_coeffs[i_line]
tokens = line.strip().split()
angletype = Intify(tokens[0])
if (not (angletype in needed_angletypes)):
del l_data_angle_coeffs[i_line]
else:
tokens[0] = '@angle:type'+str(angletype)
l_data_angle_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- class2specific ----
# Do the same for BondBond and BondAngle Coeffs:
# NOTE: LAMMPS INPUT SCRIPTS, ALL CLASS2 COEFFS are represented by:
# angle_coeff, dihedral_coeff, and improper_coeff commands.
# THERE ARE NO bondbond_coeff commands, or bondangle_coeff commands,
# etc..., so we dont have to worry about l_in_bondbond_coeffs,...
# delete data_bondbond_coeffs for angletypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_bondbond_coeffs):
line = l_data_bondbond_coeffs[i_line]
tokens = line.strip().split()
angletype = Intify(tokens[0])
if (not (angletype in needed_angletypes)):
del l_data_bondbond_coeffs[i_line]
else:
tokens[0] = '@angle:type'+str(angletype)
l_data_bondbond_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete data_bondangle_coeffs for angletypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_bondangle_coeffs):
line = l_data_bondangle_coeffs[i_line]
tokens = line.strip().split()
angletype = Intify(tokens[0])
if (not (angletype in needed_angletypes)):
del l_data_bondangle_coeffs[i_line]
else:
tokens[0] = '@angle:type'+str(angletype)
l_data_bondangle_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- end of class2specific ----
# delete in_angle_coeffs for angletypes we don't care about anymore:
for angletype in needed_angletypes:
assert(type(angletype) is int)
if ((min_needed_angletype == None) or
(min_needed_angletype > angletype)):
min_needed_angletype = angletype
if ((max_needed_angletype == None) or
(max_needed_angletype < angletype)):
max_needed_angletype = angletype
for angleid in needed_angleids:
assert(type(angleid) is int)
if ((min_needed_angleid == None) or
(min_needed_angleid > angleid)):
min_needed_angleid = angleid
if ((max_needed_angleid == None) or
(max_needed_angleid < angleid)):
max_needed_angleid = angleid
        # Rewrite (and possibly expand) the input script's angle_coeff
        # commands.  "a*b" wildcard type ranges are expanded into one explicit
        # command per surviving angle type; commands whose entire type range
        # was pruned are deleted.
        i_line = 0
        while i_line < len(l_in_angle_coeffs):
            line = l_in_angle_coeffs[i_line]
            tokens = line.strip().split()
            angletype_str = tokens[1]
            if ('*' in angletype_str):
                angletype_tokens = angletype_str.split('*')
                if angletype_tokens[0] == '':
                    i_a = min_needed_angletype
                else:
                    i_a = Intify(angletype_tokens[0])
                if angletype_tokens[1] == '':
                    i_b = max_needed_angletype
                else:
                    i_b = Intify(angletype_tokens[1])
            else:
                i_a = i_b = Intify(angletype_str)
            # clip [i_a, i_b] to the range of angle types still in use
            if i_a < min_needed_angletype:
                i_a = min_needed_angletype
            if i_b > max_needed_angletype:
                i_b = max_needed_angletype
            #if i_a == i_b:
            #    i_str = '@angle:type'+str(i_a)
            #    tokens[1] = i_str
            #else:
            #    i_str = '@{angle:type'+str(j_a)+'}*@{angle:type'+str(j_b)+'}'
            if ('*' in angletype_str):
                # replace the wildcard with one command per surviving type
                del l_in_angle_coeffs[i_line]
                for i in range(i_a, i_b+1):
                    if (i in needed_angletypes):
                        tokens[1] = '@angle:type'+str(i)
                        l_in_angle_coeffs.insert(i_line,
                                                 (' '*indent)+(' '.join(tokens)+'\n'))
                        i_line += 1
            else:
                if i_a < i_b:
                    raise InputError('Error: number of angle types in data file is not consistent with the\n'
                                     '       number of angle types you have define angle_coeffs for.\n')
                if (i_a == i_b) and (i_a in needed_angletypes):
                    tokens[1] = '@angle:type'+str(i_a)
                    l_in_angle_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
                    i_line += 1
                else:
                    del l_in_angle_coeffs[i_line]
# --- DIHEDRALS AND DIHEDRAL COEFFS ---
# delete lines from data_dihedrals if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_dihedrals):
line = l_data_dihedrals[i_line]
tokens = line.strip().split()
assert(len(tokens) == 6)
dihedralid = Intify(tokens[0])
dihedraltype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
atomid4 = Intify(tokens[5])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$dihedral:id'+str(dihedralid)
tokens[1] = '@dihedral:type'+str(dihedraltype)
#tokens[2] = '$atom:id'+str(atomid1)
#tokens[3] = '$atom:id'+str(atomid2)
#tokens[4] = '$atom:id'+str(atomid3)
#tokens[5] = '$atom:id'+str(atomid4)
tokens[2] = '$atom:'+atomids_int2name[atomid1]
tokens[3] = '$atom:'+atomids_int2name[atomid2]
tokens[4] = '$atom:'+atomids_int2name[atomid3]
tokens[5] = '$atom:'+atomids_int2name[atomid4]
needed_dihedralids.add(dihedralid)
needed_dihedraltypes.add(dihedraltype)
l_data_dihedrals[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del l_data_dihedrals[i_line]
# delete data_dihedral_coeffs for dihedraltypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_dihedral_coeffs):
line = l_data_dihedral_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_dihedral_coeffs[i_line]
else:
tokens[0] = '@dihedral:type'+str(dihedraltype)
l_data_dihedral_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- class2specific ----
# Do the same for MiddleBondTorsion, EndBondTorsion, AngleTorsion,
# AngleAngleTorsion, and BondBond13 Coeffs
# NOTE: LAMMPS INPUT SCRIPTS, ALL CLASS2 COEFFS are represented by:
# angle_coeff, dihedral_coeff, and improper_coeff commands.
# THERE ARE NO "middlebondtorsion_coeff" commands, etc...so we don't
# have to worry about dealing with "l_in_middlebondtorsion_coeffs",...
# delete data_middlebondtorsion_coeffs for dihedraltypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_middlebondtorsion_coeffs):
line = l_data_middlebondtorsion_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_middlebondtorsion_coeffs[i_line]
else:
tokens[0] = '@dihedral:type'+str(dihedraltype)
l_data_middlebondtorsion_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete data_endbondtorsion_coeffs for dihedraltypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_endbondtorsion_coeffs):
line = l_data_endbondtorsion_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_endbondtorsion_coeffs[i_line]
else:
tokens[0] = '@dihedral:type'+str(dihedraltype)
l_data_endbondtorsion_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete data_angletorsion_coeffs for dihedraltypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_angletorsion_coeffs):
line = l_data_angletorsion_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_angletorsion_coeffs[i_line]
else:
tokens[0] = '@dihedral:type'+str(dihedraltype)
l_data_angletorsion_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete data_angleangletorsion_coeffs for dihedraltypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_angleangletorsion_coeffs):
line = l_data_angleangletorsion_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_angleangletorsion_coeffs[i_line]
else:
tokens[0] = '@dihedral:type'+str(dihedraltype)
l_data_angleangletorsion_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# delete data_bondbond13_coeffs for dihedraltypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_bondbond13_coeffs):
line = l_data_bondbond13_coeffs[i_line]
tokens = line.strip().split()
dihedraltype = Intify(tokens[0])
if (not (dihedraltype in needed_dihedraltypes)):
del l_data_bondbond13_coeffs[i_line]
else:
tokens[0] = '@dihedral:type'+str(dihedraltype)
l_data_bondbond13_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- end of class2specific ----
# delete in_dihedral_coeffs for dihedraltypes we don't care about anymore:
for dihedraltype in needed_dihedraltypes:
assert(type(dihedraltype) is int)
if ((min_needed_dihedraltype == None) or
(min_needed_dihedraltype > dihedraltype)):
min_needed_dihedraltype = dihedraltype
if ((max_needed_dihedraltype == None) or
(max_needed_dihedraltype < dihedraltype)):
max_needed_dihedraltype = dihedraltype
for dihedralid in needed_dihedralids:
assert(type(dihedralid) is int)
if ((min_needed_dihedralid == None) or
(min_needed_dihedralid > dihedralid)):
min_needed_dihedralid = dihedralid
if ((max_needed_dihedralid == None) or
(max_needed_dihedralid < dihedralid)):
max_needed_dihedralid = dihedralid
        # Rewrite (and possibly expand) the input script's dihedral_coeff
        # commands.  "a*b" wildcard type ranges are expanded into one explicit
        # command per surviving dihedral type; commands whose entire type
        # range was pruned are deleted.
        i_line = 0
        while i_line < len(l_in_dihedral_coeffs):
            line = l_in_dihedral_coeffs[i_line]
            tokens = line.strip().split()
            dihedraltype_str = tokens[1]
            if ('*' in dihedraltype_str):
                dihedraltype_tokens = dihedraltype_str.split('*')
                if dihedraltype_tokens[0] == '':
                    i_a = min_needed_dihedraltype
                else:
                    i_a = Intify(dihedraltype_tokens[0])
                if dihedraltype_tokens[1] == '':
                    i_b = max_needed_dihedraltype
                else:
                    i_b = Intify(dihedraltype_tokens[1])
            else:
                i_a = i_b = Intify(dihedraltype_str)
            # clip [i_a, i_b] to the range of dihedral types still in use
            if i_a < min_needed_dihedraltype:
                i_a = min_needed_dihedraltype
            if i_b > max_needed_dihedraltype:
                i_b = max_needed_dihedraltype
            #if i_a == i_b:
            #    i_str = '@dihedral:type'+str(i_a)
            #    tokens[1] = i_str
            #else:
            #    i_str = '@{dihedral:type'+str(j_a)+'}*@{dihedral:type'+str(j_b)+'}'
            if ('*' in dihedraltype_str):
                # replace the wildcard with one command per surviving type
                del l_in_dihedral_coeffs[i_line]
                for i in range(i_a, i_b+1):
                    if (i in needed_dihedraltypes):
                        tokens[1] = '@dihedral:type'+str(i)
                        l_in_dihedral_coeffs.insert(i_line,
                                                    (' '*indent)+(' '.join(tokens)+'\n'))
                        i_line += 1
            else:
                if i_a < i_b:
                    raise InputError('Error: number of dihedral types in data file is not consistent with the\n'
                                     '       number of dihedral types you have define dihedral_coeffs for.\n')
                if (i_a == i_b) and (i_a in needed_dihedraltypes):
                    tokens[1] = '@dihedral:type'+str(i_a)
                    l_in_dihedral_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
                    i_line += 1
                else:
                    del l_in_dihedral_coeffs[i_line]
# --- IMPROPERS AND IMPROPER COEFFS ---
# delete lines from data_impropers if they involve atoms we don't care about
i_line = 0
while i_line < len(l_data_impropers):
line = l_data_impropers[i_line]
tokens = line.strip().split()
assert(len(tokens) == 6)
improperid = Intify(tokens[0])
impropertype = Intify(tokens[1])
atomid1 = Intify(tokens[2])
atomid2 = Intify(tokens[3])
atomid3 = Intify(tokens[4])
atomid4 = Intify(tokens[5])
#if ((atomid1 in needed_atomids) and
# (atomid2 in needed_atomids)):
tokens[0] = '$improper:id'+str(improperid)
tokens[1] = '@improper:type'+str(impropertype)
#tokens[2] = '$atom:id'+str(atomid1)
#tokens[3] = '$atom:id'+str(atomid2)
#tokens[4] = '$atom:id'+str(atomid3)
#tokens[5] = '$atom:id'+str(atomid4)
tokens[2] = '$atom:'+atomids_int2name[atomid1]
tokens[3] = '$atom:'+atomids_int2name[atomid2]
tokens[4] = '$atom:'+atomids_int2name[atomid3]
tokens[5] = '$atom:'+atomids_int2name[atomid4]
needed_improperids.add(improperid)
needed_impropertypes.add(impropertype)
l_data_impropers[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
#else:
# del l_data_impropers[i_line]
# delete data_improper_coeffs for impropertypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_improper_coeffs):
line = l_data_improper_coeffs[i_line]
tokens = line.strip().split()
impropertype = Intify(tokens[0])
if (not (impropertype in needed_impropertypes)):
del l_data_improper_coeffs[i_line]
else:
tokens[0] = '@improper:type'+str(impropertype)
l_data_improper_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- class2specific ----
# Do the same for AngleAngle Coeffs
# NOTE: LAMMPS INPUT SCRIPTS, ALL CLASS2 COEFFS are represented by:
# angle_coeff, dihedral_coeff, and improper_coeff commands.
# THERE ARE NO "angleangle_coeff" commands, etc...so we don't
# have to worry about dealing with "l_in_angleangle_coeffs",...
# delete data_middlebondtorsion_coeffs for dihedraltypes we don't care about anymore:
# delete data_angleangle_coeffs for impropertypes we don't care about anymore:
i_line = 0
while i_line < len(l_data_angleangle_coeffs):
line = l_data_angleangle_coeffs[i_line]
tokens = line.strip().split()
impropertype = Intify(tokens[0])
if (not (impropertype in needed_impropertypes)):
del l_data_angleangle_coeffs[i_line]
else:
tokens[0] = '@improper:type'+str(impropertype)
l_data_angleangle_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
i_line += 1
# --- end of class2specific ----
# delete in_improper_coeffs for impropertypes we don't care about anymore:
for impropertype in needed_impropertypes:
assert(type(impropertype) is int)
if ((min_needed_impropertype == None) or
(min_needed_impropertype > impropertype)):
min_needed_impropertype = impropertype
if ((max_needed_impropertype == None) or
(max_needed_impropertype < impropertype)):
max_needed_impropertype = impropertype
for improperid in needed_improperids:
assert(type(improperid) is int)
if ((min_needed_improperid == None) or
(min_needed_improperid > improperid)):
min_needed_improperid = improperid
if ((max_needed_improperid == None) or
(max_needed_improperid < improperid)):
max_needed_improperid = improperid
        # Rewrite (and possibly expand) the input script's improper_coeff
        # commands.  "a*b" wildcard type ranges are expanded into one explicit
        # command per surviving improper type; commands whose entire type
        # range was pruned are deleted.
        i_line = 0
        while i_line < len(l_in_improper_coeffs):
            line = l_in_improper_coeffs[i_line]
            tokens = line.strip().split()
            impropertype_str = tokens[1]
            if ('*' in impropertype_str):
                impropertype_tokens = impropertype_str.split('*')
                if impropertype_tokens[0] == '':
                    i_a = min_needed_impropertype
                else:
                    i_a = Intify(impropertype_tokens[0])
                if impropertype_tokens[1] == '':
                    i_b = max_needed_impropertype
                else:
                    i_b = Intify(impropertype_tokens[1])
            else:
                i_a = i_b = Intify(impropertype_str)
            # clip [i_a, i_b] to the range of improper types still in use
            if i_a < min_needed_impropertype:
                i_a = min_needed_impropertype
            if i_b > max_needed_impropertype:
                i_b = max_needed_impropertype
            #if i_a == i_b:
            #    i_str = '@improper:type'+str(i_a)
            #    tokens[1] = i_str
            #else:
            #    i_str = '@{improper:type'+str(j_a)+'}*@{improper:type'+str(j_b)+'}'
            if ('*' in impropertype_str):
                # replace the wildcard with one command per surviving type
                del l_in_improper_coeffs[i_line]
                for i in range(i_a, i_b+1):
                    if (i in needed_impropertypes):
                        tokens[1] = '@improper:type'+str(i)
                        l_in_improper_coeffs.insert(i_line,
                                                    (' '*indent)+(' '.join(tokens)+'\n'))
                        i_line += 1
            else:
                if i_a < i_b:
                    raise InputError('Error: number of improper types in data file is not consistent with the\n'
                                     '       number of improper types you have define improper_coeffs for.\n')
                if (i_a == i_b) and (i_a in needed_impropertypes):
                    tokens[1] = '@improper:type'+str(i_a)
                    l_in_improper_coeffs[i_line] = (' '*indent)+(' '.join(tokens)+'\n')
                    i_line += 1
                else:
                    del l_in_improper_coeffs[i_line]
        # --- GROUPS ---
        # Now parse through all of the "group" commands and try and figure
        # out if any of these groups contain any of the atoms we are keeping.
        # If so, then save the group and write it out.
        # (I hate trying to parse this kind of text.)
        #
        # Two kinds of group commands are handled:
        #   1) explicit definitions:  group NAME type/id/molecule SELECTION
        #      (the selection is filtered against the surviving types/ids/mols)
        #   2) set operations:        group NAME union/intersect/subtract ...
        #      (filtered against the set of groups we already decided to keep)
        #if len(l_in_group) > 0:
        #    sys.stderr.write('\n'
        #                     ' --groups-- Attempting to parse \"group\" commands.\n'
        #                     '  This may cause '+g_program_name+' to crash.\n'
        #                     '  If so, comment out all group commands in your input script(s), and\n'
        #                     '  try again.  (And please report the error. -Andrew 2014-10-30)\n')
        i_line = 0
        groups_needed = set(['all'])
        while i_line < len(l_in_group):
            line = l_in_group[i_line]
            tokens = line.strip().split()
            delete_this_command = False
            explicit_definition = False
            if len(tokens) < 3:
                delete_this_command = True
            # NOTE(review): tokens[1]/tokens[2] are read even when
            # len(tokens) < 3 -- a 1-or-2-token group line would raise
            # IndexError here instead of being skipped.  TODO confirm intent.
            group_name = tokens[1]
            specifier_style = tokens[2]
            str_logical = ''
            str_selection = ''
            # The specifier may have a comparison operator fused onto it
            # (eg "type<=", "id>").  Peel it off into str_logical.
            if specifier_style[0:4] == 'type':
                str_logical+=specifier_style[4:]
                explicit_definition = True
                specifier_style = 'type'
            elif specifier_style == 'id':
                str_logical+=specifier_style[2:]
                explicit_definition = True
                specifier_style = 'id'
            elif specifier_style == 'molecule':
                str_logical+=specifier_style[8:]
                specifier_style = 'molecule'
                explicit_definition = True
            if explicit_definition:
                i_token_sel_min = 3
                if len(tokens) <= i_token_sel_min:
                    sys.stderr.write('WARNING: possible syntax error on this line:\n'
                                     +'    '+l_in_group[i_line]+'\n')
                    delete_this_command = True
                if str_logical == '':
                    # the operator (if any) is a separate token, eg "group g type <= 5"
                    str_logical = tokens[i_token_sel_min]
                    if not str_logical[0].isdigit():
                        i_token_sel_min += 1
                        if len(tokens) <= i_token_sel_min:
                            tokens.append('')
                else:
                    # re-insert the operator we peeled off the specifier token
                    tokens.insert(i_token_sel_min, str_logical)
                # find the last token belonging to the selection
                i_token_sel_max = len(tokens)-1
                for i in range(i_token_sel_min, len(tokens)):
                    if tokens[i].isdigit():
                        break
                    else:
                        i_token_sel_max = i
                assert(len(tokens) > i_token_sel_min)
                # Convert the selection into a list of (lo, hi) intervals
                # (None meaning unbounded on that side):
                if str_logical[0:2] in ('<=','>=','==','!=','<>'):
                    tokens[i_token_sel_min] = str_logical[2:] + tokens[i_token_sel_min]
                    str_logical = str_logical[0:2]
                    if str_logical == '<=':
                        l_group_selection = [ (None,int(tokens[i_token_sel_min])) ]
                    elif str_logical == '>=':
                        l_group_selection = [ (int(tokens[i_token_sel_min]),None) ]
                    elif str_logical == '==':
                        l_group_selection = [ (int(tokens[i_token_sel_min]),
                                               int(tokens[i_token_sel_min])) ]
                    elif str_logical == '!=':
                        l_group_selection = [ (None,int(tokens[i_token_sel_min])-1),
                                              (int(tokens[i_token_sel_min])+1,None)]
                    elif str_logical == '<>':
                        l_group_selection = [ (int(tokens[i_token_sel_min]),
                                               int(tokens[i_token_sel_max])) ]
                elif str_logical[0:1] in ('<','>'):
                    tokens[i_token_sel_min] = str_logical[1:] + tokens[i_token_sel_min]
                    str_logical = str_logical[0:1]
                    if str_logical == '<':
                        l_group_selection = [(None,int(tokens[i_token_sel_min])-1)]
                    elif str_logical == '>':
                        l_group_selection = [(int(tokens[i_token_sel_min])+1,None)]
                else:
                    # no operator: a plain list of values / ranges ("1 3 7:10")
                    str_selection = ' '.join(tokens[i_token_sel_min:i_token_sel_max+1])
                    l_group_selection = LammpsSelectToIntervals(str_selection,
                                                                slice_delim=':',
                                                                or_delim=' ')
                mn, mx = IntervalListToMinMax(l_group_selection)
                if mn == None:
                    mn = 1
                # Keep only the selected types/ids/molecule-ids which survived
                # the earlier pruning.
                filtered_selection=[]
                if specifier_style == 'type':
                    if mx == None:
                        mx = max_needed_atomtype
                    for i in range(mn, mx+1):
                        if (BelongsToSel(i, l_group_selection)
                            and (i in needed_atomtypes)):
                            filtered_selection.append((i,i))
                elif specifier_style == 'id':
                    if mx == None:
                        mx = max_needed_atomid
                    for i in range(mn, mx+1):
                        if (BelongsToSel(i, l_group_selection)
                            and (i in needed_atomids)):
                            filtered_selection.append((i,i))
                elif specifier_style == 'molecule':
                    if mx == None:
                        mx = max_needed_molid
                    for i in range(mn, mx+1):
                        if (BelongsToSel(i, l_group_selection)
                            and (i in needed_molids)):
                            filtered_selection.append((i,i))
                MergeIntervals(filtered_selection)
                if len(filtered_selection) > 0:
                    # rebuild the command using moltemplate-style variables
                    tokens = ['group', group_name, specifier_style]
                    for interval in filtered_selection:
                        a = interval[0]
                        b = interval[1]
                        if specifier_style == 'type':
                            if a == b:
                                tokens.append('@atom:type'+str(a))
                            else:
                                tokens.append('@{atom:type'+str(a)+
                                              '}:@{atom:type'+str(b)+'}')
                        if specifier_style == 'id':
                            if a == b:
                                tokens.append('$atom:id'+str(a))
                            else:
                                tokens.append('${atom:id'+str(a)
                                              +'}:${atom:id'+str(b)+'}')
                        if specifier_style == 'molecule':
                            if a == b:
                                tokens.append('$mol:id'+str(a))
                            else:
                                tokens.append('${mol:id'+str(a)+
                                              '}:${mol:id'+str(b)+'}')
                    # Commenting out next two lines.  (This is handled later.)
                    #l_in_group[i_line] = ' '.join(tokens)
                    #groups_needed.add(group_name)
                else:
                    delete_this_command = True
            else:
                # not an explicit definition: try the set-operation styles
                if len(tokens) > 3:
                    if tokens[2] == 'union':
                        # drop operand groups we discarded earlier
                        i_token = 3
                        while i_token < len(tokens):
                            if not (tokens[i_token] in groups_needed):
                                del tokens[i_token]
                            else:
                                i_token += 1
                        # if none of the groups contain atoms we need,
                        # then delete the entire command
                        if len(tokens) <= 3:
                            delete_this_command = True
                    elif tokens[2] == 'intersect':
                        i_token = 3
                        while i_token < len(tokens):
                            if not (tokens[i_token] in groups_needed):
                                # if any of the groups we need are empty
                                # then delete the command
                                delete_this_command = True
                                break
                            i_token += 1
                    elif (tokens[2] == 'subtract') and (len(tokens) >= 5):
                        if not (tokens[3] in groups_needed):
                            delete_this_command = True
                        i_token = 4
                        while i_token < len(tokens):
                            if not (tokens[i_token] in groups_needed):
                                del tokens[i_token]
                            else:
                                i_token += 1
                    else:
                        # Otherwise I don't recongize the syntax of this
                        # group command.  In that case, I just delete it.
                        delete_this_command = True
                elif tokens[2] == 'clear':
                    pass
                elif tokens[2] == 'delete':
                    pass
                else:
                    delete_this_command = True
            if delete_this_command:
                sys.stderr.write('WARNING: Ignoring line \n\"'+l_in_group[i_line].rstrip()+'\"\n')
                del l_in_group[i_line]
            else:
                groups_needed.add(group_name)
                l_in_group[i_line] = (' '*indent) + ' '.join(tokens) + '\n'
                i_line += 1
# --- fix rigid ---
i_line = 0
while i_line < len(l_in_fix_rigid):
line = l_in_fix_rigid[i_line]
tokens = line.strip().split()
if len(tokens) < 4:
break
fixid = tokens[1]
group_name = tokens[2]
delete_this_command = True
assert(tokens[3].find('rigid') == 0)
if group_name in groups_needed:
delete_this_command = False
if delete_this_command:
sys.stderr.write('WARNING: Ignoring line \n\"'+l_in_fix_rigid[i_line].rstrip()+'\"\n')
del l_in_fix_rigid[i_line]
else:
l_in_fix_rigid[i_line] = (' '*indent) + ' '.join(tokens) + '\n'
i_line += 1
        # --- fix shake ---
        # Translate "fix ... shake ..." commands.  The argument list contains
        # sub-lists of angle types (after 'a'), bond types (after 'b'), and
        # atom types (after 't').  Types which were pruned from the selection
        # are removed from those lists; if nothing usable remains (or the
        # group itself was discarded) the whole command is dropped.
        i_line = 0
        while i_line < len(l_in_fix_shake):
            line = l_in_fix_shake[i_line]
            tokens = line.strip().split()
            if len(tokens) < 4:
                break
            fixid = tokens[1]
            group_name = tokens[2]
            delete_this_command = True
            assert(tokens[3].find('shake') == 0)
            # parse the list of angle types
            #i_token = tokens.index('a')
            for i_token in range(0, len(tokens)):
                if tokens[i_token] == 'a':
                    break
            # NOTE(review): when 'a' never occurs, i_token ends at
            # len(tokens)-1, so this "not found" test never fires.  Harmless
            # in practice: the digit-scanning loop below then does nothing.
            if i_token != len(tokens):
                i_token += 1
                while (i_token < len(tokens)) and tokens[i_token].isdigit():
                    # delete angle types from the list which
                    # do not belong to the selection
                    btype=int(tokens[i_token])
                    if int(tokens[i_token]) in needed_angletypes:
                        tokens[i_token] = '@angle:type'+tokens[i_token]
                        i_token += 1
                        delete_this_command = False
                    else:
                        del tokens[i_token]
            # parse the list of bond types
            #i_token = tokens.index('b')
            for i_token in range(0, len(tokens)):
                if tokens[i_token] == 'b':
                    break
            if i_token != len(tokens):
                i_token += 1
                while (i_token < len(tokens)) and tokens[i_token].isdigit():
                    # delete bond types from the list which
                    # do not belong to the selection
                    btype=int(tokens[i_token])
                    if int(tokens[i_token]) in needed_bondtypes:
                        tokens[i_token] = '@bond:type'+tokens[i_token]
                        i_token += 1
                        delete_this_command = False
                    else:
                        del tokens[i_token]
            # parse the list of atom types
            # i_token = tokens.index('t')
            for i_token in range(0, len(tokens)):
                if tokens[i_token] == 't':
                    break
            if i_token != len(tokens):
                i_token += 1
                while (i_token < len(tokens)) and tokens[i_token].isdigit():
                    # delete atom types from the list which
                    # do not belong to the selection
                    btype=int(tokens[i_token])
                    if int(tokens[i_token]) in needed_atomtypes:
                        tokens[i_token] = '@atom:type'+tokens[i_token]
                        i_token += 1
                        delete_this_command = False
                    else:
                        del tokens[i_token]
            # Selecting atoms by mass feature should still work, so we
            # don't need to delete or ignore these kinds of commands.
            #for i_token in range(0, len(tokens)):
            #    if tokens[i_token] == 'm':
            #        break
            #if i_token != len(tokens):
            #    delete_this_command = True
            # the 'mol' keyword cannot be translated reliably: drop the command
            if 'mol' in tokens:
                delete_this_command = True
            if not (group_name in groups_needed):
                delete_this_command = True
            if delete_this_command:
                sys.stderr.write('WARNING: Ignoring line \n\"'+l_in_fix_shake[i_line].rstrip()+'\"\n')
                del l_in_fix_shake[i_line]
            else:
                l_in_fix_shake[i_line] = (' '*indent) + ' '.join(tokens) + '\n'
                i_line += 1
        # --- fix poems ---
        # Keep "fix ... poems ..." commands only when their group survived AND
        # they use the "molecule" keyword (external-file mode is unsupported).
        i_line = 0
        while i_line < len(l_in_fix_poems):
            line = l_in_fix_poems[i_line]
            tokens = line.strip().split()
            if len(tokens) < 4:
                break
            fixid = tokens[1]
            group_name = tokens[2]
            delete_this_command = True
            assert(tokens[3].find('poems') == 0)
            if group_name in groups_needed:
                delete_this_command = False
            # NOTE(review): tokens[4] is read without checking len(tokens) > 4;
            # a 4-token fix poems line would raise IndexError -- TODO confirm
            if tokens[4] != 'molecule':
                delete_this_command = True
                sys.stderr.write('WARNING: '+g_program_name+' ONLY supports \"fix poems\" commands\n'
                                 '         which use the \"molecule\" keyword.\n')
            if tokens[4] == 'file':
                sys.stderr.write('         If you want use external files with fix poems, then you will have to\n'
                                 '         generate the file yourself.  You ask use moltemplate to generate\n'
                                 '         this file for you, by manually adding a section at the end of your\n'
                                 '         final .LT file (eg. \"system.lt\") which resembles the following:\n\n'
                                 'write(\"poems_file.txt\") {\n'
                                 '  1 1 $atom:idname1a $atom:idname2a $atom:idname3a ...\n'
                                 '  2 1 $atom:idname1b $atom:idname2b $atom:idname3b ...\n'
                                 '  3 1 $atom:idname1c $atom:idname2c $atom:idname3c ...\n'
                                 '  :   :   etc...\n'
                                 '}\n\n'
                                 '  ...where $atom:idname1a, $atom:idname2a, ... are moltemplate-compatible\n'
                                 '  unique (full,long) id-names for the atoms in each rigid body.\n'
                                 '  This will insure the atom-id numbers in this file are correct.\n'
                                 '  See the documentation for fix poems for details.\n')
            if delete_this_command:
                sys.stderr.write('WARNING: Ignoring line \n\"'+l_in_fix_poems[i_line].rstrip()+'\"\n')
                del l_in_fix_poems[i_line]
            else:
                l_in_fix_poems[i_line] = (' '*indent) + ' '.join(tokens) + '\n'
                i_line += 1
# --- fix qeq ---
i_line = 0
while i_line < len(l_in_fix_qeq):
line = l_in_fix_qeq[i_line]
tokens = line.strip().split()
if len(tokens) < 4:
break
fixid = tokens[1]
group_name = tokens[2]
delete_this_command = True
assert(tokens[3].find('qeq') == 0)
if group_name in groups_needed:
delete_this_command = False
if delete_this_command:
sys.stderr.write('WARNING: Ignoring line \n\"'+l_in_fix_qeq[i_line].rstrip()+'\"\n')
del l_in_fix_qeq[i_line]
else:
l_in_fix_qeq[i_line] = (' '*indent) + ' '.join(tokens) + '\n'
i_line += 1
# --- fix qmmm ---
i_line = 0
while i_line < len(l_in_fix_qmmm):
line = l_in_fix_qmmm[i_line]
tokens = line.strip().split()
if len(tokens) < 4:
break
fixid = tokens[1]
group_name = tokens[2]
delete_this_command = True
assert(tokens[3].find('qmmm') == 0)
if group_name in groups_needed:
delete_this_command = False
if delete_this_command:
sys.stderr.write('WARNING: Ignoring line \n\"'+l_in_fix_qmmm[i_line].rstrip()+'\"\n')
del l_in_fix_qmmm[i_line]
else:
l_in_fix_qmmm[i_line] = (' '*indent) + ' '.join(tokens) + '\n'
i_line += 1
        ########################################
        ### Now begin writing the template. ###
        ########################################
        # Everything below prints the moltemplate (.lt) translation to stdout.
        # Each category of input-script commands is wrapped in its own
        # write_once("...") { ... } block.
        if not some_pair_coeffs_read:
            sys.stderr.write('Warning: No \"pair coeffs\" set.\n'
                             '         (No interactions between non-bonded atoms defined.)\n')
            no_warnings = False
        #sys.stderr.write('Writing ttree data to standard out.\n'
        #                 '       You can redirect this to a file using:\n'+
        #                 '   '+' '.join(sys.argv)+' > filename.ttree\n'
        #                 '        ----------------------\n')
        # open the molecule definition (if a name was supplied)
        if mol_name != '':
            sys.stdout.write(mol_name + ' {\n')
        if len(l_in_init) > 0:
            sys.stdout.write('\n### LAMMPS commands for initialization\n'
                             '### (These can be overridden later.)\n\n')
            l_in_init.insert(0, (' '*cindent)+'write_once(\"'+in_init+'\") {\n')
            l_in_init.append((' '*cindent)+'}\n')
            sys.stdout.write('\n')
            sys.stdout.write(''.join(l_in_init))
        # NOTE(review): unlike the branches below, the l_in_init branch does
        # not set non_empty_output = True -- TODO confirm this is intended.
        if len(l_in_settings) > 0:
            sys.stdout.write('\n### LAMMPS commands for settings\n'
                             '### (These can be overridden later.)\n\n')
            l_in_settings.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
            l_in_settings.append((' '*cindent)+'}\n')
            sys.stdout.write('\n')
            sys.stdout.write(''.join(l_in_settings))
            non_empty_output = True
        if len(l_in_masses) > 0:
            l_in_masses.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
            l_in_masses.append((' '*cindent)+'}\n')
            sys.stdout.write('\n')
            sys.stdout.write(''.join(l_in_masses))
            non_empty_output = True
if remove_coeffs_from_data_file:
if len(l_data_pair_coeffs) > 0:
for line in l_data_pair_coeffs:
tokens = line.strip().split()
atomtype_str = tokens[0]
l_in_pair_coeffs.append((' '*cindent)+' pair_coeff '+atomtype_str+' '+atomtype_str+' '+' '.join(tokens[1:])+'\n')
l_data_pair_coeffs = []
if len(l_data_pairij_coeffs) > 0:
for line in l_data_pairij_coeffs:
l_in_pair_coeffs.append((' '*cindent)+' pair_coeff '+line.strip()+'\n')
l_data_pairij_coeffs = []
if len(l_in_pair_coeffs) > 0:
l_in_pair_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_pair_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_pair_coeffs))
non_empty_output = True
if (remove_coeffs_from_data_file and (len(l_data_bond_coeffs) > 0)):
for line in l_data_bond_coeffs:
l_in_bond_coeffs.append((' '*cindent)+' bond_coeff '+line.strip()+'\n')
l_data_bond_coeffs = []
if len(l_in_bond_coeffs) > 0:
l_in_bond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_bond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_bond_coeffs))
non_empty_output = True
if (remove_coeffs_from_data_file and (len(l_data_angle_coeffs) > 0)):
for line in l_data_angle_coeffs:
l_in_angle_coeffs.append((' '*cindent)+' angle_coeff '+line.strip()+'\n')
l_data_angle_coeffs = []
for line in l_data_bondbond_coeffs:
tokens = line.strip().split()
l_in_angle_coeffs.append((' '*cindent)+' angle_coeff '+tokens[0]+' bb '+' '.join(tokens[1:])+'\n')
l_data_bondbond_coeffs = []
for line in l_data_bondangle_coeffs:
tokens = line.strip().split()
l_in_angle_coeffs.append((' '*cindent)+' angle_coeff '+tokens[0]+' ba '+' '.join(tokens[1:])+'\n')
l_data_bondangle_coeffs = []
if len(l_in_angle_coeffs) > 0:
l_in_angle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_angle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_angle_coeffs))
non_empty_output = True
if (remove_coeffs_from_data_file and (len(l_data_dihedral_coeffs) > 0)):
for line in l_data_dihedral_coeffs:
l_in_dihedral_coeffs.append((' '*cindent)+' dihedral_coeff '+line.strip()+'\n')
l_data_dihedral_coeffs = []
for line in l_data_middlebondtorsion_coeffs:
tokens = line.strip().split()
l_in_dihedral_coeffs.append((' '*cindent)+' dihedral_coeff '+tokens[0]+' mbt '+' '.join(tokens[1:])+'\n')
l_data_middlebondtorsion_coeffs = []
for line in l_data_endbondtorsion_coeffs:
tokens = line.strip().split()
l_in_dihedral_coeffs.append((' '*cindent)+' dihedral_coeff '+tokens[0]+' ebt '+' '.join(tokens[1:])+'\n')
l_data_endbondtorsion_coeffs = []
for line in l_data_angletorsion_coeffs:
tokens = line.strip().split()
l_in_dihedral_coeffs.append((' '*cindent)+' dihedral_coeff '+tokens[0]+' at '+' '.join(tokens[1:])+'\n')
l_data_angletorsion_coeffs = []
for line in l_data_angleangletorsion_coeffs:
tokens = line.strip().split()
l_in_dihedral_coeffs.append((' '*cindent)+' dihedral_coeff '+tokens[0]+' aat '+' '.join(tokens[1:])+'\n')
l_data_angleangletorsion_coeffs = []
for line in l_data_bondbond13_coeffs:
tokens = line.strip().split()
l_in_dihedral_coeffs.append((' '*cindent)+' dihedral_coeff '+tokens[0]+' bb13 '+' '.join(tokens[1:])+'\n')
l_data_bondbond13_coeffs = []
if len(l_in_dihedral_coeffs) > 0:
l_in_dihedral_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_dihedral_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_dihedral_coeffs))
non_empty_output = True
if (remove_coeffs_from_data_file and (len(l_data_improper_coeffs) > 0)):
for line in l_data_improper_coeffs:
l_in_improper_coeffs.append((' '*cindent)+' improper_coeff '+line.strip()+'\n')
l_data_improper_coeffs = []
for line in l_data_angleangle_coeffs:
tokens = line.strip().split()
l_in_improper_coeffs.append((' '*cindent)+' improper_coeff '+tokens[0]+' aa '+' '.join(tokens[1:])+'\n')
l_data_angleangle_coeffs = []
if len(l_in_improper_coeffs) > 0:
l_in_improper_coeffs.insert(0, (' '*cindent)+'write_once(\"'+in_settings+'\") {\n')
l_in_improper_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_improper_coeffs))
non_empty_output = True
if non_empty_output:
sys.stdout.write('\n\n### DATA sections\n\n')
if len(l_data_masses) > 0:
l_data_masses.insert(0, (' '*cindent)+'write_once(\"'+data_masses+'\") {\n')
l_data_masses.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_masses))
non_empty_output = True
if len(l_data_bond_coeffs) > 0:
l_data_bond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bond_coeffs+'\") {\n')
l_data_bond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bond_coeffs))
non_empty_output = True
if len(l_data_angle_coeffs) > 0:
l_data_angle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angle_coeffs+'\") {\n')
l_data_angle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angle_coeffs))
non_empty_output = True
if len(l_data_dihedral_coeffs) > 0:
l_data_dihedral_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_dihedral_coeffs+'\") {\n')
l_data_dihedral_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedral_coeffs))
non_empty_output = True
if len(l_data_improper_coeffs) > 0:
l_data_improper_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_improper_coeffs+'\") {\n')
l_data_improper_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_improper_coeffs))
non_empty_output = True
if len(l_data_pair_coeffs) > 0:
l_data_pair_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_pair_coeffs+'\") {\n')
l_data_pair_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_pair_coeffs))
non_empty_output = True
if len(l_data_pairij_coeffs) > 0:
l_data_pairij_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_pairij_coeffs+'\") {\n')
l_data_pairij_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_pairij_coeffs))
non_empty_output = True
# class2 force fields:
if len(l_data_bondbond_coeffs) > 0:
l_data_bondbond_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondbond_coeffs+'\") {\n')
l_data_bondbond_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondbond_coeffs))
non_empty_output = True
if len(l_data_bondangle_coeffs) > 0:
l_data_bondangle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondangle_coeffs+'\") {\n')
l_data_bondangle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondangle_coeffs))
non_empty_output = True
if len(l_data_middlebondtorsion_coeffs) > 0:
l_data_middlebondtorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_middlebondtorsion_coeffs+'\") {\n')
l_data_middlebondtorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_middlebondtorsion_coeffs))
non_empty_output = True
if len(l_data_endbondtorsion_coeffs) > 0:
l_data_endbondtorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_endbondtorsion_coeffs+'\") {\n')
l_data_endbondtorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_endbondtorsion_coeffs))
non_empty_output = True
if len(l_data_angletorsion_coeffs) > 0:
l_data_angletorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angletorsion_coeffs+'\") {\n')
l_data_angletorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angletorsion_coeffs))
non_empty_output = True
if len(l_data_angleangletorsion_coeffs) > 0:
l_data_angleangletorsion_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angleangletorsion_coeffs+'\") {\n')
l_data_angleangletorsion_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angleangletorsion_coeffs))
non_empty_output = True
if len(l_data_bondbond13_coeffs) > 0:
l_data_bondbond13_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_bondbond13_coeffs+'\") {\n')
l_data_bondbond13_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bondbond13_coeffs))
non_empty_output = True
if len(l_data_angleangle_coeffs) > 0:
l_data_angleangle_coeffs.insert(0, (' '*cindent)+'write_once(\"'+data_angleangle_coeffs+'\") {\n')
l_data_angleangle_coeffs.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angleangle_coeffs))
non_empty_output = True
# automatic generation of bonded interactions by type:
if len(l_data_angles_by_type) > 0:
l_data_angles_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_angles_by_type+'\") {\n')
l_data_angles_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angles_by_type))
non_empty_output = True
if len(l_data_dihedrals_by_type) > 0:
l_data_dihedrals_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_dihedrals_by_type+'\") {\n')
l_data_dihedrals_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedrals_by_type))
non_empty_output = True
if len(l_data_impropers_by_type) > 0:
l_data_impropers_by_type.insert(0, (' '*cindent)+'write_once(\"'+data_impropers_by_type+'\") {\n')
l_data_impropers_by_type.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_impropers_by_type))
non_empty_output = True
if len(l_data_atoms) > 0:
l_data_atoms.insert(0, (' '*cindent)+'write(\"'+data_atoms+'\") {\n')
l_data_atoms.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_atoms))
non_empty_output = True
else:
sys.stderr.write('Warning: missing \"Atoms\" section.\n'
' (Did you include a LAMMPS data file in your argument list?)\n')
no_warnings = False
# non-point-like particles
if len(l_data_ellipsoids) > 0:
l_data_ellipsoids.insert(0, (' '*cindent)+'write(\"'+data_ellipsoids+'\") {\n')
l_data_ellipsoids.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_ellipsoids))
if len(l_data_lines) > 0:
l_data_lines.insert(0, (' '*cindent)+'write(\"'+data_lines+'\") {\n')
l_data_lines.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_lines))
if len(l_data_triangles) > 0:
l_data_triangles.insert(0, (' '*cindent)+'write(\"'+data_triangles+'\") {\n')
l_data_triangles.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_triangles))
# DO NOT WRITE OUT VELOCITY DATA
# (Why: because it makes it difficult to combine this molecular template
# with molecule templates from other sources which lack velocity data.
# LAMMPS (and topotools) will crash if the number of entries in the
# Velocities section of a data file does not match the number of atoms.)
# COMMENTING OUT:
#if len(l_data_velocities) > 0:
# l_data_velocities.insert(0, (' '*cindent)+'write(\"'+data_velocities+'\") {\n')
# l_data_velocities.append((' '*cindent)+'}\n')
# sys.stdout.write('\n')
# sys.stdout.write(''.join(l_data_velocities))
if len(l_data_bonds) > 0:
l_data_bonds.insert(0, (' '*cindent)+'write(\"'+data_bonds+'\") {\n')
l_data_bonds.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_bonds))
non_empty_output = True
if len(l_data_angles) > 0:
l_data_angles.insert(0, (' '*cindent)+'write(\"'+data_angles+'\") {\n')
l_data_angles.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_angles))
non_empty_output = True
if len(l_data_dihedrals) > 0:
l_data_dihedrals.insert(0, (' '*cindent)+'write(\"'+data_dihedrals+'\") {\n')
l_data_dihedrals.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_dihedrals))
non_empty_output = True
if len(l_data_impropers) > 0:
l_data_impropers.insert(0, (' '*cindent)+'write(\"'+data_impropers+'\") {\n')
l_data_impropers.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_data_impropers))
non_empty_output = True
if len(l_in_group) > 0:
no_warnings = False
l_in_group.insert(0, (' '*cindent)+'write(\"'+in_settings+'\") {\n')
l_in_group.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_group))
#sys.stderr.write('######################################################\n'
# 'WARNING: One or more \"group\" commands appear to refer to relevant atoms.\n'
# ' Please check to make sure that the group(s) generated by\n'
# ' '+g_program_name+' contain the correct atoms. (-Andrew 2014-10-30)\n'
# '######################################################\n')
assert(non_empty_output)
if len(l_in_fix_rigid) > 0:
no_warnings = False
l_in_fix_rigid.insert(0, (' '*cindent)+'write(\"'+in_settings+'\") {\n')
l_in_fix_rigid.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_fix_rigid))
sys.stderr.write('WARNING: \"fix rigid\" style command(s) applied to selected atoms.\n'
' Please make sure that the fix group(s) are defined correctly.\n'
'######################################################\n')
assert(non_empty_output)
if len(l_in_fix_shake) > 0:
no_warnings = False
l_in_fix_shake.insert(0, (' '*cindent)+'write(\"'+in_settings+'\") {\n')
l_in_fix_shake.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_fix_shake))
sys.stderr.write('WARNING: \"fix shake\" style command(s) applied to selected atoms.\n'
' Please check to make sure that the fix group(s) are defined correctly,\n'
' and also check that the atom, bond, and angle types are correct.\n'
'######################################################\n')
assert(non_empty_output)
if len(l_in_fix_poems) > 0:
no_warnings = False
l_in_fix_poems.insert(0, (' '*cindent)+'write(\"'+in_settings+'\") {\n')
l_in_fix_poems.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_fix_poems))
sys.stderr.write('WARNING: \"fix poems\" style command(s) applied to selected atoms.\n'
' Please make sure that the fix group(s) are defined correctly.\n'
'######################################################\n')
assert(non_empty_output)
if len(l_in_fix_qeq) > 0:
no_warnings = False
l_in_fix_qeq.insert(0, (' '*cindent)+'write(\"'+in_settings+'\") {\n')
l_in_fix_qeq.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_fix_qeq))
sys.stderr.write('WARNING: \"fix qeq\" style command(s) applied to selected atoms.\n'
' Please make sure that the fix group(s) are defined correctly.\n'
'######################################################\n')
assert(non_empty_output)
if len(l_in_fix_qmmm) > 0:
no_warnings = False
l_in_fix_qmmm.insert(0, (' '*cindent)+'write(\"'+in_settings+'\") {\n')
l_in_fix_qmmm.append((' '*cindent)+'}\n')
sys.stdout.write('\n')
sys.stdout.write(''.join(l_in_fix_qmmm))
sys.stderr.write('WARNING: \"fix qmmm\" style command(s) applied to selected atoms.\n'
' Please make sure that the fix group(s) are defined correctly.\n'
'######################################################\n')
assert(non_empty_output)
if mol_name != '':
sys.stdout.write('\n} # end of \"'+mol_name+'\" type definition\n')
#if non_empty_output and no_warnings:
if non_empty_output:
sys.stderr.write('WARNING: The '+g_program_name+' script has not been rigorously tested.\n'
' Exotic (many-body) pair-styles and pair-styles with\n'
' unusual syntax (such hbond/dreiding) are not understood\n'
' by '+g_program_name+' (...although they are supported by moltemplate).\n'
' Please look over the resulting LT file and check for errors.\n'
' Convert any remaining atom, bond, angle, dihedral, or improper id\n'
' or type numbers to the corresponding $ or @-style counter variables.\n'
' Feel free to report any bugs you find. (-Andrew Jewett 2015-8-02)\n')
except (ValueError, InputError) as err:
sys.stderr.write('\n'+str(err)+'\n')
sys.exit(-1)
| gpl-2.0 |
mlavin/django | tests/expressions/models.py | 16 | 2383 | """
Tests for F() query expression syntax.
"""
import uuid
from django.db import models
class Employee(models.Model):
    """A named person with an optional salary; FK target for Company."""
    firstname = models.CharField(max_length=50)
    lastname = models.CharField(max_length=50)
    # Nullable so F()-expression tests can exercise missing values.
    salary = models.IntegerField(blank=True, null=True)
    def __str__(self):
        return '%s %s' % (self.firstname, self.lastname)
class Company(models.Model):
    """A company with headcount/chair counts and two Employee FKs."""
    name = models.CharField(max_length=100)
    num_employees = models.PositiveIntegerField()
    num_chairs = models.PositiveIntegerField()
    # Required FK: deleting the CEO deletes the company (CASCADE).
    ceo = models.ForeignKey(
        Employee,
        models.CASCADE,
        related_name='company_ceo_set')
    # Optional FK: deleting the contact just nulls this field (SET_NULL).
    point_of_contact = models.ForeignKey(
        Employee,
        models.SET_NULL,
        related_name='company_point_of_contact_set',
        null=True)
    def __str__(self):
        return self.name
class Number(models.Model):
    """An integer/float pair stored under explicitly renamed columns."""
    integer = models.BigIntegerField(db_column='the_integer')
    # Field name shadows the builtin 'float'; keep it -- it is part of the
    # model's public API and column mapping.
    float = models.FloatField(null=True, db_column='the_float')
    def __str__(self):
        # NOTE(review): '%.3f' raises TypeError when self.float is None
        # (the field is null=True) -- confirm no caller str()s such rows.
        return '%i, %.3f' % (self.integer, self.float)
class Experiment(models.Model):
    """An experiment with date bookkeeping and a start/end datetime span."""
    name = models.CharField(max_length=24)
    assigned = models.DateField()
    completed = models.DateField()
    estimated_time = models.DurationField()
    start = models.DateTimeField()
    end = models.DateTimeField()
    class Meta:
        # Deterministic default ordering keeps queryset assertions stable.
        ordering = ('name',)
    def duration(self):
        """Return the actual elapsed time (end - start) as a timedelta."""
        return self.end - self.start
class Result(models.Model):
    """A timestamped result belonging to an Experiment."""
    experiment = models.ForeignKey(Experiment, models.CASCADE)
    result_time = models.DateTimeField()
    def __str__(self):
        return "Result at %s" % self.result_time
class Time(models.Model):
    """A single, optional time-of-day value."""
    time = models.TimeField(null=True)
    def __str__(self):
        return "%s" % self.time
class SimulationRun(models.Model):
    """A run bounded by two optional Time rows plus a midpoint time."""
    # related_name='+' disables the reverse accessors from Time.
    start = models.ForeignKey(Time, models.CASCADE, null=True, related_name='+')
    end = models.ForeignKey(Time, models.CASCADE, null=True, related_name='+')
    midpoint = models.TimeField()
    def __str__(self):
        return "%s (%s to %s)" % (self.midpoint, self.start, self.end)
class UUIDPK(models.Model):
    """Model whose primary key is a (defaulted) UUID."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4)
class UUID(models.Model):
    """Model holding an optional UUID value and an optional FK to UUIDPK."""
    uuid = models.UUIDField(null=True)
    uuid_fk = models.ForeignKey(UUIDPK, models.CASCADE, null=True)
    def __str__(self):
        return "%s" % self.uuid
| bsd-3-clause |
hygull/servirall-django18-site | HyGoApp/forms.py | 1 | 4985 | from django import forms
from .models import SignUp,Post,Video,VideoVirtualReality
import re
class SignUpForm(forms.ModelForm):
class Meta:
model=SignUp
# form = SignUp
fields=["username","email"] #Some fields can be excluded
def clean_username(self):
signup_dict=self.cleaned_data
print "SignUp Details : ",signup_dict
username=signup_dict["username"]
lst=re.findall(r"^([A-Z]{1})([a-z]{2,19})$",username)
print "Matched : ",lst
if not len(lst)==1:
print "Not matched..."
raise forms.ValidationError("First letter of username should be in capital followed by 2 to 19 small case letters")
return username
def clean_email(self):
signup_dict = self.cleaned_data
print "SignUp details : ",signup_dict
email = signup_dict["email"]
lst = re.findall(r"^[a-z]{3,30}@gmail.com$",email)
print "Matched : ",lst
if not len(lst) == 1:
raise forms.ValidationError("Only gmail is allowed. Email should be in the form eg. hem@gmail.com & golang@gmail.com etc.Maximum length should be 30")
return email #In absence of this line => This field can't be null
class LoginForm(forms.Form):
username = forms.CharField(max_length=20)
email = forms.EmailField(max_length=30)
def clean_username(self):
signup_dict=self.cleaned_data
print "Login Details : ",signup_dict
username = signup_dict["username"]
lst = re.findall(r"^([A-Z]{1})([a-z]{2,19})$",username)
print "Matched : ",lst
if not len(lst) == 1:
print "Not matched..."
raise forms.ValidationError("First letter of username should be in capital followed by 2 to 19 small case letters")
return username
class PostForm(forms.ModelForm):
class Meta:
model = Post
# form = SignUp
fields = ["title","description"] #Some fields can be excluded
def clean_title(self):
title = self.cleaned_data["title"]
print "Cleaning title, got >> ",title
if len(title) == 0:
return forms.ValidationError("Title should not be blank")
return title
def clean_description(self):
description = self.cleaned_data["description"]
print "Cleaning description, got >> ",description
if len(description) == 0:
return forms.ValidationError("Descrition should not be blank")
return description
class VideoForm(forms.ModelForm):
class Meta:
model = Video
# form = SignUp
fields = ["title","url"] #Some fields can be excluded
#For placeholders
widgets = {
"title":forms.TextInput(attrs={"placeholder":"Enter the title of video from youtube..."}),
"url":forms.Textarea(attrs={"placeholder":"Paste the video URL from youtube..."})
}
def clean_title(self):
title = self.cleaned_data["title"]
print "Cleaning title, got >> ",title
if len(title) == 0:
raise forms.ValidationError("Title should not be blank")
return title
def clean_url(self):
url=self.cleaned_data["url"]
print "Cleaning url, got >> ",url
l=url.split('/')
if not len(l)==4:
raise forms.ValidationError("Wrong URL. The url should be like this => https://youtu.be/Kg9DGBLHUfw")
else:
if not l[0]=="https:" or not l[1]=="":
raise forms.ValidationError("Wrong URL. The url should start with => https://. eg https://youtu.be/Kg9DGBLHUfw")
else:
if not l[2]=='youtu.be':
raise forms.ValidationError("Wrong URL, The url should contain => youtu.be. It should be like => https://youtu.be/Kg9DGBLHUfw")
return url
# def clean_email(self):
# signup_dict=self.cleaned_data
# print "Login details : ",signup_dict
# email=signup_dict["email"]
# lst=re.findall(r"^[a-z]{3,30}@gmail.com$",email)
# print "Matched : ",lst
# if not len(lst)==1:
# raise forms.ValidationError("Only gmail is allowed. Email should be in the form eg. hem@gmail.com & golang@gmail.com etc.Maximum length should be 30")
# return email #In absence of this line => This field can't be null
class VideoVirtualRealityForm(forms.ModelForm):
class Meta:
model = VideoVirtualReality
# form = SignUp
fields = ["title","url"] #Some fields can be excluded
#For placeholders
widgets = {
"title":forms.TextInput(attrs={"placeholder":"Enter the title of video from youtube..."}),
"url":forms.TextInput(attrs={"placeholder":"Paste the video URL from youtube..."})
}
def clean_title(self):
title = self.cleaned_data["title"]
print "Cleaning title, got >> ",title
if len(title)==0:
raise forms.ValidationError("Title should not be blank")
return title
def clean_url(self):
url = self.cleaned_data["url"]
print "Cleaning url, got >> ",url
l = url.split('/')
if not len(l) == 4:
raise forms.ValidationError("Wrong URL. The url should be like this => https://youtu.be/Kg9DGBLHUfw")
else:
if not l[0] == "https:" or not l[1]=="":
raise forms.ValidationError("Wrong URL. The url should start with => https://. eg https://youtu.be/Kg9DGBLHUfw")
else:
if not l[2] == 'youtu.be':
raise forms.ValidationError("Wrong URL, The url should contain => youtu.be. It should be like => https://youtu.be/Kg9DGBLHUfw")
return url
| gpl-2.0 |
Changaco/oh-mainline | vendor/packages/twisted/twisted/internet/process.py | 18 | 36481 | # -*- test-case-name: twisted.test.test_process -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
UNIX Process management.
Do NOT use this module directly - use reactor.spawnProcess() instead.
Maintainer: Itamar Shtull-Trauring
"""
# System Imports
import gc, os, sys, stat, traceback, select, signal, errno
try:
import pty
except ImportError:
pty = None
try:
import fcntl, termios
except ImportError:
fcntl = None
from zope.interface import implements
from twisted.python import log, failure
from twisted.python.util import switchUID
from twisted.internet import fdesc, abstract, error
from twisted.internet.main import CONNECTION_LOST, CONNECTION_DONE
from twisted.internet._baseprocess import BaseProcess
from twisted.internet.interfaces import IProcessTransport
# Some people were importing this, which is incorrect, just keeping it
# here for backwards compatibility:
ProcessExitedAlready = error.ProcessExitedAlready
# Maps child pid -> process handler for children that still need reaping
# (see registerReapProcessHandler / reapAllProcesses below).
reapProcessHandlers = {}
def reapAllProcesses():
    """
    Reap every process handler currently registered for reaping.
    """
    # Iterate over a snapshot: reapProcess() may unregister a handler,
    # and we must not be affected by mutation of the mapping mid-loop.
    for handler in list(reapProcessHandlers.values()):
        handler.reapProcess()
def registerReapProcessHandler(pid, process):
    """
    Register a process handler for the given pid, in case L{reapAllProcesses}
    is called.

    If the child has already exited by the time we get here, the handler's
    C{processEnded} is invoked immediately instead of registering it.

    @param pid: the pid of the process.
    @param process: a process handler.
    @raise RuntimeError: if a handler is already registered for C{pid}.
    """
    if pid in reapProcessHandlers:
        raise RuntimeError("Try to register an already registered process.")
    try:
        # Non-blocking poll in case the child exited before registration.
        auxPID, status = os.waitpid(pid, os.WNOHANG)
    except:
        # NOTE(review): deliberately broad -- any waitpid failure is logged
        # and treated as "child not yet exited" rather than propagated.
        log.msg('Failed to reap %d:' % pid)
        log.err()
        auxPID = None
    if auxPID:
        # Child already exited: report it now; nothing left to reap later.
        process.processEnded(status)
    else:
        # if auxPID is 0, there are children but none have exited
        reapProcessHandlers[pid] = process
def unregisterReapProcessHandler(pid, process):
    """
    Unregister a process handler previously registered with
    L{registerReapProcessHandler}.

    @raise RuntimeError: if C{process} is not the handler registered
        for C{pid}.
    """
    # Sentinel distinguishes "pid absent" from any stored handler value.
    _missing = object()
    if not (reapProcessHandlers.get(pid, _missing) == process):
        raise RuntimeError("Try to unregister a process not registered.")
    del reapProcessHandlers[pid]
def detectLinuxBrokenPipeBehavior():
    """
    On some Linux versions, a write-only pipe end is reported as readable
    by select(). Probe for that bug and record the result in the module
    flag C{brokenLinuxPipeBehavior}; see L{ProcessWriter.doRead} for a more
    detailed explanation of how the flag is used.
    """
    global brokenLinuxPipeBehavior
    r, w = os.pipe()
    os.write(w, 'a')
    # Linux < 2.6.11 says a write-only pipe end is readable here.
    readable, _, _ = select.select([w], [], [], 0)
    brokenLinuxPipeBehavior = bool(readable)
    os.close(r)
    os.close(w)

# Probe once at import time.
detectLinuxBrokenPipeBehavior()
class ProcessWriter(abstract.FileDescriptor):
    """
    (Internal) Helper class to write into a Process's input pipe.

    I am a helper which describes a selectable asynchronous writer to a
    process's input pipe, including stdin.

    @ivar enableReadHack: A flag which determines how readability on this
        write descriptor will be handled. If C{True}, then readability may
        indicate the reader for this write descriptor has been closed (ie,
        the connection has been lost). If C{False}, then readability events
        are ignored.
    """
    # Treated as connected from construction (abstract.FileDescriptor flag).
    connected = 1
    # NOTE(review): 'ic' is never referenced within this class -- confirm it
    # is unused elsewhere before removing.
    ic = 0
    enableReadHack = False
    def __init__(self, reactor, proc, name, fileno, forceReadHack=False):
        """
        Initialize, specifying a Process instance to connect to.

        @param proc: the owning Process; notified via childConnectionLost.
        @param name: identifier passed back to proc on connection loss.
        @param fileno: the write-end file descriptor (made non-blocking).
        @param forceReadHack: if true, always enable close-detection-by-read.
        """
        abstract.FileDescriptor.__init__(self, reactor)
        fdesc.setNonBlocking(fileno)
        self.proc = proc
        self.name = name
        self.fd = fileno
        if not stat.S_ISFIFO(os.fstat(self.fileno()).st_mode):
            # If the fd is not a pipe, then the read hack is never
            # applicable. This case arises when ProcessWriter is used by
            # StandardIO and stdout is redirected to a normal file.
            self.enableReadHack = False
        elif forceReadHack:
            self.enableReadHack = True
        else:
            # Detect if this fd is actually a write-only fd. If it's
            # valid to read, don't try to detect closing via read.
            # This really only means that we cannot detect a TTY's write
            # pipe being closed.
            try:
                os.read(self.fileno(), 0)
            except OSError:
                # It's a write-only pipe end, enable hack
                self.enableReadHack = True
        if self.enableReadHack:
            self.startReading()
    def fileno(self):
        """
        Return the fileno() of my process's stdin.
        """
        return self.fd
    def writeSomeData(self, data):
        """
        Write some data to the open process; returns whatever
        fdesc.writeToFD returns (bytes written or an error condition).
        """
        rv = fdesc.writeToFD(self.fd, data)
        if rv == len(data) and self.enableReadHack:
            # If the send buffer is now empty and it is necessary to monitor
            # this descriptor for readability to detect close, try detecting
            # readability now.
            self.startReading()
        return rv
    def write(self, data):
        # Suspend the readability-based close detection while data is
        # buffered; writeSomeData() re-enables it once the buffer drains.
        self.stopReading()
        abstract.FileDescriptor.write(self, data)
    def doRead(self):
        """
        The only way a write pipe can become "readable" is at EOF, because the
        child has closed it, and we're using a reactor which doesn't
        distinguish between readable and closed (such as the select reactor).

        Except that's not true on linux < 2.6.11. It has the following
        characteristics: write pipe is completely empty => POLLOUT (writable in
        select), write pipe is not completely empty => POLLIN (readable in
        select), write pipe's reader closed => POLLIN|POLLERR (readable and
        writable in select)

        That's what this funky code is for. If linux was not broken, this
        function could be simply "return CONNECTION_LOST".

        BUG: We call select no matter what the reactor.
        If the reactor is pollreactor, and the fd is > 1024, this will fail.
        (only occurs on broken versions of linux, though).
        """
        if self.enableReadHack:
            if brokenLinuxPipeBehavior:
                fd = self.fd
                r, w, x = select.select([fd], [fd], [], 0)
                # Readable AND writable together means the reader closed.
                if r and w:
                    return CONNECTION_LOST
            else:
                return CONNECTION_LOST
        else:
            # Read hack disabled: spurious readability, just stop watching.
            self.stopReading()
    def connectionLost(self, reason):
        """
        See abstract.FileDescriptor.connectionLost.
        """
        # At least on OS X 10.4, exiting while stdout is non-blocking can
        # result in data loss. For some reason putting the file descriptor
        # back into blocking mode seems to resolve this issue.
        fdesc.setBlocking(self.fd)
        abstract.FileDescriptor.connectionLost(self, reason)
        self.proc.childConnectionLost(self.name, reason)
class ProcessReader(abstract.FileDescriptor):
    """
    ProcessReader

    I am a selectable representation of a process's output pipe, such as
    stdout and stderr.
    """
    # Treated as connected from construction (abstract.FileDescriptor flag).
    connected = 1
    def __init__(self, reactor, proc, name, fileno):
        """
        Initialize, specifying a process to connect to.

        @param proc: the owning Process; receives childDataReceived /
            childConnectionLost callbacks.
        @param name: identifier passed back to proc with each callback.
        @param fileno: the read-end file descriptor (made non-blocking).
        """
        abstract.FileDescriptor.__init__(self, reactor)
        fdesc.setNonBlocking(fileno)
        self.proc = proc
        self.name = name
        self.fd = fileno
        self.startReading()
    def fileno(self):
        """
        Return the fileno() of my process's stderr.
        """
        return self.fd
    def writeSomeData(self, data):
        # the only time this is actually called is after .loseConnection Any
        # actual write attempt would fail, so we must avoid that. This hack
        # allows us to use .loseConnection on both readers and writers.
        assert data == ""
        return CONNECTION_LOST
    def doRead(self):
        """
        This is called when the pipe becomes readable.
        """
        return fdesc.readFromFD(self.fd, self.dataReceived)
    def dataReceived(self, data):
        # Forward pipe output to the owning process, tagged with our name.
        self.proc.childDataReceived(self.name, data)
    def loseConnection(self):
        # Stop reading now; deliver connectionLost on the next reactor
        # iteration so callers are not re-entered synchronously.
        if self.connected and not self.disconnecting:
            self.disconnecting = 1
            self.stopReading()
            self.reactor.callLater(0, self.connectionLost,
                                   failure.Failure(CONNECTION_DONE))
    def connectionLost(self, reason):
        """
        Close my end of the pipe, signal the Process (which signals the
        ProcessProtocol).
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        self.proc.childConnectionLost(self.name, reason)
class _BaseProcess(BaseProcess, object):
    """
    Base class for Process and PTYProcess.
    """
    # NOTE(review): waitpid() status word -- assigned outside this chunk;
    # confirm against the rest of the file.
    status = None
    # Child process id; None when there is no live child
    # (signalProcess raises ProcessExitedAlready in that case).
    pid = None
    def reapProcess(self):
        """
        Try to reap a process (without blocking) via waitpid.

        This is called when sigchild is caught or a Process object loses its
        "connection" (stdout is closed) This ought to result in reaping all
        zombie processes, since it will be called twice as often as it needs
        to be.

        (Unfortunately, this is a slightly experimental approach, since
        UNIX has no way to be really sure that your process is going to
        go away w/o blocking. I don't want to block.)
        """
        try:
            try:
                pid, status = os.waitpid(self.pid, os.WNOHANG)
            except OSError, e:
                if e.errno == errno.ECHILD:
                    # no child process
                    pid = None
                else:
                    raise
        except:
            # NOTE(review): deliberately broad -- any other reap failure is
            # logged and treated as "nothing reaped".
            log.msg('Failed to reap %d:' % self.pid)
            log.err()
            pid = None
        # With WNOHANG, waitpid returns pid 0 when the child is still
        # running, so a truthy pid means the child really exited.
        if pid:
            self.processEnded(status)
            unregisterReapProcessHandler(pid, self)
def _getReason(self, status):
exitCode = sig = None
if os.WIFEXITED(status):
exitCode = os.WEXITSTATUS(status)
else:
sig = os.WTERMSIG(status)
if exitCode or sig:
return error.ProcessTerminated(exitCode, sig, status)
return error.ProcessDone(status)
def signalProcess(self, signalID):
"""
Send the given signal C{signalID} to the process. It'll translate a
few signals ('HUP', 'STOP', 'INT', 'KILL', 'TERM') from a string
representation to its int value, otherwise it'll pass directly the
value provided
@type signalID: C{str} or C{int}
"""
if signalID in ('HUP', 'STOP', 'INT', 'KILL', 'TERM'):
signalID = getattr(signal, 'SIG%s' % (signalID,))
if self.pid is None:
raise ProcessExitedAlready()
os.kill(self.pid, signalID)
def _resetSignalDisposition(self):
# The Python interpreter ignores some signals, and our child
# process will inherit that behaviour. To have a child process
# that responds to signals normally, we need to reset our
# child process's signal handling (just) after we fork and
# before we execvpe.
for signalnum in range(1, signal.NSIG):
if signal.getsignal(signalnum) == signal.SIG_IGN:
# Reset signal handling to the default
signal.signal(signalnum, signal.SIG_DFL)
def _fork(self, path, uid, gid, executable, args, environment, **kwargs):
"""
Fork and then exec sub-process.
@param path: the path where to run the new process.
@type path: C{str}
@param uid: if defined, the uid used to run the new process.
@type uid: C{int}
@param gid: if defined, the gid used to run the new process.
@type gid: C{int}
@param executable: the executable to run in a new process.
@type executable: C{str}
@param args: arguments used to create the new process.
@type args: C{list}.
@param environment: environment used for the new process.
@type environment: C{dict}.
@param kwargs: keyword arguments to L{_setupChild} method.
"""
settingUID = (uid is not None) or (gid is not None)
if settingUID:
curegid = os.getegid()
currgid = os.getgid()
cureuid = os.geteuid()
curruid = os.getuid()
if uid is None:
uid = cureuid
if gid is None:
gid = curegid
# prepare to change UID in subprocess
os.setuid(0)
os.setgid(0)
collectorEnabled = gc.isenabled()
gc.disable()
try:
self.pid = os.fork()
except:
# Still in the parent process
if settingUID:
os.setregid(currgid, curegid)
os.setreuid(curruid, cureuid)
if collectorEnabled:
gc.enable()
raise
else:
if self.pid == 0: # pid is 0 in the child process
# do not put *ANY* code outside the try block. The child process
# must either exec or _exit. If it gets outside this block (due
# to an exception that is not handled here, but which might be
# handled higher up), there will be two copies of the parent
# running in parallel, doing all kinds of damage.
# After each change to this code, review it to make sure there
# are no exit paths.
try:
# Stop debugging. If I am, I don't care anymore.
sys.settrace(None)
self._setupChild(**kwargs)
self._execChild(path, settingUID, uid, gid,
executable, args, environment)
except:
# If there are errors, bail and try to write something
# descriptive to stderr.
# XXX: The parent's stderr isn't necessarily fd 2 anymore, or
# even still available
# XXXX: however even libc assumes write(2, err) is a useful
# thing to attempt
try:
stderr = os.fdopen(2, 'w')
stderr.write("Upon execvpe %s %s in environment %s\n:" %
(executable, str(args),
"id %s" % id(environment)))
traceback.print_exc(file=stderr)
stderr.flush()
for fd in range(3):
os.close(fd)
except:
pass # make *sure* the child terminates
# Did you read the comment about not adding code here?
os._exit(1)
# we are now in parent process
if settingUID:
os.setregid(currgid, curegid)
os.setreuid(curruid, cureuid)
if collectorEnabled:
gc.enable()
self.status = -1 # this records the exit status of the child
def _setupChild(self, *args, **kwargs):
"""
Setup the child process. Override in subclasses.
"""
raise NotImplementedError()
def _execChild(self, path, settingUID, uid, gid,
executable, args, environment):
"""
The exec() which is done in the forked child.
"""
if path:
os.chdir(path)
# set the UID before I actually exec the process
if settingUID:
switchUID(uid, gid)
os.execvpe(executable, args, environment)
def __repr__(self):
"""
String representation of a process.
"""
return "<%s pid=%s status=%s>" % (self.__class__.__name__,
self.pid, self.status)
class _FDDetector(object):
"""
This class contains the logic necessary to decide which of the available
system techniques should be used to detect the open file descriptors for
the current process. The chosen technique gets monkey-patched into the
_listOpenFDs method of this class so that the detection only needs to occur
once.
@ivars listdir: The implementation of listdir to use. This gets overwritten
by the test cases.
@ivars getpid: The implementation of getpid to use, returns the PID of the
running process.
@ivars openfile: The implementation of open() to use, by default the Python
builtin.
"""
# So that we can unit test this
listdir = os.listdir
getpid = os.getpid
openfile = open
def _listOpenFDs(self):
"""
Figure out which implementation to use, then run it.
"""
self._listOpenFDs = self._getImplementation()
return self._listOpenFDs()
def _getImplementation(self):
"""
Check if /dev/fd works, if so, use that. Otherwise, check if
/proc/%d/fd exists, if so use that.
Otherwise, ask resource.getrlimit, if that throws an exception, then
fallback to _fallbackFDImplementation.
"""
try:
self.listdir("/dev/fd")
if self._checkDevFDSanity(): # FreeBSD support :-)
return self._devFDImplementation
else:
return self._fallbackFDImplementation
except:
try:
self.listdir("/proc/%d/fd" % (self.getpid(),))
return self._procFDImplementation
except:
try:
self._resourceFDImplementation() # Imports resource
return self._resourceFDImplementation
except:
return self._fallbackFDImplementation
def _checkDevFDSanity(self):
"""
Returns true iff opening a file modifies the fds visible
in /dev/fd, as it should on a sane platform.
"""
start = self.listdir("/dev/fd")
fp = self.openfile("/dev/null", "r")
end = self.listdir("/dev/fd")
return start != end
def _devFDImplementation(self):
"""
Simple implementation for systems where /dev/fd actually works.
See: http://www.freebsd.org/cgi/man.cgi?fdescfs
"""
dname = "/dev/fd"
result = [int(fd) for fd in os.listdir(dname)]
return result
def _procFDImplementation(self):
"""
Simple implementation for systems where /proc/pid/fd exists (we assume
it works).
"""
dname = "/proc/%d/fd" % (os.getpid(),)
return [int(fd) for fd in os.listdir(dname)]
def _resourceFDImplementation(self):
"""
Fallback implementation where the resource module can inform us about
how many FDs we can expect.
Note that on OS-X we expect to be using the /dev/fd implementation.
"""
import resource
maxfds = resource.getrlimit(resource.RLIMIT_NOFILE)[1] + 1
# OS-X reports 9223372036854775808. That's a lot of fds
# to close
if maxfds > 1024:
maxfds = 1024
return xrange(maxfds)
def _fallbackFDImplementation(self):
"""
Fallback-fallback implementation where we just assume that we need to
close 256 FDs.
"""
maxfds = 256
return xrange(maxfds)
# Module-level singleton; the detection result is cached on this instance
# (its _listOpenFDs attribute is replaced on first use).
detector = _FDDetector()
def _listOpenFDs():
    """
    Use the global detector object to figure out which FD implementation to
    use.
    """
    return detector._listOpenFDs()
class Process(_BaseProcess):
    """
    An operating-system Process.
    This represents an operating-system process with arbitrary input/output
    pipes connected to it. Those pipes may represent standard input,
    standard output, and standard error, or any other file descriptor.
    On UNIX, this is implemented using fork(), exec(), pipe()
    and fcntl(). These calls may not exist elsewhere so this
    code is not cross-platform. (also, windows can only select
    on sockets...)
    """
    implements(IProcessTransport)
    # Set debug/debug_child to True to trace pipe setup in parent/child.
    debug = False
    debug_child = False
    status = -1
    pid = None
    # Factories are class attributes so tests/subclasses can substitute them.
    processWriterFactory = ProcessWriter
    processReaderFactory = ProcessReader
    def __init__(self,
                 reactor, executable, args, environment, path, proto,
                 uid=None, gid=None, childFDs=None):
        """
        Spawn an operating-system process.
        This is where the hard work of disconnecting all currently open
        files / forking / executing the new process happens. (This is
        executed automatically when a Process is instantiated.)
        This will also run the subprocess as a given user ID and group ID, if
        specified. (Implementation Note: this doesn't support all the arcane
        nuances of setXXuid on UNIX: it will assume that either your effective
        or real UID is 0.)
        """
        if not proto:
            # With no protocol there is nobody to consume/produce pipe data,
            # so no read/write pipes may be requested.
            assert 'r' not in childFDs.values()
            assert 'w' not in childFDs.values()
        _BaseProcess.__init__(self, proto)
        self.pipes = {}
        # keys are childFDs, we can sense them closing
        # values are ProcessReader/ProcessWriters
        helpers = {}
        # keys are childFDs
        # values are parentFDs
        if childFDs is None:
            childFDs = {0: "w", # we write to the child's stdin
                        1: "r", # we read from their stdout
                        2: "r", # and we read from their stderr
                        }
        debug = self.debug
        if debug: print "childFDs", childFDs
        # Track every fd we create so they can all be closed on failure.
        _openedPipes = []
        def pipe():
            r, w = os.pipe()
            _openedPipes.extend([r, w])
            return r, w
        # fdmap.keys() are filenos of pipes that are used by the child.
        fdmap = {} # maps childFD to parentFD
        try:
            for childFD, target in childFDs.items():
                if debug: print "[%d]" % childFD, target
                if target == "r":
                    # we need a pipe that the parent can read from
                    readFD, writeFD = pipe()
                    if debug: print "readFD=%d, writeFD=%d" % (readFD, writeFD)
                    fdmap[childFD] = writeFD     # child writes to this
                    helpers[childFD] = readFD    # parent reads from this
                elif target == "w":
                    # we need a pipe that the parent can write to
                    readFD, writeFD = pipe()
                    if debug: print "readFD=%d, writeFD=%d" % (readFD, writeFD)
                    fdmap[childFD] = readFD      # child reads from this
                    helpers[childFD] = writeFD   # parent writes to this
                else:
                    # Inherit an existing descriptor directly.
                    assert type(target) == int, '%r should be an int' % (target,)
                    fdmap[childFD] = target      # parent ignores this
            if debug: print "fdmap", fdmap
            if debug: print "helpers", helpers
            # the child only cares about fdmap.values()
            self._fork(path, uid, gid, executable, args, environment, fdmap=fdmap)
        except:
            # On any failure, close every pipe fd we opened before re-raising.
            map(os.close, _openedPipes)
            raise
        # we are the parent process:
        self.proto = proto
        # arrange for the parent-side pipes to be read and written
        for childFD, parentFD in helpers.items():
            # The child's end of each pipe is no longer needed in the parent.
            os.close(fdmap[childFD])
            if childFDs[childFD] == "r":
                reader = self.processReaderFactory(reactor, self, childFD,
                                        parentFD)
                self.pipes[childFD] = reader
            if childFDs[childFD] == "w":
                writer = self.processWriterFactory(reactor, self, childFD,
                                        parentFD, forceReadHack=True)
                self.pipes[childFD] = writer
        try:
            # the 'transport' is used for some compatibility methods
            if self.proto is not None:
                self.proto.makeConnection(self)
        except:
            log.err()
        # The reactor might not be running yet.  This might call back into
        # processEnded synchronously, triggering an application-visible
        # callback.  That's probably not ideal.  The replacement API for
        # spawnProcess should improve upon this situation.
        registerReapProcessHandler(self.pid, self)
    def _setupChild(self, fdmap):
        """
        fdmap[childFD] = parentFD
        The child wants to end up with 'childFD' attached to what used to be
        the parent's parentFD. As an example, a bash command run like
        'command 2>&1' would correspond to an fdmap of {0:0, 1:1, 2:1}.
        'command >foo.txt' would be {0:0, 1:os.open('foo.txt'), 2:2}.
        This is accomplished in two steps::
            1. close all file descriptors that aren't values of fdmap.  This
               means 0 .. maxfds (or just the open fds within that range, if
               the platform supports '/proc/<pid>/fd').
            2. for each childFD::
                 - if fdmap[childFD] == childFD, the descriptor is already in
                   place.  Make sure the CLOEXEC flag is not set, then delete
                   the entry from fdmap.
                 - if childFD is in fdmap.values(), then the target descriptor
                   is busy. Use os.dup() to move it elsewhere, update all
                   fdmap[childFD] items that point to it, then close the
                   original. Then fall through to the next case.
                 - now fdmap[childFD] is not in fdmap.values(), and is free.
                   Use os.dup2() to move it to the right place, then close the
                   original.
        """
        debug = self.debug_child
        if debug:
            errfd = sys.stderr
            errfd.write("starting _setupChild\n")
        destList = fdmap.values()
        # Step 1: close every fd the child does not need.
        for fd in _listOpenFDs():
            if fd in destList:
                continue
            if debug and fd == errfd.fileno():
                continue
            try:
                os.close(fd)
            except:
                pass
        # at this point, the only fds still open are the ones that need to
        # be moved to their appropriate positions in the child (the targets
        # of fdmap, i.e. fdmap.values() )
        if debug: print >>errfd, "fdmap", fdmap
        # Step 2: move each surviving fd onto its childFD slot.
        childlist = fdmap.keys()
        childlist.sort()
        for child in childlist:
            target = fdmap[child]
            if target == child:
                # fd is already in place
                if debug: print >>errfd, "%d already in place" % target
                fdesc._unsetCloseOnExec(child)
            else:
                if child in fdmap.values():
                    # we can't replace child-fd yet, as some other mapping
                    # still needs the fd it wants to target. We must preserve
                    # that old fd by duping it to a new home.
                    newtarget = os.dup(child) # give it a safe home
                    if debug: print >>errfd, "os.dup(%d) -> %d" % (child,
                                                                   newtarget)
                    os.close(child) # close the original
                    for c, p in fdmap.items():
                        if p == child:
                            fdmap[c] = newtarget # update all pointers
                # now it should be available
                if debug: print >>errfd, "os.dup2(%d,%d)" % (target, child)
                os.dup2(target, child)
        # At this point, the child has everything it needs. We want to close
        # everything that isn't going to be used by the child, i.e.
        # everything not in fdmap.keys(). The only remaining fds open are
        # those in fdmap.values().
        # Any given fd may appear in fdmap.values() multiple times, so we
        # need to remove duplicates first.
        old = []
        for fd in fdmap.values():
            if not fd in old:
                if not fd in fdmap.keys():
                    old.append(fd)
        if debug: print >>errfd, "old", old
        for fd in old:
            os.close(fd)
        self._resetSignalDisposition()
    def writeToChild(self, childFD, data):
        # Raises KeyError if childFD has no writer pipe.
        self.pipes[childFD].write(data)
    def closeChildFD(self, childFD):
        # for writer pipes, loseConnection tries to write the remaining data
        # out to the pipe before closing it
        # if childFD is not in the list of pipes, assume that it is already
        # closed
        if childFD in self.pipes:
            self.pipes[childFD].loseConnection()
    def pauseProducing(self):
        # Stop polling every reader pipe (IProducer support).
        for p in self.pipes.itervalues():
            if isinstance(p, ProcessReader):
                p.stopReading()
    def resumeProducing(self):
        # Resume polling every reader pipe.
        for p in self.pipes.itervalues():
            if isinstance(p, ProcessReader):
                p.startReading()
    # compatibility
    def closeStdin(self):
        """
        Call this to close standard input on this process.
        """
        self.closeChildFD(0)
    def closeStdout(self):
        self.closeChildFD(1)
    def closeStderr(self):
        self.closeChildFD(2)
    def loseConnection(self):
        self.closeStdin()
        self.closeStderr()
        self.closeStdout()
    def write(self, data):
        """
        Call this to write to standard input on this process.
        NOTE: This will silently lose data if there is no standard input.
        """
        if 0 in self.pipes:
            self.pipes[0].write(data)
    def registerProducer(self, producer, streaming):
        """
        Call this to register producer for standard input.
        If there is no standard input producer.stopProducing() will
        be called immediately.
        """
        if 0 in self.pipes:
            self.pipes[0].registerProducer(producer, streaming)
        else:
            producer.stopProducing()
    def unregisterProducer(self):
        """
        Call this to unregister producer for standard input."""
        if 0 in self.pipes:
            self.pipes[0].unregisterProducer()
    def writeSequence(self, seq):
        """
        Call this to write to standard input on this process.
        NOTE: This will silently lose data if there is no standard input.
        """
        if 0 in self.pipes:
            self.pipes[0].writeSequence(seq)
    def childDataReceived(self, name, data):
        # Forward pipe data to the protocol, tagged with the child FD.
        self.proto.childDataReceived(name, data)
    def childConnectionLost(self, childFD, reason):
        # this is called when one of the helpers (ProcessReader or
        # ProcessWriter) notices their pipe has been closed
        os.close(self.pipes[childFD].fileno())
        del self.pipes[childFD]
        try:
            self.proto.childConnectionLost(childFD)
        except:
            log.err()
        self.maybeCallProcessEnded()
    def maybeCallProcessEnded(self):
        # we don't call ProcessProtocol.processEnded until:
        #  the child has terminated, AND
        #  all writers have indicated an error status, AND
        #  all readers have indicated EOF
        # This insures that we've gathered all output from the process.
        if self.pipes:
            return
        if not self.lostProcess:
            self.reapProcess()
            return
        _BaseProcess.maybeCallProcessEnded(self)
class PTYProcess(abstract.FileDescriptor, _BaseProcess):
    """
    An operating-system Process that uses PTY support.
    """
    implements(IProcessTransport)
    status = -1
    pid = None
    def __init__(self, reactor, executable, args, environment, path, proto,
                 uid=None, gid=None, usePTY=None):
        """
        Spawn an operating-system process.
        This is where the hard work of disconnecting all currently open
        files / forking / executing the new process happens. (This is
        executed automatically when a Process is instantiated.)
        This will also run the subprocess as a given user ID and group ID, if
        specified. (Implementation Note: this doesn't support all the arcane
        nuances of setXXuid on UNIX: it will assume that either your effective
        or real UID is 0.)
        """
        if pty is None and not isinstance(usePTY, (tuple, list)):
            # no pty module and we didn't get a pty to use
            raise NotImplementedError(
                "cannot use PTYProcess on platforms without the pty module.")
        abstract.FileDescriptor.__init__(self, reactor)
        _BaseProcess.__init__(self, proto)
        if isinstance(usePTY, (tuple, list)):
            # Caller supplied an already-opened pty triple.
            masterfd, slavefd, ttyname = usePTY
        else:
            masterfd, slavefd = pty.openpty()
            ttyname = os.ttyname(slavefd)
        try:
            self._fork(path, uid, gid, executable, args, environment,
                       masterfd=masterfd, slavefd=slavefd)
        except:
            # Only close fds we opened ourselves; caller-supplied ptys are
            # the caller's responsibility.
            if not isinstance(usePTY, (tuple, list)):
                os.close(masterfd)
                os.close(slavefd)
            raise
        # we are now in parent process:
        os.close(slavefd)
        fdesc.setNonBlocking(masterfd)
        self.fd = masterfd
        self.startReading()
        self.connected = 1
        self.status = -1
        try:
            self.proto.makeConnection(self)
        except:
            log.err()
        registerReapProcessHandler(self.pid, self)
    def _setupChild(self, masterfd, slavefd):
        """
        Setup child process after fork() but before exec().
        """
        os.close(masterfd)
        # Detach from the controlling terminal so the pty can become it.
        if hasattr(termios, 'TIOCNOTTY'):
            try:
                fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
            except OSError:
                pass
            else:
                try:
                    fcntl.ioctl(fd, termios.TIOCNOTTY, '')
                except:
                    pass
                os.close(fd)
        # New session; then make the slave our controlling terminal.
        os.setsid()
        if hasattr(termios, 'TIOCSCTTY'):
            fcntl.ioctl(slavefd, termios.TIOCSCTTY, '')
        for fd in range(3):
            if fd != slavefd:
                os.close(fd)
        # The slave pty becomes stdin/stdout/stderr of the child.
        os.dup2(slavefd, 0) # stdin
        os.dup2(slavefd, 1) # stdout
        os.dup2(slavefd, 2) # stderr
        for fd in _listOpenFDs():
            if fd > 2:
                try:
                    os.close(fd)
                except:
                    pass
        self._resetSignalDisposition()
    # PTYs do not have stdin/stdout/stderr. They only have in and out, just
    # like sockets. You cannot close one without closing off the entire PTY.
    def closeStdin(self):
        pass
    def closeStdout(self):
        pass
    def closeStderr(self):
        pass
    def doRead(self):
        """
        Called when my standard output stream is ready for reading.
        """
        return fdesc.readFromFD(
            self.fd,
            lambda data: self.proto.childDataReceived(1, data))
    def fileno(self):
        """
        This returns the file number of standard output on this process.
        """
        return self.fd
    def maybeCallProcessEnded(self):
        # two things must happen before we call the ProcessProtocol's
        # processEnded method. 1: the child process must die and be reaped
        # (which calls our own processEnded method). 2: the child must close
        # their stdin/stdout/stderr fds, causing the pty to close, causing
        # our connectionLost method to be called. #2 can also be triggered
        # by calling .loseConnection().
        if self.lostProcess == 2:
            _BaseProcess.maybeCallProcessEnded(self)
    def connectionLost(self, reason):
        """
        I call this to clean up when one or all of my connections has died.
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        os.close(self.fd)
        # Bump the "things that must end" counter; see maybeCallProcessEnded.
        self.lostProcess += 1
        self.maybeCallProcessEnded()
    def writeSomeData(self, data):
        """
        Write some data to the open process.
        """
        return fdesc.writeToFD(self.fd, data)
| agpl-3.0 |
elky/django | django/db/models/sql/datastructures.py | 30 | 5659 | """
Useful auxiliary data structures for query construction. Not useful outside
the SQL domain.
"""
# for backwards-compatibility in Django 1.11
from django.core.exceptions import EmptyResultSet # NOQA: F401
from django.db.models.sql.constants import INNER, LOUTER
class MultiJoin(Exception):
    """
    Signal raised by the join construction code to mark the point at which
    a multi-valued join was attempted, for callers that want to treat that
    case exceptionally.
    """
    def __init__(self, names_pos, path_with_names):
        # Position in the lookup path where the multi-valued join appeared.
        self.level = names_pos
        # The full path travelled, including the multi-valued join itself.
        self.names_with_path = path_with_names
class Empty:
    # Bare placeholder class with no behaviour of its own; instances serve
    # as blank attribute holders.
    pass
class Join:
"""
Used by sql.Query and sql.SQLCompiler to generate JOIN clauses into the
FROM entry. For example, the SQL generated could be
LEFT OUTER JOIN "sometable" T1 ON ("othertable"."sometable_id" = "sometable"."id")
This class is primarily used in Query.alias_map. All entries in alias_map
must be Join compatible by providing the following attributes and methods:
- table_name (string)
- table_alias (possible alias for the table, can be None)
- join_type (can be None for those entries that aren't joined from
anything)
- parent_alias (which table is this join's parent, can be None similarly
to join_type)
- as_sql()
- relabeled_clone()
"""
def __init__(self, table_name, parent_alias, table_alias, join_type,
join_field, nullable):
# Join table
self.table_name = table_name
self.parent_alias = parent_alias
# Note: table_alias is not necessarily known at instantiation time.
self.table_alias = table_alias
# LOUTER or INNER
self.join_type = join_type
# A list of 2-tuples to use in the ON clause of the JOIN.
# Each 2-tuple will create one join condition in the ON clause.
self.join_cols = join_field.get_joining_columns()
# Along which field (or ForeignObjectRel in the reverse join case)
self.join_field = join_field
# Is this join nullabled?
self.nullable = nullable
def as_sql(self, compiler, connection):
"""
Generate the full
LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params
clause for this join.
"""
join_conditions = []
params = []
qn = compiler.quote_name_unless_alias
qn2 = connection.ops.quote_name
# Add a join condition for each pair of joining columns.
for index, (lhs_col, rhs_col) in enumerate(self.join_cols):
join_conditions.append('%s.%s = %s.%s' % (
qn(self.parent_alias),
qn2(lhs_col),
qn(self.table_alias),
qn2(rhs_col),
))
# Add a single condition inside parentheses for whatever
# get_extra_restriction() returns.
extra_cond = self.join_field.get_extra_restriction(
compiler.query.where_class, self.table_alias, self.parent_alias)
if extra_cond:
extra_sql, extra_params = compiler.compile(extra_cond)
join_conditions.append('(%s)' % extra_sql)
params.extend(extra_params)
if not join_conditions:
# This might be a rel on the other end of an actual declared field.
declared_field = getattr(self.join_field, 'field', self.join_field)
raise ValueError(
"Join generated an empty ON clause. %s did not yield either "
"joining columns or extra restrictions." % declared_field.__class__
)
on_clause_sql = ' AND '.join(join_conditions)
alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias)
sql = '%s %s%s ON (%s)' % (self.join_type, qn(self.table_name), alias_str, on_clause_sql)
return sql, params
def relabeled_clone(self, change_map):
new_parent_alias = change_map.get(self.parent_alias, self.parent_alias)
new_table_alias = change_map.get(self.table_alias, self.table_alias)
return self.__class__(
self.table_name, new_parent_alias, new_table_alias, self.join_type,
self.join_field, self.nullable)
def __eq__(self, other):
if isinstance(other, self.__class__):
return (
self.table_name == other.table_name and
self.parent_alias == other.parent_alias and
self.join_field == other.join_field
)
return False
def demote(self):
new = self.relabeled_clone({})
new.join_type = INNER
return new
def promote(self):
new = self.relabeled_clone({})
new.join_type = LOUTER
return new
class BaseTable:
    """
    Represents a base table reference in a FROM clause: the plain "foo" in
        SELECT * FROM "foo" WHERE somecond
    """
    # Base tables are not joined onto anything.
    join_type = None
    parent_alias = None

    def __init__(self, table_name, alias):
        self.table_name = table_name
        self.table_alias = alias

    def as_sql(self, compiler, connection):
        """Return the (sql, params) fragment naming this table."""
        quoted = compiler.quote_name_unless_alias(self.table_name)
        if self.table_alias == self.table_name:
            return quoted, []
        return '%s %s' % (quoted, self.table_alias), []

    def relabeled_clone(self, change_map):
        """Return a copy whose alias has been renamed per change_map."""
        renamed = change_map.get(self.table_alias, self.table_alias)
        return self.__class__(self.table_name, renamed)
| bsd-3-clause |
johnkeepmoving/oss-ftp | python27/win32/Lib/test/test_cfgparser.py | 23 | 27873 | import ConfigParser
import StringIO
import os
import unittest
import UserDict
from test import test_support
class SortedDict(UserDict.UserDict):
    """
    Dict whose item/key/value listings are always sorted by key; used to
    give ConfigParser a deterministic section/option ordering in tests.
    """
    def items(self):
        # sorted() builds the sorted copy in one step instead of
        # copy-then-in-place-sort.
        return sorted(self.data.items())
    def keys(self):
        return sorted(self.data.keys())
    def values(self):
        # XXX never used?
        return [item[1] for item in self.items()]
    def iteritems(self): return iter(self.items())
    def iterkeys(self): return iter(self.keys())
    __iter__ = iterkeys
    def itervalues(self): return iter(self.values())
class TestCaseBase(unittest.TestCase):
allow_no_value = False
def newconfig(self, defaults=None):
if defaults is None:
self.cf = self.config_class(allow_no_value=self.allow_no_value)
else:
self.cf = self.config_class(defaults,
allow_no_value=self.allow_no_value)
return self.cf
def fromstring(self, string, defaults=None):
cf = self.newconfig(defaults)
sio = StringIO.StringIO(string)
cf.readfp(sio)
return cf
def test_basic(self):
config_string = (
"[Foo Bar]\n"
"foo=bar\n"
"[Spacey Bar]\n"
"foo = bar\n"
"[Commented Bar]\n"
"foo: bar ; comment\n"
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[Section\\with$weird%characters[\t]\n"
"[Internationalized Stuff]\n"
"foo[bg]: Bulgarian\n"
"foo=Default\n"
"foo[en]=English\n"
"foo[de]=Deutsch\n"
"[Spaces]\n"
"key with spaces : value\n"
"another with spaces = splat!\n"
)
if self.allow_no_value:
config_string += (
"[NoValue]\n"
"option-without-value\n"
)
cf = self.fromstring(config_string)
L = cf.sections()
L.sort()
E = [r'Commented Bar',
r'Foo Bar',
r'Internationalized Stuff',
r'Long Line',
r'Section\with$weird%characters[' '\t',
r'Spaces',
r'Spacey Bar',
]
if self.allow_no_value:
E.append(r'NoValue')
E.sort()
eq = self.assertEqual
eq(L, E)
# The use of spaces in the section names serves as a
# regression test for SourceForge bug #583248:
# http://www.python.org/sf/583248
eq(cf.get('Foo Bar', 'foo'), 'bar')
eq(cf.get('Spacey Bar', 'foo'), 'bar')
eq(cf.get('Commented Bar', 'foo'), 'bar')
eq(cf.get('Spaces', 'key with spaces'), 'value')
eq(cf.get('Spaces', 'another with spaces'), 'splat!')
if self.allow_no_value:
eq(cf.get('NoValue', 'option-without-value'), None)
self.assertNotIn('__name__', cf.options("Foo Bar"),
'__name__ "option" should not be exposed by the API!')
# Make sure the right things happen for remove_option();
# added to include check for SourceForge bug #123324:
self.assertTrue(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report existence of option")
self.assertFalse(cf.has_option('Foo Bar', 'foo'),
"remove_option() failed to remove option")
self.assertFalse(cf.remove_option('Foo Bar', 'foo'),
"remove_option() failed to report non-existence of option"
" that was removed")
self.assertRaises(ConfigParser.NoSectionError,
cf.remove_option, 'No Such Section', 'foo')
eq(cf.get('Long Line', 'foo'),
'this line is much, much longer than my editor\nlikes it.')
def test_case_sensitivity(self):
cf = self.newconfig()
cf.add_section("A")
cf.add_section("a")
L = cf.sections()
L.sort()
eq = self.assertEqual
eq(L, ["A", "a"])
cf.set("a", "B", "value")
eq(cf.options("a"), ["b"])
eq(cf.get("a", "b"), "value",
"could not locate option, expecting case-insensitive option names")
self.assertTrue(cf.has_option("a", "b"))
cf.set("A", "A-B", "A-B value")
for opt in ("a-b", "A-b", "a-B", "A-B"):
self.assertTrue(
cf.has_option("A", opt),
"has_option() returned false for option which should exist")
eq(cf.options("A"), ["a-b"])
eq(cf.options("a"), ["b"])
cf.remove_option("a", "B")
eq(cf.options("a"), [])
# SF bug #432369:
cf = self.fromstring(
"[MySection]\nOption: first line\n\tsecond line\n")
eq(cf.options("MySection"), ["option"])
eq(cf.get("MySection", "Option"), "first line\nsecond line")
# SF bug #561822:
cf = self.fromstring("[section]\nnekey=nevalue\n",
defaults={"key":"value"})
self.assertTrue(cf.has_option("section", "Key"))
def test_default_case_sensitivity(self):
cf = self.newconfig({"foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive option names")
cf = self.newconfig({"Foo": "Bar"})
self.assertEqual(
cf.get("DEFAULT", "Foo"), "Bar",
"could not locate option, expecting case-insensitive defaults")
def test_parse_errors(self):
self.newconfig()
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces: splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n extra-spaces= splat\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n:value-without-option-name\n")
self.parse_error(ConfigParser.ParsingError,
"[Foo]\n=value-without-option-name\n")
self.parse_error(ConfigParser.MissingSectionHeaderError,
"No Section!\n")
def parse_error(self, exc, src):
sio = StringIO.StringIO(src)
self.assertRaises(exc, self.cf.readfp, sio)
def test_query_errors(self):
cf = self.newconfig()
self.assertEqual(cf.sections(), [],
"new ConfigParser should have no defined sections")
self.assertFalse(cf.has_section("Foo"),
"new ConfigParser should have no acknowledged "
"sections")
self.assertRaises(ConfigParser.NoSectionError,
cf.options, "Foo")
self.assertRaises(ConfigParser.NoSectionError,
cf.set, "foo", "bar", "value")
self.get_error(ConfigParser.NoSectionError, "foo", "bar")
cf.add_section("foo")
self.get_error(ConfigParser.NoOptionError, "foo", "bar")
def get_error(self, exc, section, option):
try:
self.cf.get(section, option)
except exc, e:
return e
else:
self.fail("expected exception type %s.%s"
% (exc.__module__, exc.__name__))
def test_boolean(self):
cf = self.fromstring(
"[BOOLTEST]\n"
"T1=1\n"
"T2=TRUE\n"
"T3=True\n"
"T4=oN\n"
"T5=yes\n"
"F1=0\n"
"F2=FALSE\n"
"F3=False\n"
"F4=oFF\n"
"F5=nO\n"
"E1=2\n"
"E2=foo\n"
"E3=-1\n"
"E4=0.1\n"
"E5=FALSE AND MORE"
)
for x in range(1, 5):
self.assertTrue(cf.getboolean('BOOLTEST', 't%d' % x))
self.assertFalse(cf.getboolean('BOOLTEST', 'f%d' % x))
self.assertRaises(ValueError,
cf.getboolean, 'BOOLTEST', 'e%d' % x)
def test_weird_errors(self):
cf = self.newconfig()
cf.add_section("Foo")
self.assertRaises(ConfigParser.DuplicateSectionError,
cf.add_section, "Foo")
def test_write(self):
config_string = (
"[Long Line]\n"
"foo: this line is much, much longer than my editor\n"
" likes it.\n"
"[DEFAULT]\n"
"foo: another very\n"
" long line\n"
)
if self.allow_no_value:
config_string += (
"[Valueless]\n"
"option-without-value\n"
)
cf = self.fromstring(config_string)
output = StringIO.StringIO()
cf.write(output)
expect_string = (
"[DEFAULT]\n"
"foo = another very\n"
"\tlong line\n"
"\n"
"[Long Line]\n"
"foo = this line is much, much longer than my editor\n"
"\tlikes it.\n"
"\n"
)
if self.allow_no_value:
expect_string += (
"[Valueless]\n"
"option-without-value\n"
"\n"
)
self.assertEqual(output.getvalue(), expect_string)
def test_set_string_types(self):
cf = self.fromstring("[sect]\n"
"option1=foo\n")
# Check that we don't get an exception when setting values in
# an existing section using strings:
class mystr(str):
pass
cf.set("sect", "option1", "splat")
cf.set("sect", "option1", mystr("splat"))
cf.set("sect", "option2", "splat")
cf.set("sect", "option2", mystr("splat"))
def test_set_unicode(self):
try:
unicode
except NameError:
self.skipTest('no unicode support')
cf = self.fromstring("[sect]\n"
"option1=foo\n")
cf.set("sect", "option1", unicode("splat"))
cf.set("sect", "option2", unicode("splat"))
def test_read_returns_file_list(self):
    """read() returns the list of files it successfully parsed,
    silently skipping unreadable ones."""
    file1 = test_support.findfile("cfgparser.1")
    # check when we pass a mix of readable and non-readable files:
    cf = self.newconfig()
    parsed_files = cf.read([file1, "nonexistent-file"])
    self.assertEqual(parsed_files, [file1])
    self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
    # check when we pass only a filename:
    cf = self.newconfig()
    parsed_files = cf.read(file1)
    self.assertEqual(parsed_files, [file1])
    self.assertEqual(cf.get("Foo Bar", "foo"), "newbar")
    # check when we pass only missing files:
    cf = self.newconfig()
    parsed_files = cf.read(["nonexistent-file"])
    self.assertEqual(parsed_files, [])
    # check when we pass no files:
    cf = self.newconfig()
    parsed_files = cf.read([])
    self.assertEqual(parsed_files, [])
# shared by subclasses
def get_interpolation_config(self):
    """Build a config exercising nested interpolation (shared by subclasses).

    'bar' resolves in 1 step, 'bar9' in 9, 'bar10' in 10, and 'bar11' needs
    11 steps -- one past the depth limit (see the bar11 depth-error check in
    ConfigParserTestCase).  The mixed-case option names (With5, WITH4, ...)
    exercise case-insensitive option lookup during interpolation.
    """
    return self.fromstring(
        "[Foo]\n"
        "bar=something %(with1)s interpolation (1 step)\n"
        "bar9=something %(with9)s lots of interpolation (9 steps)\n"
        "bar10=something %(with10)s lots of interpolation (10 steps)\n"
        "bar11=something %(with11)s lots of interpolation (11 steps)\n"
        "with11=%(with10)s\n"
        "with10=%(with9)s\n"
        "with9=%(with8)s\n"
        "with8=%(With7)s\n"
        "with7=%(WITH6)s\n"
        "with6=%(with5)s\n"
        "With5=%(with4)s\n"
        "WITH4=%(with3)s\n"
        "with3=%(with2)s\n"
        "with2=%(with1)s\n"
        "with1=with\n"
        "\n"
        "[Mutual Recursion]\n"
        "foo=%(bar)s\n"
        "bar=%(foo)s\n"
        "\n"
        "[Interpolation Error]\n"
        "name=%(reference)s\n",
        # no definition for 'reference'
        defaults={"getname": "%(__name__)s"})
def check_items_config(self, expected):
    """Parse a small fixture and assert sorted items('section') == *expected*.

    Shared helper: interpolating and raw subclasses pass different
    *expected* lists.
    """
    cf = self.fromstring(
        "[section]\n"
        "name = value\n"
        "key: |%(name)s| \n"
        "getdefault: |%(default)s|\n"
        "getname: |%(__name__)s|",
        defaults={"default": "<default>"})
    L = list(cf.items("section"))
    L.sort()
    self.assertEqual(L, expected)
class ConfigParserTestCase(TestCaseBase):
    """Tests for the classic (interpolating) ConfigParser class."""
    config_class = ConfigParser.ConfigParser
    allow_no_value = True

    def test_interpolation(self):
        # Interpolation resolves up to the maximum depth; one level deeper
        # ('bar11') must raise InterpolationDepthError.
        # Fixed: removed the unused 'rawval' dict that was dead code here.
        cf = self.get_interpolation_config()
        eq = self.assertEqual
        eq(cf.get("Foo", "getname"), "Foo")
        eq(cf.get("Foo", "bar"), "something with interpolation (1 step)")
        eq(cf.get("Foo", "bar9"),
           "something with lots of interpolation (9 steps)")
        eq(cf.get("Foo", "bar10"),
           "something with lots of interpolation (10 steps)")
        self.get_error(ConfigParser.InterpolationDepthError, "Foo", "bar11")

    def test_interpolation_missing_value(self):
        # Referencing an undefined option raises InterpolationError carrying
        # the offending reference, section and option names.
        self.get_interpolation_config()
        e = self.get_error(ConfigParser.InterpolationError,
                           "Interpolation Error", "name")
        self.assertEqual(e.reference, "reference")
        self.assertEqual(e.section, "Interpolation Error")
        self.assertEqual(e.option, "name")

    def test_items(self):
        # items() returns *interpolated* values, including defaults and the
        # magic __name__ entry.
        self.check_items_config([('default', '<default>'),
                                 ('getdefault', '|<default>|'),
                                 ('getname', '|section|'),
                                 ('key', '|value|'),
                                 ('name', 'value')])

    def test_set_nonstring_types(self):
        # ConfigParser tolerates set() with non-string values; they can only
        # be read back with raw=True because interpolation needs strings.
        cf = self.newconfig()
        cf.add_section('non-string')
        cf.set('non-string', 'int', 1)
        cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13, '%('])
        cf.set('non-string', 'dict', {'pi': 3.14159, '%(': 1,
                                      '%(list)': '%(list)'})
        cf.set('non-string', 'string_with_interpolation', '%(list)s')
        cf.set('non-string', 'no-value')
        self.assertEqual(cf.get('non-string', 'int', raw=True), 1)
        self.assertRaises(TypeError, cf.get, 'non-string', 'int')
        self.assertEqual(cf.get('non-string', 'list', raw=True),
                         [0, 1, 1, 2, 3, 5, 8, 13, '%('])
        self.assertRaises(TypeError, cf.get, 'non-string', 'list')
        self.assertEqual(cf.get('non-string', 'dict', raw=True),
                         {'pi': 3.14159, '%(': 1, '%(list)': '%(list)'})
        self.assertRaises(TypeError, cf.get, 'non-string', 'dict')
        self.assertEqual(cf.get('non-string', 'string_with_interpolation',
                                raw=True), '%(list)s')
        self.assertRaises(ValueError, cf.get, 'non-string',
                          'string_with_interpolation', raw=False)
        self.assertEqual(cf.get('non-string', 'no-value'), None)
class MultilineValuesTestCase(TestCaseBase):
    """Regression test: reading many large multi-line values from a file
    (code path changed by the Python 3.2 performance work)."""
    config_class = ConfigParser.ConfigParser
    # A long value with every word on its own tab-indented continuation line.
    wonderful_spam = ("I'm having spam spam spam spam "
                      "spam spam spam beaked beans spam "
                      "spam spam and spam!").replace(' ', '\t\n')

    def setUp(self):
        # Write a config of 100 sections x 10 options of multi-line values.
        cf = self.newconfig()
        for i in range(100):
            s = 'section{}'.format(i)
            cf.add_section(s)
            for j in range(10):
                cf.set(s, 'lovely_spam{}'.format(j), self.wonderful_spam)
        with open(test_support.TESTFN, 'w') as f:
            cf.write(f)

    def tearDown(self):
        os.unlink(test_support.TESTFN)

    def test_dominating_multiline_values(self):
        # we're reading from file because this is where the code changed
        # during performance updates in Python 3.2
        cf_from_file = self.newconfig()
        with open(test_support.TESTFN) as f:
            cf_from_file.readfp(f)
        # Continuation lines are re-joined with '\n' (leading tab stripped).
        self.assertEqual(cf_from_file.get('section8', 'lovely_spam4'),
                         self.wonderful_spam.replace('\t\n', '\n'))
class RawConfigParserTestCase(TestCaseBase):
    """Tests for RawConfigParser, which performs no '%' interpolation."""
    config_class = ConfigParser.RawConfigParser

    def test_interpolation(self):
        # The raw parser returns '%(...)s' references verbatim.
        cf = self.get_interpolation_config()
        eq = self.assertEqual
        eq(cf.get("Foo", "getname"), "%(__name__)s")
        eq(cf.get("Foo", "bar"),
           "something %(with1)s interpolation (1 step)")
        eq(cf.get("Foo", "bar9"),
           "something %(with9)s lots of interpolation (9 steps)")
        eq(cf.get("Foo", "bar10"),
           "something %(with10)s lots of interpolation (10 steps)")
        eq(cf.get("Foo", "bar11"),
           "something %(with11)s lots of interpolation (11 steps)")

    def test_items(self):
        # items() returns the raw (uninterpolated) values.
        self.check_items_config([('default', '<default>'),
                                 ('getdefault', '|%(default)s|'),
                                 ('getname', '|%(__name__)s|'),
                                 ('key', '|%(name)s|'),
                                 ('name', 'value')])

    def test_set_nonstring_types(self):
        # Without interpolation, non-string values are stored and returned
        # unchanged -- no raw=True needed.
        cf = self.newconfig()
        cf.add_section('non-string')
        cf.set('non-string', 'int', 1)
        cf.set('non-string', 'list', [0, 1, 1, 2, 3, 5, 8, 13])
        cf.set('non-string', 'dict', {'pi': 3.14159})
        self.assertEqual(cf.get('non-string', 'int'), 1)
        self.assertEqual(cf.get('non-string', 'list'),
                         [0, 1, 1, 2, 3, 5, 8, 13])
        self.assertEqual(cf.get('non-string', 'dict'), {'pi': 3.14159})
class SafeConfigParserTestCase(ConfigParserTestCase):
    """Re-run the ConfigParser tests against SafeConfigParser, plus checks
    for its stricter '%' handling."""
    config_class = ConfigParser.SafeConfigParser

    def test_safe_interpolation(self):
        # See http://www.python.org/sf/511737
        cf = self.fromstring("[section]\n"
                             "option1=xxx\n"
                             "option2=%(option1)s/xxx\n"
                             "ok=%(option1)s/%%s\n"
                             "not_ok=%(option2)s/%%s")
        # '%%' escapes to a literal '%', even through nested interpolation.
        self.assertEqual(cf.get("section", "ok"), "xxx/%s")
        self.assertEqual(cf.get("section", "not_ok"), "xxx/xxx/%s")

    def test_set_malformatted_interpolation(self):
        cf = self.fromstring("[sect]\n"
                             "option1=foo\n")
        self.assertEqual(cf.get('sect', "option1"), "foo")
        # A lone '%' (not '%%' and not '%(') is rejected by set().
        self.assertRaises(ValueError, cf.set, "sect", "option1", "%foo")
        self.assertRaises(ValueError, cf.set, "sect", "option1", "foo%")
        self.assertRaises(ValueError, cf.set, "sect", "option1", "f%oo")
        # The stored value must be unchanged after the failed sets.
        self.assertEqual(cf.get('sect', "option1"), "foo")
        # bug #5741: double percents are *not* malformed
        cf.set("sect", "option2", "foo%%bar")
        self.assertEqual(cf.get("sect", "option2"), "foo%bar")

    def test_set_nonstring_types(self):
        # Overrides the base-class test: SafeConfigParser.set() only
        # accepts string values.
        cf = self.fromstring("[sect]\n"
                             "option1=foo\n")
        # Check that we get a TypeError when setting non-string values
        # in an existing section:
        self.assertRaises(TypeError, cf.set, "sect", "option1", 1)
        self.assertRaises(TypeError, cf.set, "sect", "option1", 1.0)
        self.assertRaises(TypeError, cf.set, "sect", "option1", object())
        self.assertRaises(TypeError, cf.set, "sect", "option2", 1)
        self.assertRaises(TypeError, cf.set, "sect", "option2", 1.0)
        self.assertRaises(TypeError, cf.set, "sect", "option2", object())

    def test_add_section_default_1(self):
        # Adding a section that collides with the default section is refused.
        cf = self.newconfig()
        self.assertRaises(ValueError, cf.add_section, "default")

    def test_add_section_default_2(self):
        cf = self.newconfig()
        self.assertRaises(ValueError, cf.add_section, "DEFAULT")
class SafeConfigParserTestCaseNoValue(SafeConfigParserTestCase):
    """Re-run the SafeConfigParser tests with valueless options allowed."""
    allow_no_value = True
class TestChainMap(unittest.TestCase):
    """Tests for ConfigParser._Chainmap, a minimal layered-dict helper
    (issue 12717)."""
    def test_issue_12717(self):
        d1 = dict(red=1, green=2)
        d2 = dict(green=3, blue=4)
        # Earlier maps shadow later ones: build the expected combined dict
        # with d1's entries winning.
        dcomb = d2.copy()
        dcomb.update(d1)
        cm = ConfigParser._Chainmap(d1, d2)
        self.assertIsInstance(cm.keys(), list)
        self.assertEqual(set(cm.keys()), set(dcomb.keys()))      # keys()
        self.assertEqual(set(cm.values()), set(dcomb.values()))  # values()
        self.assertEqual(set(cm.items()), set(dcomb.items()))    # items()
        self.assertEqual(set(cm), set(dcomb))                    # __iter__ ()
        self.assertEqual(cm, dcomb)                              # __eq__()
        self.assertEqual([cm[k] for k in dcomb], dcomb.values()) # __getitem__()
        klist = 'red green blue black brown'.split()
        self.assertEqual([cm.get(k, 10) for k in klist],
                         [dcomb.get(k, 10) for k in klist])      # get()
        self.assertEqual([k in cm for k in klist],
                         [k in dcomb for k in klist])            # __contains__()
        # has_key() is deprecated in Py3k mode; silence the warning.
        with test_support.check_py3k_warnings():
            self.assertEqual([cm.has_key(k) for k in klist],
                             [dcomb.has_key(k) for k in klist])  # has_key()
class Issue7005TestCase(unittest.TestCase):
    """Test output when None is set() as a value and allow_no_value == False.
    http://bugs.python.org/issue7005
    """
    # None must be written out as the string 'None', not as a bare option.
    expected_output = "[section]\noption = None\n\n"

    def prepare(self, config_class):
        """Set a None value and return what write() produces for it."""
        # This is the default, but that's the point.
        cp = config_class(allow_no_value=False)
        cp.add_section("section")
        cp.set("section", "option", None)
        sio = StringIO.StringIO()
        cp.write(sio)
        return sio.getvalue()

    def test_none_as_value_stringified(self):
        output = self.prepare(ConfigParser.ConfigParser)
        self.assertEqual(output, self.expected_output)

    def test_none_as_value_stringified_raw(self):
        output = self.prepare(ConfigParser.RawConfigParser)
        self.assertEqual(output, self.expected_output)
class SortedTestCase(RawConfigParserTestCase):
    """RawConfigParser with a sorting dict_type: write() emits sections and
    options in sorted order."""
    def newconfig(self, defaults=None):
        # Keep the parser on self so test_sorted can call write() later.
        self.cf = self.config_class(defaults=defaults, dict_type=SortedDict)
        return self.cf

    def test_sorted(self):
        # Input is deliberately unsorted; output must be fully sorted.
        self.fromstring("[b]\n"
                        "o4=1\n"
                        "o3=2\n"
                        "o2=3\n"
                        "o1=4\n"
                        "[a]\n"
                        "k=v\n")
        output = StringIO.StringIO()
        self.cf.write(output)
        self.assertEqual(output.getvalue(),
                         "[a]\n"
                         "k = v\n\n"
                         "[b]\n"
                         "o1 = 4\n"
                         "o2 = 3\n"
                         "o3 = 2\n"
                         "o4 = 1\n\n")
class ExceptionPicklingTestCase(unittest.TestCase):
    """Tests for issue #13760: ConfigParser exceptions are not picklable."""

    def _roundtrip(self, e1):
        # Pickle/unpickle the exception and verify the invariants shared by
        # every ConfigParser exception (message, args, repr); return the
        # unpickled copy so callers can check class-specific attributes.
        # This replaces ten copies of the same boilerplate.
        import pickle
        e2 = pickle.loads(pickle.dumps(e1))
        self.assertEqual(e1.message, e2.message)
        self.assertEqual(e1.args, e2.args)
        self.assertEqual(repr(e1), repr(e2))
        return e2

    def test_error(self):
        self._roundtrip(ConfigParser.Error('value'))

    def test_nosectionerror(self):
        e1 = ConfigParser.NoSectionError('section')
        e2 = self._roundtrip(e1)
        self.assertEqual(e1.section, e2.section)

    def test_nooptionerror(self):
        e1 = ConfigParser.NoOptionError('option', 'section')
        e2 = self._roundtrip(e1)
        self.assertEqual(e1.section, e2.section)
        self.assertEqual(e1.option, e2.option)

    def test_duplicatesectionerror(self):
        e1 = ConfigParser.DuplicateSectionError('section')
        e2 = self._roundtrip(e1)
        self.assertEqual(e1.section, e2.section)

    def test_interpolationerror(self):
        e1 = ConfigParser.InterpolationError('option', 'section', 'msg')
        e2 = self._roundtrip(e1)
        self.assertEqual(e1.section, e2.section)
        self.assertEqual(e1.option, e2.option)

    def test_interpolationmissingoptionerror(self):
        e1 = ConfigParser.InterpolationMissingOptionError('option', 'section',
                                                          'rawval', 'reference')
        e2 = self._roundtrip(e1)
        self.assertEqual(e1.section, e2.section)
        self.assertEqual(e1.option, e2.option)
        self.assertEqual(e1.reference, e2.reference)

    def test_interpolationsyntaxerror(self):
        e1 = ConfigParser.InterpolationSyntaxError('option', 'section', 'msg')
        e2 = self._roundtrip(e1)
        self.assertEqual(e1.section, e2.section)
        self.assertEqual(e1.option, e2.option)

    def test_interpolationdeptherror(self):
        e1 = ConfigParser.InterpolationDepthError('option', 'section',
                                                  'rawval')
        e2 = self._roundtrip(e1)
        self.assertEqual(e1.section, e2.section)
        self.assertEqual(e1.option, e2.option)

    def test_parsingerror(self):
        # ParsingError accumulates per-line errors; they must survive too.
        e1 = ConfigParser.ParsingError('source')
        e1.append(1, 'line1')
        e1.append(2, 'line2')
        e1.append(3, 'line3')
        e2 = self._roundtrip(e1)
        self.assertEqual(e1.filename, e2.filename)
        self.assertEqual(e1.errors, e2.errors)

    def test_missingsectionheadererror(self):
        e1 = ConfigParser.MissingSectionHeaderError('filename', 123, 'line')
        e2 = self._roundtrip(e1)
        self.assertEqual(e1.line, e2.line)
        self.assertEqual(e1.filename, e2.filename)
        self.assertEqual(e1.lineno, e2.lineno)
def test_main():
    """Run every test case in this module via the regrtest helper."""
    test_support.run_unittest(
        ConfigParserTestCase,
        MultilineValuesTestCase,
        RawConfigParserTestCase,
        SafeConfigParserTestCase,
        SafeConfigParserTestCaseNoValue,
        SortedTestCase,
        Issue7005TestCase,
        TestChainMap,
        ExceptionPicklingTestCase,
        )

if __name__ == "__main__":
    test_main()
| mit |
RackSec/ansible | lib/ansible/modules/cloud/misc/proxmox_kvm.py | 41 | 47770 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Abdoul Bah (@helldorado) <bahabdoul at gmail.com>
"""
Ansible module to manage Qemu(KVM) instance in Proxmox VE cluster.
This module is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this software. If not, see <http://www.gnu.org/licenses/>.
"""
# Standard Ansible module metadata: documentation schema version, module
# maturity, and which team supports it.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: proxmox_kvm
short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.
description:
- Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
version_added: "2.3"
author: "Abdoul Bah (@helldorado) <bahabdoul at gmail.com>"
options:
acpi:
description:
      - Specify if ACPI should be enabled/disabled.
required: false
default: "yes"
choices: [ "yes", "no" ]
agent:
description:
- Specify if the QEMU GuestAgent should be enabled/disabled.
required: false
default: null
choices: [ "yes", "no" ]
args:
description:
- Pass arbitrary arguments to kvm.
- This option is for experts only!
default: "-serial unix:/var/run/qemu-server/VMID.serial,server,nowait"
required: false
api_host:
description:
- Specify the target host of the Proxmox VE cluster.
required: true
api_user:
description:
- Specify the user to authenticate with.
required: true
api_password:
description:
- Specify the password to authenticate with.
- You can use C(PROXMOX_PASSWORD) environment variable.
default: null
required: false
autostart:
description:
- Specify, if the VM should be automatically restarted after crash (currently ignored in PVE API).
required: false
default: "no"
choices: [ "yes", "no" ]
balloon:
description:
- Specify the amount of RAM for the VM in MB.
- Using zero disables the balloon driver.
required: false
default: 0
bios:
description:
- Specify the BIOS implementation.
choices: ['seabios', 'ovmf']
required: false
default: null
boot:
description:
- Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
- You can combine to set order.
required: false
default: cnd
bootdisk:
description:
- Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+)
required: false
default: null
clone:
description:
      - Name of VM to be cloned. If C(vmid) is set, C(clone) can take an arbitrary value but is required for initiating the clone.
required: false
default: null
cores:
description:
- Specify number of cores per socket.
required: false
default: 1
cpu:
description:
- Specify emulated CPU type.
required: false
default: kvm64
cpulimit:
description:
- Specify if CPU usage will be limited. Value 0 indicates no CPU limit.
- If the computer has 2 CPUs, it has total of '2' CPU time
required: false
default: null
cpuunits:
description:
- Specify CPU weight for a VM.
- You can disable fair-scheduler configuration by setting this to 0
default: 1000
required: false
delete:
description:
- Specify a list of settings you want to delete.
required: false
default: null
description:
description:
- Specify the description for the VM. Only used on the configuration web interface.
- This is saved as comment inside the configuration file.
required: false
default: null
digest:
description:
- Specify if to prevent changes if current configuration file has different SHA1 digest.
- This can be used to prevent concurrent modifications.
required: false
default: null
force:
description:
- Allow to force stop VM.
- Can be used only with states C(stopped), C(restarted).
default: null
choices: [ "yes", "no" ]
required: false
format:
description:
- Target drive’s backing file’s data format.
- Used only with clone
default: qcow2
choices: [ "cloop", "cow", "qcow", "qcow2", "qed", "raw", "vmdk" ]
required: false
freeze:
description:
- Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution).
required: false
default: null
choices: [ "yes", "no" ]
full:
description:
- Create a full copy of all disk. This is always done when you clone a normal VM.
- For VM templates, we try to create a linked clone by default.
- Used only with clone
default: yes
choices: [ "yes", "no"]
required: false
hostpci:
description:
- Specify a hash/dictionary of map host pci devices into guest. C(hostpci='{"key":"value", "key":"value"}').
- Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
- Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
- The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
- C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model).
- C(rombar=boolean) I(default=1) Specify whether or not the device’s ROM will be visible in the guest’s memory map.
- C(x-vga=boolean) I(default=0) Enable vfio-vga device support.
- /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
required: false
default: null
hotplug:
description:
- Selectively enable hotplug features.
- This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb').
- Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb').
required: false
default: null
hugepages:
description:
- Enable/disable hugepages memory.
choices: ['any', '2', '1024']
required: false
default: null
ide:
description:
- A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}').
- Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3.
- Values allowed are - C("storage:size,format=value").
- C(storage) is the storage identifier where to create the disk.
- C(size) is the size of the disk in GB.
- C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
required: false
default: null
keyboard:
description:
- Sets the keyboard layout for VNC server.
required: false
default: null
kvm:
description:
- Enable/disable KVM hardware virtualization.
required: false
default: "yes"
choices: [ "yes", "no" ]
localtime:
description:
- Sets the real time clock to local time.
- This is enabled by default if ostype indicates a Microsoft OS.
required: false
default: null
choices: [ "yes", "no" ]
lock:
description:
- Lock/unlock the VM.
choices: ['migrate', 'backup', 'snapshot', 'rollback']
required: false
default: null
machine:
description:
- Specifies the Qemu machine type.
- type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?))
required: false
default: null
memory:
description:
- Memory size in MB for instance.
required: false
default: 512
migrate_downtime:
description:
- Sets maximum tolerated downtime (in seconds) for migrations.
required: false
default: null
migrate_speed:
description:
- Sets maximum speed (in MB/s) for migrations.
- A value of 0 is no limit.
required: false
default: null
name:
description:
- Specifies the VM name. Only used on the configuration web interface.
- Required only for C(state=present).
default: null
required: false
net:
description:
- A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}').
- Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
      - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
- Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3).
- C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified.
- The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'.
- Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'.
- If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
default: null
required: false
newid:
description:
- VMID for the clone. Used only with clone.
- If newid is not set, the next available VM ID will be fetched from ProxmoxAPI.
default: null
required: false
node:
description:
- Proxmox VE node, where the new VM will be created.
- Only required for C(state=present).
- For other states, it will be autodiscovered.
default: null
required: false
numa:
description:
- A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}').
- Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
- Values allowed are - C("cpu="<id[-id];...>",hostnodes="<id[-id];...>",memory="number",policy="(bind|interleave|preferred)"").
- C(cpus) CPUs accessing this NUMA node.
- C(hostnodes) Host NUMA nodes to use.
- C(memory) Amount of memory this NUMA node provides.
- C(policy) NUMA allocation policy.
default: null
required: false
onboot:
description:
- Specifies whether a VM will be started during system bootup.
default: "yes"
choices: [ "yes", "no" ]
required: false
ostype:
description:
- Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
- The l26 is Linux 2.6/3.X Kernel.
choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'l24', 'l26', 'solaris']
default: l26
required: false
parallel:
description:
- A hash/dictionary of map host parallel devices. C(parallel='{"key":"value", "key":"value"}').
- Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2.
- Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
default: null
required: false
pool:
description:
- Add the new VM to the specified pool.
default: null
required: false
protection:
description:
- Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
default: null
choices: [ "yes", "no" ]
required: false
reboot:
description:
- Allow reboot. If set to yes, the VM exit on reboot.
default: null
choices: [ "yes", "no" ]
required: false
revert:
description:
- Revert a pending change.
default: null
required: false
sata:
description:
- A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}').
- Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
- Values allowed are - C("storage:size,format=value").
- C(storage) is the storage identifier where to create the disk.
- C(size) is the size of the disk in GB.
- C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
default: null
required: false
scsi:
description:
- A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}').
      - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
- Values allowed are - C("storage:size,format=value").
- C(storage) is the storage identifier where to create the disk.
- C(size) is the size of the disk in GB.
- C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
default: null
required: false
scsihw:
description:
- Specifies the SCSI controller model.
choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
required: false
default: null
serial:
description:
- A hash/dictionary of serial device to create inside the VM. C('{"key":"value", "key":"value"}').
- Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
- Values allowed are - C((/dev/.+|socket)).
- /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
default: null
required: false
shares:
description:
      - Sets the amount of memory shares for auto-ballooning. (0 - 50000).
- The larger the number is, the more memory this VM gets.
- The number is relative to weights of all other running VMs.
- Using 0 disables auto-ballooning, this means no limit.
required: false
default: null
skiplock:
description:
- Ignore locks
- Only root is allowed to use this option.
required: false
default: null
smbios:
description:
- Specifies SMBIOS type 1 fields.
required: false
default: null
snapname:
description:
- The name of the snapshot. Used only with clone.
default: null
required: false
sockets:
description:
- Sets the number of CPU sockets. (1 - N).
required: false
default: 1
startdate:
description:
- Sets the initial date of the real time clock.
- Valid format for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25').
required: false
default: null
startup:
description:
- Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
- Order is a non-negative number defining the general startup order.
- Shutdown in done with reverse ordering.
required: false
default: null
state:
description:
- Indicates desired state of the instance.
      - If C(current), the current state of the VM will be fetched. You can access it with C(results.status)
choices: ['present', 'started', 'absent', 'stopped', 'restarted','current']
required: false
default: present
storage:
description:
- Target storage for full clone.
default: null
required: false
tablet:
description:
- Enables/disables the USB tablet device.
required: false
choices: [ "yes", "no" ]
default: "no"
target:
description:
- Target node. Only allowed if the original VM is on shared storage.
- Used only with clone
default: null
required: false
tdf:
description:
- Enables/disables time drift fix.
required: false
default: null
choices: [ "yes", "no" ]
template:
description:
- Enables/disables the template.
required: false
default: "no"
choices: [ "yes", "no" ]
timeout:
description:
- Timeout for operations.
default: 30
required: false
update:
description:
- If C(yes), the VM will be update with new value.
- Cause of the operations of the API and security reasons, I have disabled the update of the following parameters
- C(net, virtio, ide, sata, scsi). Per example updating C(net) update the MAC address and C(virtio) create always new disk...
default: "no"
choices: [ "yes", "no" ]
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
default: "no"
choices: [ "yes", "no" ]
required: false
vcpus:
description:
- Sets number of hotplugged vcpus.
required: false
default: null
vga:
description:
- Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
required: false
default: std
virtio:
description:
- A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}').
      - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
- Values allowed are - C("storage:size,format=value").
- C(storage) is the storage identifier where to create the disk.
- C(size) is the size of the disk in GB.
- C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
required: false
default: null
vmid:
description:
- Specifies the VM ID. Instead use I(name) parameter.
- If vmid is not set, the next available VM ID will be fetched from ProxmoxAPI.
default: null
required: false
watchdog:
description:
- Creates a virtual hardware watchdog device.
required: false
default: null
requirements: [ "proxmoxer", "requests" ]
'''
EXAMPLES = '''
# Create new VM with minimal options
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
# Create new VM with minimal options and given vmid
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
vmid : 100
# Create new VM with two network interface options.
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
net : '{"net0":"virtio,bridge=vmbr1,rate=200", "net1":"e1000,bridge=vmbr2,"}'
# Create new VM with one network interface, three virto hard disk, 4 cores, and 2 vcpus.
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
net : '{"net0":"virtio,bridge=vmbr1,rate=200"}'
virtio : '{"virtio0":"VMs_LVM:10", "virtio1":"VMs:2,format=qcow2", "virtio2":"VMs:5,format=raw"}'
cores : 4
vcpus : 2
# Clone VM with only source VM name
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
clone : spynal # The VM source
name : zavala # The target VM name
node : sabrewulf
storage : VMs
format : qcow2
timeout : 500 # Note: The task can take a while. Adapt
# Clone VM with source vmid and target newid and raw format
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
clone : arbitrary_name
vmid : 108
newid : 152
name : zavala # The target VM name
node : sabrewulf
storage : LVM_STO
format : raw
timeout : 300 # Note: The task can take a while. Adapt
# Create new VM and lock it for snapshot.
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
lock : snapshot
# Create new VM and set protection to disable the remove VM and remove disk operations
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
protection : yes
# Start VM
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
state : started
# Stop VM
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
state : stopped
# Stop VM with force
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
state : stopped
force : yes
# Restart VM
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
state : restarted
# Remove VM
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
state : absent
# Get VM current state
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
state : current
# Update VM configuration
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
cpu : 8
memory : 16384
update : yes
# Delete QEMU parameters
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
delete : 'args,template,cpulimit'
# Revert a pending change
- proxmox_kvm:
api_user : root@pam
api_password: secret
api_host : helldorado
name : spynal
node : sabrewulf
revert : 'template,cpulimit'
'''
RETURN = '''
devices:
description: The list of devices created or used.
returned: success
type: dict
sample: '
{
"ide0": "VMS_LVM:vm-115-disk-1",
"ide1": "VMs:115/vm-115-disk-3.raw",
"virtio0": "VMS_LVM:vm-115-disk-2",
"virtio1": "VMs:115/vm-115-disk-1.qcow2",
"virtio2": "VMs:115/vm-115-disk-2.raw"
}'
mac:
description: List of mac address created and net[n] attached. Useful when you want to use provision systems like Foreman via PXE.
returned: success
type: dict
sample: '
{
"net0": "3E:6E:97:D2:31:9F",
"net1": "B6:A1:FC:EF:78:A4"
}'
vmid:
description: The VM vmid.
returned: success
type: int
sample: 115
status:
description:
- The current virtual machine status.
- Returned only when C(state=current)
returned: success
type: dict
sample: '{
"changed": false,
"msg": "VM kropta with vmid = 110 is running",
"status": "running"
}'
'''
import os
import re
import time
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
try:
from proxmoxer import ProxmoxAPI
HAS_PROXMOXER = True
except ImportError:
HAS_PROXMOXER = False
VZ_TYPE = 'qemu'
def get_nextvmid(module, proxmox):
    """Ask the cluster for the next free VM id; fail the module on any error."""
    try:
        return proxmox.cluster.nextid.get()
    except Exception:
        # get_exception() keeps compatibility with older Ansible exception handling.
        reason = get_exception()
        module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % reason)
def get_vmid(proxmox, name):
    """Return every vmid in the cluster whose VM name equals *name*.

    The result is a (possibly empty) list, since VM names are not unique.
    """
    matches = []
    for resource in proxmox.cluster.resources.get(type='vm'):
        if resource['name'] == name:
            matches.append(resource['vmid'])
    return matches
def get_vm(proxmox, vmid):
    """Return the cluster resource entries matching *vmid* (empty list when absent)."""
    wanted = int(vmid)
    found = []
    for resource in proxmox.cluster.resources.get(type='vm'):
        if resource['vmid'] == wanted:
            found.append(resource)
    return found
def node_check(proxmox, node):
    """Return a non-empty (truthy) list when *node* is a member of the cluster."""
    present = []
    for entry in proxmox.nodes.get():
        if entry['node'] == node:
            present.append(True)
    return present
def get_vminfo(module, proxmox, node, vmid, **kwargs):
    """Collect the MAC addresses and disk volumes of a freshly created VM.

    Reads the VM configuration from the API, extracts the MAC of every
    requested net[n] interface and the volume of every requested
    virtio/ide/scsi/sata[n] disk, and stores them in the module-level
    ``results`` dict (kept for backward compatibility). Also returns
    ``results`` so callers no longer have to rely on the global.
    """
    global results
    results = {}
    mac = {}
    devices = {}
    try:
        vm = proxmox.nodes(node).qemu(vmid).config.get()
    except Exception as e:
        module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))

    # Sanitize kwargs: drop undefined args.
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)

    # Flatten dict-valued args (hostpci[n], ide[n], net[n], numa[n], parallel[n],
    # sata[n], scsi[n], serial[n], virtio[n]) into top-level keys. Iterate over a
    # snapshot of the keys: we mutate kwargs inside the loop, which would raise
    # RuntimeError on Python 3 when iterating the live keys() view.
    for key in list(kwargs.keys()):
        if isinstance(kwargs[key], dict):
            kwargs.update(kwargs[key])
            del kwargs[key]

    # Split the gathered information by type.
    for key in kwargs:
        if re.match(r'net[0-9]', key) is not None:
            # e.g. 'virtio=3E:6E:97:D2:31:9F,bridge=vmbr0' -> MAC between '=' and ','
            mac[key] = re.search('=(.*?),', vm[key]).group(1)
        elif (re.match(r'virtio[0-9]', key) is not None or
              re.match(r'ide[0-9]', key) is not None or
              re.match(r'scsi[0-9]', key) is not None or
              re.match(r'sata[0-9]', key) is not None):
            # e.g. 'VMS_LVM:vm-115-disk-1,size=10G' -> volume before the first ','
            devices[key] = re.search('(.*?),', vm[key]).group(1)

    results['mac'] = mac
    results['devices'] = devices
    results['vmid'] = int(vmid)
    return results
def settings(module, proxmox, vmid, node, name, timeout, **kwargs):
    """Apply configuration options to an existing VM.

    Returns True when the API call reports success (config.set yields None).
    """
    # Sanitize kwargs: drop undefined args before passing them to the API.
    options = dict((key, value) for key, value in kwargs.items() if value is not None)
    endpoint = getattr(proxmox.nodes(node), VZ_TYPE)(vmid)
    return endpoint.config.set(**options) is None
def create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, timeout, update, **kwargs):
    """Create, update or clone a Qemu VM and wait for the task to finish.

    Returns True on success; fails the module when the task does not
    complete within *timeout* seconds.
    """
    # Available only in PVE 4
    only_v4 = ['force', 'protection', 'skiplock']
    # valid clone parameters
    valid_clone_params = ['format', 'full', 'pool', 'snapname', 'storage', 'target']
    clone_params = {}
    # Default args for vm. Note: -args option is for experts only. It allows you to pass arbitrary arguments to kvm.
    vm_args = "-serial unix:/var/run/qemu-server/{}.serial,server,nowait".format(vmid)

    proxmox_node = proxmox.nodes(node)

    # Sanitize kwargs. Remove not defined args and ensure True and False converted to int.
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
    kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool)))

    # The features work only on PVE 4.
    if PVE_MAJOR_VERSION < 4:
        for p in only_v4:
            if p in kwargs:
                del kwargs[p]

    # If update, don't touch disks (virtio, ide, sata, scsi) or network interfaces.
    if update:
        if 'virtio' in kwargs:
            del kwargs['virtio']
        if 'sata' in kwargs:
            del kwargs['sata']
        if 'scsi' in kwargs:
            del kwargs['scsi']
        if 'ide' in kwargs:
            del kwargs['ide']
        if 'net' in kwargs:
            del kwargs['net']

    # Flatten dict-valued args (hostpci[n], ide[n], net[n], numa[n], parallel[n],
    # sata[n], scsi[n], serial[n], virtio[n]). Iterate over a snapshot of the
    # keys: we mutate kwargs inside the loop, which would raise RuntimeError on
    # Python 3 when iterating the live keys() view.
    for k in list(kwargs.keys()):
        if isinstance(kwargs[k], dict):
            kwargs.update(kwargs[k])
            del kwargs[k]

    # Rename numa_enabled to numa. According the API documentation.
    if 'numa_enabled' in kwargs:
        kwargs['numa'] = kwargs['numa_enabled']
        del kwargs['numa_enabled']

    # -args and skiplock require the root@pam user.
    if module.params['api_user'] == "root@pam" and module.params['args'] is None:
        if not update:
            kwargs['args'] = vm_args
    elif module.params['api_user'] == "root@pam" and module.params['args'] is not None:
        kwargs['args'] = module.params['args']
    elif module.params['api_user'] != "root@pam" and module.params['args'] is not None:
        module.fail_json(msg='args parameter require root@pam user. ')

    if module.params['api_user'] != "root@pam" and module.params['skiplock'] is not None:
        module.fail_json(msg='skiplock parameter require root@pam user. ')

    if update:
        # Updating the configuration is synchronous: set() returns None on success.
        if getattr(proxmox_node, VZ_TYPE)(vmid).config.set(name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs) is None:
            return True
        else:
            return False
    elif module.params['clone'] is not None:
        for param in valid_clone_params:
            if module.params[param] is not None:
                clone_params[param] = module.params[param]
        clone_params.update(dict([k, int(v)] for k, v in clone_params.items() if isinstance(v, bool)))
        taskid = proxmox_node.qemu(vmid).clone.post(newid=newid, name=name, **clone_params)
    else:
        taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)

    # Poll the task once per second until it finishes or *timeout* expires.
    while timeout:
        # Fetch the task status once per iteration (the original fetched it twice).
        task_status = proxmox_node.tasks(taskid).status.get()
        if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
            return True
        timeout = timeout - 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s' %
                             proxmox_node.tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False
def start_vm(module, proxmox, vm, vmid, timeout):
    """Start the VM and poll the resulting task until it succeeds or times out.

    Returns True on success; fails the module on timeout.
    """
    node_api = proxmox.nodes(vm[0]['node'])
    taskid = getattr(node_api, VZ_TYPE)(vmid).status.start.post()
    while timeout:
        # Fetch the task status once per iteration (the original fetched it twice).
        task_status = node_api.tasks(taskid).status.get()
        if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
                             % node_api.tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False
def stop_vm(module, proxmox, vm, vmid, timeout, force):
    """Shut down the VM (hard-stopping when *force* is set) and wait for the task.

    Returns True on success; fails the module on timeout.
    """
    node_api = proxmox.nodes(vm[0]['node'])
    vm_api = getattr(node_api, VZ_TYPE)(vmid)
    if force:
        taskid = vm_api.status.shutdown.post(forceStop=1)
    else:
        taskid = vm_api.status.shutdown.post()
    while timeout:
        # Fetch the task status once per iteration (the original fetched it twice).
        task_status = node_api.tasks(taskid).status.get()
        if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
                             % node_api.tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False
def main():
    """Entry point of the proxmox_kvm module.

    Parses the module arguments, connects to the Proxmox cluster, resolves the
    target vmid, and dispatches on ``state`` (present/started/stopped/
    restarted/absent/current) plus the delete/revert/clone sub-operations.
    """
    module = AnsibleModule(
        argument_spec=dict(
            acpi=dict(type='bool', default='yes'),
            agent=dict(type='bool'),
            args=dict(type='str', default=None),
            api_host=dict(required=True),
            api_user=dict(required=True),
            api_password=dict(no_log=True),
            autostart=dict(type='bool', default='no'),
            balloon=dict(type='int', default=0),
            bios=dict(choices=['seabios', 'ovmf']),
            boot=dict(type='str', default='cnd'),
            bootdisk=dict(type='str'),
            clone=dict(type='str', default=None),
            cores=dict(type='int', default=1),
            cpu=dict(type='str', default='kvm64'),
            cpulimit=dict(type='int'),
            cpuunits=dict(type='int', default=1000),
            delete=dict(type='str', default=None),
            description=dict(type='str'),
            digest=dict(type='str'),
            force=dict(type='bool', default=None),
            format=dict(type='str', default='qcow2', choices=['cloop', 'cow', 'qcow', 'qcow2', 'qed', 'raw', 'vmdk']),
            freeze=dict(type='bool'),
            full=dict(type='bool', default='yes'),
            hostpci=dict(type='dict'),
            hotplug=dict(type='str'),
            hugepages=dict(choices=['any', '2', '1024']),
            ide=dict(type='dict', default=None),
            keyboard=dict(type='str'),
            kvm=dict(type='bool', default='yes'),
            localtime=dict(type='bool'),
            lock=dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
            machine=dict(type='str'),
            memory=dict(type='int', default=512),
            migrate_downtime=dict(type='int'),
            migrate_speed=dict(type='int'),
            name=dict(type='str'),
            net=dict(type='dict'),
            newid=dict(type='int', default=None),
            node=dict(),
            numa=dict(type='dict'),
            numa_enabled=dict(type='bool'),
            onboot=dict(type='bool', default='yes'),
            ostype=dict(default='l26', choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'l24', 'l26', 'solaris']),
            parallel=dict(type='dict'),
            pool=dict(type='str'),
            protection=dict(type='bool'),
            reboot=dict(type='bool'),
            revert=dict(type='str', default=None),
            sata=dict(type='dict'),
            scsi=dict(type='dict'),
            scsihw=dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']),
            serial=dict(type='dict'),
            shares=dict(type='int'),
            skiplock=dict(type='bool'),
            smbios=dict(type='str'),
            snapname=dict(type='str'),
            sockets=dict(type='int', default=1),
            startdate=dict(type='str'),
            startup=dict(),
            state=dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
            storage=dict(type='str'),
            tablet=dict(type='bool', default='no'),
            target=dict(type='str'),
            tdf=dict(type='bool'),
            template=dict(type='bool', default='no'),
            timeout=dict(type='int', default=30),
            update=dict(type='bool', default='no'),
            validate_certs=dict(type='bool', default='no'),
            vcpus=dict(type='int', default=None),
            vga=dict(default='std', choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
            virtio=dict(type='dict', default=None),
            vmid=dict(type='int', default=None),
            watchdog=dict(),
        ),
        mutually_exclusive=[('delete', 'revert'), ('delete', 'update'), ('revert', 'update'), ('clone', 'update'), ('clone', 'delete'), ('clone', 'revert')],
        required_one_of=[('name', 'vmid',)],
        required_if=[('state', 'present', ['node'])]
    )

    if not HAS_PROXMOXER:
        module.fail_json(msg='proxmoxer required for this module')

    # Pull frequently used parameters into locals.
    api_user = module.params['api_user']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    clone = module.params['clone']
    cpu = module.params['cpu']
    cores = module.params['cores']
    delete = module.params['delete']
    memory = module.params['memory']
    name = module.params['name']
    newid = module.params['newid']
    node = module.params['node']
    revert = module.params['revert']
    sockets = module.params['sockets']
    state = module.params['state']
    timeout = module.params['timeout']
    update = bool(module.params['update'])
    vmid = module.params['vmid']
    validate_certs = module.params['validate_certs']

    # If password not set get it from PROXMOX_PASSWORD env
    if not api_password:
        try:
            api_password = os.environ['PROXMOX_PASSWORD']
        except KeyError as e:
            module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')

    try:
        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
        # VZ_TYPE and PVE_MAJOR_VERSION are module-level globals consumed by the
        # helper functions above (create_vm, start_vm, stop_vm, settings).
        global VZ_TYPE
        global PVE_MAJOR_VERSION
        PVE_MAJOR_VERSION = 3 if float(proxmox.version.get()['version']) < 4.0 else 4
    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    # If vmid not set get the Next VM id from ProxmoxAPI
    # If vm name is set get the VM id from ProxmoxAPI
    if not vmid:
        if state == 'present' and (not update and not clone) and (not delete and not revert):
            try:
                vmid = get_nextvmid(module, proxmox)
            except Exception as e:
                module.fail_json(msg="Can't get the next vimd for VM {} automatically. Ensure your cluster state is good".format(name))
        else:
            try:
                # When cloning, the source VM is looked up by the `clone` name.
                if not clone:
                    vmid = get_vmid(proxmox, name)[0]
                else:
                    vmid = get_vmid(proxmox, clone)[0]
            except Exception as e:
                if not clone:
                    module.fail_json(msg="VM {} does not exist in cluster.".format(name))
                else:
                    module.fail_json(msg="VM {} does not exist in cluster.".format(clone))

    if clone is not None:
        # The target name must not already exist, and the source VM must exist.
        if get_vmid(proxmox, name):
            module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
        if vmid is not None:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
        if not newid:
            try:
                newid = get_nextvmid(module, proxmox)
            except Exception as e:
                module.fail_json(msg="Can't get the next vimd for VM {} automatically. Ensure your cluster state is good".format(name))
        else:
            vm = get_vm(proxmox, newid)
            if vm:
                module.exit_json(changed=False, msg="vmid %s with VM name %s already exists" % (newid, name))

    # delete / revert are standalone config operations handled via settings().
    if delete is not None:
        try:
            settings(module, proxmox, vmid, node, name, timeout, delete=delete)
            module.exit_json(changed=True, msg="Settings has deleted on VM {} with vmid {}".format(name, vmid))
        except Exception as e:
            module.fail_json(msg='Unable to delete settings on VM {} with vimd {}: '.format(name, vmid) + str(e))
    elif revert is not None:
        try:
            settings(module, proxmox, vmid, node, name, timeout, revert=revert)
            module.exit_json(changed=True, msg="Settings has reverted on VM {} with vmid {}".format(name, vmid))
        except Exception as e:
            module.fail_json(msg='Unable to revert settings on VM {} with vimd {}: Maybe is not a pending task... '.format(name, vmid) + str(e))

    if state == 'present':
        try:
            if get_vm(proxmox, vmid) and not (update or clone):
                module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid)
            elif get_vmid(proxmox, name) and not (update or clone):
                module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
            # NOTE(review): `not (node, name)` is always False (a non-empty tuple
            # is truthy), so this mandatory-parameter check can never fire.
            # Probably meant `not node or not name` — confirm before changing,
            # as fixing it would start rejecting vmid-only creations.
            elif not (node, name):
                module.fail_json(msg='node, name is mandatory for creating/updating vm')
            elif not node_check(proxmox, node):
                module.fail_json(msg="node '%s' does not exist in cluster" % node)

            create_vm(module, proxmox, vmid, newid, node, name, memory, cpu, cores, sockets, timeout, update,
                      acpi=module.params['acpi'],
                      agent=module.params['agent'],
                      autostart=module.params['autostart'],
                      balloon=module.params['balloon'],
                      bios=module.params['bios'],
                      boot=module.params['boot'],
                      bootdisk=module.params['bootdisk'],
                      cpulimit=module.params['cpulimit'],
                      cpuunits=module.params['cpuunits'],
                      description=module.params['description'],
                      digest=module.params['digest'],
                      force=module.params['force'],
                      freeze=module.params['freeze'],
                      hostpci=module.params['hostpci'],
                      hotplug=module.params['hotplug'],
                      hugepages=module.params['hugepages'],
                      ide=module.params['ide'],
                      keyboard=module.params['keyboard'],
                      kvm=module.params['kvm'],
                      localtime=module.params['localtime'],
                      lock=module.params['lock'],
                      machine=module.params['machine'],
                      migrate_downtime=module.params['migrate_downtime'],
                      migrate_speed=module.params['migrate_speed'],
                      net=module.params['net'],
                      numa=module.params['numa'],
                      numa_enabled=module.params['numa_enabled'],
                      onboot=module.params['onboot'],
                      ostype=module.params['ostype'],
                      parallel=module.params['parallel'],
                      pool=module.params['pool'],
                      protection=module.params['protection'],
                      reboot=module.params['reboot'],
                      sata=module.params['sata'],
                      scsi=module.params['scsi'],
                      scsihw=module.params['scsihw'],
                      serial=module.params['serial'],
                      shares=module.params['shares'],
                      skiplock=module.params['skiplock'],
                      smbios1=module.params['smbios'],
                      snapname=module.params['snapname'],
                      startdate=module.params['startdate'],
                      startup=module.params['startup'],
                      tablet=module.params['tablet'],
                      target=module.params['target'],
                      tdf=module.params['tdf'],
                      template=module.params['template'],
                      vcpus=module.params['vcpus'],
                      vga=module.params['vga'],
                      virtio=module.params['virtio'],
                      watchdog=module.params['watchdog'])

            # For fresh deployments, gather MACs/devices for the exit payload
            # (get_vminfo fills the module-level `results` global).
            if not clone:
                get_vminfo(module, proxmox, node, vmid,
                           ide=module.params['ide'],
                           net=module.params['net'],
                           sata=module.params['sata'],
                           scsi=module.params['scsi'],
                           virtio=module.params['virtio'])
            if update:
                module.exit_json(changed=True, msg="VM %s with vmid %s updated" % (name, vmid))
            elif clone is not None:
                module.exit_json(changed=True, msg="VM %s with newid %s cloned from vm with vmid %s" % (name, newid, vmid))
            else:
                module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
        except Exception as e:
            if update:
                module.fail_json(msg="Unable to update vm {} with vimd {}=".format(name, vmid) + str(e))
            elif clone is not None:
                module.fail_json(msg="Unable to clone vm {} from vimd {}=".format(name, vmid) + str(e))
            else:
                module.fail_json(msg="creation of %s VM %s with vmid %s failed with exception=%s" % (VZ_TYPE, name, vmid, e))

    elif state == 'started':
        try:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is already running" % vmid)

            if start_vm(module, proxmox, vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s started" % vmid)
        except Exception as e:
            module.fail_json(msg="starting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'stopped':
        try:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)

            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
                module.exit_json(changed=False, msg="VM %s is already stopped" % vmid)

            if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params['force']):
                module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
        except Exception as e:
            module.fail_json(msg="stopping of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'restarted':
        try:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
                module.exit_json(changed=False, msg="VM %s is not running" % vmid)

            if stop_vm(module, proxmox, vm, vmid, timeout, force=module.params['force']) and start_vm(module, proxmox, vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
        except Exception as e:
            module.fail_json(msg="restarting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'absent':
        try:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.exit_json(changed=False, msg="VM %s does not exist" % vmid)

            # Refuse to delete a running VM; the caller must stop it first.
            if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)

            taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
            while timeout:
                if (proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped' and
                        proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK'):
                    module.exit_json(changed=True, msg="VM %s removed" % vmid)
                timeout -= 1
                if timeout == 0:
                    module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
                                     % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])

                time.sleep(1)
        except Exception as e:
            module.fail_json(msg="deletion of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'current':
        status = {}
        try:
            vm = get_vm(proxmox, vmid)
            if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
            current = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status']
            status['status'] = current
            # NOTE(review): `status` always has the 'status' key here, so this
            # condition is always true; the guard is redundant but harmless.
            if status:
                module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
        except Exception as e:
            module.fail_json(msg="Unable to get vm {} with vmid = {} status: ".format(name, vmid) + str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
jupyter/nbgrader | nbgrader/nbgraderformat/common.py | 3 | 2370 | import json
import os
import jsonschema
from jsonschema import ValidationError
from traitlets.config import LoggingConfigurable
from nbformat.notebooknode import NotebookNode
root = os.path.dirname(__file__)
class SchemaMismatchError(Exception):
    """Signal that a cell's nbgrader schema version differs from the expected one."""

    def __init__(self, message, actual_version, expected_version):
        # Record both sides of the mismatch so callers can report details.
        self.actual_version = actual_version
        self.expected_version = expected_version
        super().__init__(message)
class SchemaTooOldError(SchemaMismatchError):
    """The cell's schema version is older than this validator supports."""
    pass
class SchemaTooNewError(SchemaMismatchError):
    """The cell's schema version is newer than this validator supports."""
    pass
class BaseMetadataValidator(LoggingConfigurable):
    """Base class for validating/upgrading nbgrader cell metadata.

    Subclasses must define ``schema_version`` (an int) and implement
    ``upgrade_cell_metadata``. The JSON schema is loaded from the
    ``v<schema_version>.json`` file next to this module.
    """

    def __init__(self) -> None:
        # Load the versioned JSON schema shipped alongside this package.
        with open(os.path.join(root, "v{:d}.json".format(self.schema_version)), "r") as fh:
            self.schema = json.loads(fh.read())

    def _remove_extra_keys(self, cell: NotebookNode) -> None:
        # Strip any metadata keys not declared in the schema (warns first).
        meta = cell.metadata['nbgrader']
        allowed = set(self.schema["properties"].keys())
        keys = set(meta.keys()) - allowed
        if len(keys) > 0:
            self.log.warning("extra keys detected in metadata, these will be removed: {}".format(keys))
            for key in keys:
                del meta[key]

    def upgrade_notebook_metadata(self, nb: NotebookNode) -> NotebookNode:
        """Upgrade every cell's metadata in place and return the notebook."""
        for cell in nb.cells:
            self.upgrade_cell_metadata(cell)
        return nb

    def upgrade_cell_metadata(self, cell: NotebookNode) -> NotebookNode:  # pragma: no cover
        raise NotImplementedError("this method must be implemented by subclasses")

    def validate_cell(self, cell: NotebookNode) -> None:
        """Validate one cell's nbgrader metadata against the JSON schema.

        Cells without nbgrader metadata are skipped. Raises
        SchemaTooOldError/SchemaTooNewError on version mismatch, and
        jsonschema.ValidationError when the metadata shape is invalid.
        """
        if 'nbgrader' not in cell.metadata:
            return
        # Missing schema_version is treated as version 0 (pre-versioning).
        schema = cell.metadata['nbgrader'].get('schema_version', 0)
        if schema < self.schema_version:
            raise SchemaTooOldError(
                "Outdated schema version: {} (expected {})".format(schema, self.schema_version),
                schema, self.schema_version)
        elif schema > self.schema_version:
            raise SchemaTooNewError(
                "Schema version is too new: {} (expected {})".format(schema, self.schema_version),
                schema, self.schema_version)
        jsonschema.validate(cell.metadata['nbgrader'], self.schema)

    def validate_nb(self, nb: NotebookNode) -> None:
        """Validate every cell of the notebook (see validate_cell)."""
        for cell in nb.cells:
            self.validate_cell(cell)
| bsd-3-clause |
mavit/ansible-modules-extras | database/misc/mongodb_user.py | 13 | 13213 | #!/usr/bin/python
# (c) 2012, Elliott Foster <elliott@fourkitchens.com>
# Sponsored by Four Kitchens http://fourkitchens.com.
# (c) 2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: mongodb_user
short_description: Adds or removes a user from a MongoDB database.
description:
- Adds or removes a user from a MongoDB database.
version_added: "1.1"
options:
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- The host running the database
required: false
default: localhost
login_port:
description:
- The port to connect to
required: false
default: 27017
login_database:
version_added: "2.0"
description:
- The database where login credentials are stored
required: false
default: null
replica_set:
version_added: "1.6"
description:
- Replica set to connect to (automatically connects to primary for writes)
required: false
default: null
database:
description:
- The name of the database to add/remove the user from
required: true
name:
description:
- The name of the user to add or remove
required: true
default: null
aliases: [ 'user' ]
password:
description:
- The password to use for the user
required: false
default: null
ssl:
version_added: "1.8"
description:
- Whether to use an SSL connection when connecting to the database
default: False
roles:
version_added: "1.3"
description:
- "The database user roles valid values could either be one or more of the following strings: 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'"
- "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
- "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
required: false
default: "readWrite"
state:
description:
- The database user state
required: false
default: present
choices: [ "present", "absent" ]
update_password:
required: false
default: always
choices: ['always', 'on_create']
version_added: "2.1"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
notes:
- Requires the pymongo Python package on the remote host, version 2.4.2+. This
can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html
requirements: [ "pymongo" ]
author: "Elliott Foster (@elliotttf)"
'''
EXAMPLES = '''
# Create 'burgers' database user with name 'bob' and password '12345'.
- mongodb_user: database=burgers name=bob password=12345 state=present
# Create a database user via SSL (MongoDB must be compiled with the SSL option and configured properly)
- mongodb_user: database=burgers name=bob password=12345 state=present ssl=True
# Delete 'burgers' database user with name 'bob'.
- mongodb_user: database=burgers name=bob state=absent
# Define more users with various specific roles (if not defined, no roles is assigned, and the user will be added via pre mongo 2.2 style)
- mongodb_user: database=burgers name=ben password=12345 roles='read' state=present
- mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present
- mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present
# add a user to database in a replica set, the primary server is automatically discovered and written to
- mongodb_user: database=burgers name=bob replica_set=belcher password=12345 roles='readWriteAnyDatabase' state=present
# add a user 'oplog_reader' with read only access to the 'local' database on the replica_set 'belcher'. This is useful for oplog access (MONGO_OPLOG_URL).
# please notice the credentials must be added to the 'admin' database because the 'local' database is not synchronized and can't receive user credentials
# To login with such user, the connection string should be MONGO_OPLOG_URL="mongodb://oplog_reader:oplog_reader_password@server1,server2/local?authSource=admin"
# This syntax requires mongodb 2.6+ and pymongo 2.5+
- mongodb_user:
login_user: root
login_password: root_password
database: admin
user: oplog_reader
password: oplog_reader_password
state: present
replica_set: belcher
roles:
- { db: "local" , role: "read" }
'''
import ConfigParser
from distutils.version import LooseVersion
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
from pymongo import Connection as MongoClient
except ImportError:
pymongo_found = False
else:
pymongo_found = True
else:
pymongo_found = True
# =========================================
# MongoDB module specific support methods.
#
def user_find(client, user, db_name):
    """Look up *user* scoped to *db_name* in admin.system.users.

    Returns the stored user document when found, otherwise False.
    """
    matches = (
        doc for doc in client["admin"].system.users.find()
        if doc['user'] == user and doc['db'] == db_name
    )
    return next(matches, False)
def user_add(module, client, db_name, user, password, roles):
    """Create or update a MongoDB user, optionally with roles.

    pymongo's add_user is a _create_or_update_user, so we cannot tell whether
    the user was created or updated without reproducing a lot of the logic in
    database.py of pymongo. Fails the module when the server rejects the
    roles-based form (e.g. on pre-2.4 mongodb / pre-2.5 pymongo).
    """
    db = client[db_name]
    if roles is None:
        db.add_user(user, password, False)
    else:
        try:
            db.add_user(user, password, None, roles=roles)
        except OperationFailure as e:  # 'as' form works on Python 2.6+ and 3.x
            err_msg = str(e)
            if LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
                err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)'
            module.fail_json(msg=err_msg)
def user_remove(module, client, db_name, user):
    """Drop *user* from *db_name*'s database; honours Ansible check mode."""
    existing = user_find(client, user, db_name)
    if existing:
        if module.check_mode:
            # Report the would-be change without touching the database.
            module.exit_json(changed=True, user=user)
        client[db_name].remove_user(user)
    else:
        module.exit_json(changed=False, user=user)
def load_mongocnf():
    """Read default credentials from ~/.mongodb.cnf ([client] section).

    Returns a dict with 'user' and 'password' keys, or False when the file
    is missing or does not contain the expected options.
    """
    config = ConfigParser.RawConfigParser()
    mongocnf = os.path.expanduser('~/.mongodb.cnf')

    try:
        config.readfp(open(mongocnf))
        creds = dict(
            user=config.get('client', 'user'),
            password=config.get('client', 'pass')
        )
    except (ConfigParser.NoOptionError, IOError):
        # Missing file or incomplete [client] section: signal "no credentials".
        return False

    return creds
def check_if_roles_changed(uinfo, roles, db_name):
    """Return True when the requested *roles* differ from the user's current roles.

    String entries in *roles* are interpreted as role names scoped to
    *db_name*; dict entries ({'db': ..., 'role': ...}) are taken as-is.
    """
    # We must be aware of users which can read the oplog on a replicaset
    # Such users must have access to the local DB, but since this DB does not store users credentials
    # and is not synchronized among replica sets, the user must be stored on the admin db
    # Therefore their structure is the following :
    # {
    #     "_id" : "admin.oplog_reader",
    #     "user" : "oplog_reader",
    #     "db" : "admin",                    # <-- admin DB
    #     "roles" : [
    #         {
    #             "role" : "read",
    #             "db" : "local"             # <-- local DB
    #         }
    #     ]
    # }

    # basestring only exists on Python 2; fall back to str on Python 3.
    try:
        string_types = basestring
    except NameError:
        string_types = str

    def make_sure_roles_are_a_list_of_dict(roles, db_name):
        output = list()
        for role in roles:
            if isinstance(role, string_types):
                output.append({"role": role, "db": db_name})
            else:
                output.append(role)
        return output

    def normalize(role_list):
        # Compare roles as unordered collections of (db, role) pairs. Sorting
        # dicts directly raises TypeError on Python 3, so sort stable tuples.
        return sorted((role.get('db'), role.get('role')) for role in role_list)

    roles_as_list_of_dict = make_sure_roles_are_a_list_of_dict(roles, db_name)
    uinfo_roles = uinfo.get('roles', [])

    if normalize(roles_as_list_of_dict) == normalize(uinfo_roles):
        return False
    return True
# =========================================
# Module execution.
#
def main():
    """Entry point of the mongodb_user module.

    Parses arguments, connects (optionally via a replica set), authenticates
    using explicit credentials or ~/.mongodb.cnf, then adds or removes the
    requested user depending on ``state``.
    """
    module = AnsibleModule(
        argument_spec = dict(
            login_user=dict(default=None),
            login_password=dict(default=None),
            login_host=dict(default='localhost'),
            login_port=dict(default='27017'),
            login_database=dict(default=None),
            replica_set=dict(default=None),
            database=dict(required=True, aliases=['db']),
            name=dict(required=True, aliases=['user']),
            password=dict(aliases=['pass']),
            ssl=dict(default=False, type='bool'),
            roles=dict(default=None, type='list'),
            state=dict(default='present', choices=['absent', 'present']),
            update_password=dict(default="always", choices=["always", "on_create"]),
        ),
        supports_check_mode=True
    )

    if not pymongo_found:
        module.fail_json(msg='the python pymongo module is required')

    # Pull the parameters into locals for readability.
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_host = module.params['login_host']
    login_port = module.params['login_port']
    login_database = module.params['login_database']
    replica_set = module.params['replica_set']
    db_name = module.params['database']
    user = module.params['name']
    password = module.params['password']
    ssl = module.params['ssl']
    roles = module.params['roles']
    state = module.params['state']
    update_password = module.params['update_password']

    try:
        if replica_set:
            client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
        else:
            client = MongoClient(login_host, int(login_port), ssl=ssl)

        # No explicit credentials: fall back to ~/.mongodb.cnf if present.
        if login_user is None and login_password is None:
            mongocnf_creds = load_mongocnf()
            if mongocnf_creds is not False:
                login_user = mongocnf_creds['user']
                login_password = mongocnf_creds['password']
        elif login_password is None or login_user is None:
            module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')

        if login_user is not None and login_password is not None:
            client.admin.authenticate(login_user, login_password, source=login_database)
        elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
            # Unauthenticated access relies on MongoDB's localhost exception,
            # which only permits creating the very first admin user.
            if db_name != "admin":
                module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
            #else: this has to be the first admin user added

    except ConnectionFailure, e:
        module.fail_json(msg='unable to connect to database: %s' % str(e))

    if state == 'present':
        if password is None and update_password == 'always':
            module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')

        uinfo = user_find(client, user, db_name)
        # With update_password=on_create, leave an existing user's password
        # untouched and short-circuit when the roles did not change either.
        if update_password != 'always' and uinfo:
            password = None
            if not check_if_roles_changed(uinfo, roles, db_name):
                module.exit_json(changed=False, user=user)

        if module.check_mode:
            module.exit_json(changed=True, user=user)

        try:
            user_add(module, client, db_name, user, password, roles)
        except OperationFailure, e:
            module.fail_json(msg='Unable to add or update user: %s' % str(e))

        # Here we can check password change if mongo provide a query for that : https://jira.mongodb.org/browse/SERVER-22848
        #newuinfo = user_find(client, user, db_name)
        #if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
        #    module.exit_json(changed=False, user=user)

    elif state == 'absent':
        try:
            user_remove(module, client, db_name, user)
        except OperationFailure, e:
            module.fail_json(msg='Unable to remove user: %s' % str(e))

    module.exit_json(changed=True, user=user)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.