repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
ideadevice/alembic | alembic/compat.py | 1 | 1557 | import sys
# Alembic supports Python 2.6+; fail fast on anything older.
if sys.version_info < (2, 6):
    raise NotImplementedError("Python 2.6 or greater is required.")

# Version flags used throughout this module to branch between interpreters.
py3k = sys.version_info >= (3, 0)
py33 = sys.version_info >= (3, 3)

if py3k:
    import builtins as compat_builtins
    # Single-element tuple so callers can pass this straight to isinstance().
    string_types = str,
    binary_type = bytes
    text_type = str

    def callable(fn):
        # Deliberately shadows the builtin name so both branches export
        # a `callable`; this emulation only checks for __call__.
        return hasattr(fn, '__call__')
else:
    import __builtin__ as compat_builtins
    string_types = basestring,
    binary_type = str
    text_type = unicode
    # Python 2 has a real callable() builtin; re-export it.
    callable = callable

if py3k:
    # Python 3 renamed the module to configparser and folded SafeConfigParser
    # into ConfigParser; alias both so the rest of the code is version-agnostic.
    from configparser import ConfigParser as SafeConfigParser
    import configparser
else:
    from ConfigParser import SafeConfigParser
    import ConfigParser as configparser
if py33:
    # Python 3.3+: use importlib machinery rather than the imp module.
    from importlib import machinery

    def load_module(module_id, path):
        """Import and return the Python source file at ``path`` as module ``module_id``."""
        return machinery.SourceFileLoader(module_id, path).load_module()
else:
    import imp

    def load_module(module_id, path):
        """Import and return the Python source file at ``path`` as module ``module_id``."""
        fp = open(path, 'rb')
        try:
            return imp.load_source(module_id, path, fp)
        finally:
            # Ensure the handle is released even if the import raises.
            fp.close()
try:
    # Python 3: exec is an ordinary builtin function we can alias directly.
    exec_ = getattr(compat_builtins, 'exec')
except AttributeError:
    # Python 2
    def exec_(func_text, globals_, lcl):
        # The Python 2 exec *statement* is itself wrapped in a string so that
        # this file still parses under Python 3, where "exec x in y" is a
        # syntax error. The inner exec runs with this frame's locals, so
        # func_text/globals_/lcl are visible to it.
        exec('exec func_text in globals_, lcl')
################################################
# cross-compatible metaclass implementation
# Copyright (c) 2010-2012 Benjamin Peterson
def with_metaclass(meta, base=object):
    """Return an empty class created by *meta* (named "<Meta>Base") that
    derives from *base*, for use as a py2/py3-portable metaclassed base."""
    name = "%sBase" % meta.__name__
    bases = (base,)
    return meta(name, bases, {})
################################################
| mit |
codrut3/tensorflow | tensorflow/examples/speech_commands/generate_streaming_test_wav_test.py | 101 | 1416 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for test file generation for speech commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.examples.speech_commands import generate_streaming_test_wav
from tensorflow.python.platform import test
class GenerateStreamingTestWavTest(test.TestCase):
    """Exercises generate_streaming_test_wav.mix_in_audio_sample."""

    def testMixInAudioSample(self):
        # Mix 1000 one-valued samples into a silent 10000-sample track.
        # Positional args presumably mean offset=2000 with 100-sample
        # fade in/out -- TODO confirm against mix_in_audio_sample's signature.
        track_data = np.zeros([10000])
        sample_data = np.ones([1000])
        generate_streaming_test_wav.mix_in_audio_sample(
            track_data, 2000, sample_data, 0, 1000, 1.0, 100, 100)
        # A point inside the mixed region now carries the sample value...
        self.assertNear(1.0, track_data[2500], 0.0001)
        # ...while a point outside the mixed region stays silent.
        self.assertNear(0.0, track_data[3500], 0.0001)
if __name__ == "__main__":
test.main()
| apache-2.0 |
akkana/scripts | calendarparse.py | 1 | 4723 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Parse a text list of events in tabular format.
# Tries to be smart about the parsing, assuming the input data
# is coming from somebody who's pasting it from a word processor
# into some sort of dumb webmail page.
#
# Output either .ics iCalendar format or HTML list of events.
#
# TODO: detect times no matter where they are on a line.
# TODO: output HTML formatted like a calendar, not a list of events.
# Copyright 2016 by Akkana Peck: share and enjoy under the GPL v2 or later.
from icalendar import Calendar, Event, vDatetime
import sys
# Month-name prefixes used for parsing; index + 1 is the month number
# (so "March" matches months[2] == "Mar" -> month 3).
months = [ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug",
           "Sep", "Oct", "Nov", "Dec" ]
def tabular_string_to_calendar(calstr):
    """Parse an erratically formatted string of tab-separated event lines.

    Each event line is expected to look like "<year>\\t<Month Day>\\t<summary>".
    Lines that don't start with "20" (a year) or don't parse are skipped,
    as promised by the module doc, rather than raising.

    :param calstr: tabular text, one event per line
    :returns: a list of icalendar.Calendar entries
    """
    entries = []
    for line in calstr.split('\n'):
        # Only lines beginning with a 4-digit year like "2016" are events.
        if not line.startswith("20"):
            continue
        parts = line.split("\t")
        if len(parts) < 3:
            # Previously this raised IndexError on a year line without both
            # tab separators; skip it instead, consistent with the contract.
            print("Couldn't parse event line '%s'" % line)
            continue
        monthday = parts[1].split(" ")
        month = None
        for i, m in enumerate(months):
            if monthday[0].startswith(m):
                month = i + 1
                break
        if month is None:
            print("Couldn't parse month from '%s'" % line)
            continue
        try:
            day = int(monthday[1])
        except (IndexError, ValueError):
            # Missing or non-numeric day of month: skip, don't crash.
            print("Couldn't parse day of month from '%s'" % line)
            continue
        cal = Calendar()
        cal['dtstart'] = '%4s%02d%02dT000000' % (parts[0], month, day)
        cal['summary'] = parts[2]
        entries.append(cal)
    return entries
def ics_file_as_html(filename):
    """Read a file of blank-line-separated iCalendar events and render them
    as an HTML table of events.

    :param filename: path to the .ics file
    :returns: HTML string produced by eventlist_as_html
    """
    # 'with' guarantees the file is closed even if read() raises, unlike
    # the previous explicit open()/close() pair.
    with open(filename) as fp:
        ics = fp.read().split('\n\n')
    # Empty chunks (e.g. trailing blank lines) are skipped.
    eventlist = [Calendar.from_ical(event) for event in ics if event]
    return eventlist_as_html(eventlist)
def eventlist_as_html(entries):
    """Take a list of icalendar.Calendar entries;
    format it as a list of events in HTML, returned as a string.
    Writes to icalendar format as an intermediary because that's
    the easiest way to get icalendar.Calendar to parse its date
    """
    html = '''<table border=1 summary="This table shows the calendar of events. Each row is an event. Columns contain the event date, time, and description which includes the location">
<caption>Calendar of Events</caption>
<thead>
<tr>
<th id="date" width="15%">Date</th>
<th id="time" width="10%">Time</th>
<th id="description">Description and Place</th>
</tr>
</thead>
<tbody>'''
    year = None
    for cal in entries:
        # NOTE(review): this looks like leftover debugging output written to
        # stdout alongside the returned HTML; consider removing it.
        print("cal['DTSTART'] = %s" % cal['DTSTART'])
        # cal['DTSTART'] might be a icalendar.prop.vDDDTypes object
        # or it might be a string. Handle either type:
        try:
            starttime = cal['DTSTART'].dt
        except AttributeError:
            starttime = vDatetime.from_ical(cal['DTSTART'])
        # Emit a year header row each time the year changes.
        if not year or starttime.year != year:
            year = starttime.year
            html += '''<tr>
<td colspan="3"><h4>%d</h4></td>
</tr>''' % year
        datestr = starttime.strftime("%A,<br />%B %d")
        # Times aren't parsed out of the summaries; the column stays empty.
        timestr = ""
        html += '''<tr>
<td headers="date">%s</td><td headers="time">%s</td>
<td headers="description"><span class="alert">%s</td></tr>''' \
            % (datestr, timestr, cal['SUMMARY'])
        # Seems like it would be better to use
        # cal['SUMMARY'].encode('utf-8', 'xmlcharrefreplace'))
        # but python gives an error,
        # UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2
        # in position 43: ordinal not in range(128)
    html += '''</tbody>
</table> <!-- calendar table -->'''
    return html
if __name__ == "__main__":
    if len(sys.argv) > 1:
        # Commandline mode: render an existing .ics file as HTML and exit.
        print(ics_file_as_html(sys.argv[1]))
        sys.exit(0)

    # You can stuff your data into this calstr, or read it from a file
    # passed on the commandline.
    # Note: the dash in the Jan 16 event is a nonascii character.
    # Leave it in place for testing, to make sure encode is called as needed.
    # NOTE(review): tabular_string_to_calendar splits event lines on tabs;
    # the year/date/summary separators below must be literal tab characters --
    # verify they survived any copy/paste.
    test_calstr = ''' TITLE, FIRST LINE WILL BE IGNORED
2016	Jan 16	Board meeting, our city, 11 am – 3 pm
2016	Feb 3	Reception, 5:30-7:30 pm
Hotel name?
2016	Feb 4	Group meeting.
Where will we meet?
**** Lines that don't parse as dates/events will be ignored ****
2016	Mar 12	Board meeting, our city, some location
2016	March 22	Deadline for Spring newsletter
'''
    entries = tabular_string_to_calendar(test_calstr)
    for cal in entries:
        print(cal.to_ical())
    print(eventlist_as_html(entries))
| gpl-2.0 |
golismero/golismero | thirdparty_libs/requests/packages/charade/langthaimodel.py | 206 | 11475 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Aggregate model description, presumably consumed by charade's single-byte
# charset prober: the byte-to-frequency-order map and the bigram precedence
# matrix defined above, plus tuning constants from the sampled corpus.
TIS620ThaiModel = {
    'charToOrderMap': TIS620CharToOrderMap,
    'precedenceMatrix': ThaiLangModel,
    # Fraction of sequences covered by the most-frequent bigrams; matches the
    # "first 512 sequences: 92.6386%" note in the model statistics above.
    'mTypicalPositiveRatio': 0.926386,
    'keepEnglishLetter': False,
    'charsetName': "TIS-620"
}
# flake8: noqa
| gpl-2.0 |
emergence/transmogrifydict | transmogrifydict.py | 1 | 13140 | import json
import six
import re
# (?<!\\) - don't match leading slashes
# (?:\\\\)* - allow any even number of slashes
# (\.) - capture the actual separator
PERIOD_SPLIT = re.compile(r'(?<!\\)(?:\\\\)*(\.)')
# The same even-number-of-backslashes scheme applied to the other
# separator characters used by the path syntax:
OPEN_SQUARE_BRACKET_SPLIT = re.compile(r'(?<!\\)(?:\\\\)*(\[)')
EQUAL_SPLIT = re.compile(r'(?<!\\)(?:\\\\)*(=)')
TIDLE_SPLIT = re.compile(r'(?<!\\)(?:\\\\)*(~)')
# A lone (non-doubled) backslash; used to strip escape characters.
SINGLE_SLASH = re.compile(r'(?<!\\)(\\)')
def _non_quoted_split(regex, string):
indices = list(regex.finditer(string))
retval = []
for x, y in zip([None]+indices, indices+[None]):
retval.append(string[x.end(1) if x else 0:y.start(1) if y else None])
return retval
def _un_slash_escape(string):
    """Undo backslash escaping: drop each lone backslash, then turn every
    doubled backslash into a single literal one."""
    without_escapes = SINGLE_SLASH.sub('', string)
    return without_escapes.replace('\\\\', '\\')
def _get_next_mapped_value_for_key(mapped_value, key, found_value, path_parts, path_parts_index, path_parts_break):
    """Descend one level: look up ``key`` inside ``mapped_value``.

    Strings are first coerced through json.loads in case an API returned
    embedded JSON. Returns an updated ``(found_value, mapped_value,
    path_parts_break)`` triple; ``path_parts_break`` tells the caller to stop
    walking the remaining path parts.
    """
    try:
        if isinstance(mapped_value, six.string_types):
            # ugh, maybe it is json?
            try:
                mapped_value = json.loads(mapped_value)
            except ValueError:
                raise ValueError(
                    'string found when looking for dict-like object at {!r}. failed to convert to json.'.format(
                        '.'.join(path_parts[:path_parts_index])
                    )
                )
        if hasattr(mapped_value, 'keys'):
            mapped_value = mapped_value[key]
        else:
            # Not dict-like at all: report "not found" and stop descending.
            found_value = False
            path_parts_break = True
    except KeyError:
        # The key simply isn't there: same "not found" outcome.
        found_value = False
        path_parts_break = True
    return found_value, mapped_value, path_parts_break
def _array_part_is_digit(mapped_value, array_part, key, path_parts, path_parts_index):
# [0]
try:
mapped_value = mapped_value[int(array_part)]
except KeyError:
raise ValueError('array expected at {!r}, found dict-like object.'.format(
'.'.join(path_parts[:path_parts_index] + [key])
))
except IndexError:
raise IndexError('index {!r} out of range on array at {!r}.'.format(
int(array_part),
'.'.join(path_parts[:path_parts_index] + [key])
))
return mapped_value
def _array_part_is_key_or_sub_key_equal(found_value, mapped_value, array_part, path_parts_break):
    """Resolve a ``[Key=Value]`` or ``[Key~SubKey=Value]`` search expression,
    selecting the first item whose (nested) key equals the value."""
    # [Key=Value] or [Key~SubKey=Value]
    # split on non quoted equals signs
    array_part_break = False
    equal_parts = _non_quoted_split(EQUAL_SPLIT, array_part)
    find_key = equal_parts[0]
    find_value = equal_parts[1:]
    # future: when dropping python 2 support do this instead.
    # find_key, *find_value = _non_quoted_split(EQUAL_SPLIT, array_part)
    if len(find_value) >= 2:
        raise ValueError('too many unquoted equals signs in square brackets for {!r}'.format(array_part))
    find_value = find_value[0]
    # A bare integer matches int values; quoting it ("5") forces a string match.
    if find_value.isdigit():
        find_value = int(find_value)
    elif find_value.startswith('"') and find_value.endswith('"'):
        find_value = find_value[1:-1]
    if isinstance(find_value, six.string_types):
        find_value = _un_slash_escape(find_value)
    # A dict-like mapped_value is searched as a one-item "array" of itself.
    for item in [mapped_value] if hasattr(mapped_value, 'keys') else mapped_value:
        sub_item = item
        sub_keys = _non_quoted_split(TIDLE_SPLIT, find_key)
        try:
            # Walk Key~SubKey~... down into this candidate item.
            while sub_keys:
                sub_key = _un_slash_escape(sub_keys.pop(0))
                sub_item = sub_item[sub_key]
        except (KeyError, IndexError):
            # Candidate lacks the key path; try the next one.
            pass
        else:
            if sub_item == find_value:
                mapped_value = item
                break
    else:
        # for/else: no candidate matched.
        # raise KeyError('no item with %r == %r' % (find_key, find_value))
        found_value = False
        path_parts_break = True  # break the outer loop, we are done here.
        array_part_break = True
    return found_value, mapped_value, array_part_break, path_parts_break
def _array_part_is_whole_array(found_value, mapped_value, key, path_parts, path_parts_index):
    """Resolve an empty ``[]`` expression: apply the rest of the path to every
    item of the array, keeping only the values that resolve."""
    # empty []
    if hasattr(mapped_value, 'keys'):
        raise ValueError('array expected at {!r}, found dict-like object.'.format(
            '.'.join(path_parts[:path_parts_index] + [key])
        ))
    if not mapped_value:
        # Empty array: found only if there is no remaining path to apply
        # (per the 'sixth_key.f[]' vs 'sixth_key.f[].g' doctests).
        if path_parts[path_parts_index + 1:]:
            found_value = False
    else:
        # Mutual recursion: resolve the remainder of the path in each item.
        remainder = '.'.join(path_parts[path_parts_index + 1:])
        mapped_value = [resolve_path_to_value(x, remainder) for x in mapped_value]
        mapped_value = [value for found, value in mapped_value if found]
        if not mapped_value:
            # The remainder resolved in none of the items.
            found_value = False
    return found_value, mapped_value
def resolve_path_to_value(source, path):
    r"""
    fetch a value out of `source` using `path` as the pointer to the desired value.
    a `path` should be in one of or a combination of the following formats:
    - dictionary keys using dot notation
        key.subkey
    - array item using square bracket notation
        key[0]
    - find dict in array using keys
        key[Key=Value]
    - find dict in array using sub keys
        key[Key~SubKey=Value]
    if the substring `Value` `isdigit()`, we look for an `int` version. You can wrap `'8'` into `'"8"'` to find the
    `string` version.
    examples:
    >>> source_dict = {
    ...     'first_key': 'a',
    ...     'second_key' : ['x', 'y', 'z'],
    ...     'third_key' : [
    ...         {'c': 'asdf'},
    ...         {'b': 3},
    ...         {'b': '5'},
    ...         {'h': 'qw"er'}
    ...     ],
    ...     'fourth_key': [
    ...         {
    ...             'd': {'f': 5, 'g': 6},
    ...             'e': {'f': 7, 'g': 8}
    ...         },
    ...         {
    ...             'd': {'f': 9, 'g': 10},
    ...             'e': {'f': 11, 'g': 12}
    ...         }
    ...     ],
    ...     'fifth_key': [
    ...         {'b.c': '9.a'},
    ...         {'b[c': '9[a'},
    ...         {'b]c': '9]a'},
    ...         {'b\c': '9\\a'},
    ...     ],
    ...     'sixth_key': {
    ...         'a': [
    ...             {'b':6},
    ...             {'b':5},
    ...             {'b':4},
    ...         ],
    ...         'c': [
    ...             {'d':100},
    ...             {'d':{'e': 3}},
    ...             {'d':{'e': 2}},
    ...         ],
    ...         'f': []
    ...     },
    ...     'seventh_key': {
    ...         'bad_api': '{"z":1,"y":2,"x":3}',
    ...         'bad_json': '{"z":1!"y":2,"x":3}',
    ...     }
    ... }
    >>> resolve_path_to_value(source_dict, 'zero_key')[0]
    False
    >>> resolve_path_to_value(source_dict, 'first_key')
    (True, 'a')
    >>> resolve_path_to_value(source_dict, 'second_key[1]')
    (True, 'y')
    >>> resolve_path_to_value(source_dict, 'second_key[4]')
    Traceback (most recent call last):
        ...
    IndexError: index 4 out of range on array at 'second_key'.
    >>> resolve_path_to_value(source_dict, 'third_key[b=3]')
    (True, {'b': 3})
    >>> resolve_path_to_value(source_dict, 'third_key[b=4]')[0]
    False
    >>> resolve_path_to_value(source_dict, 'third_key[b="5"]')
    (True, {'b': '5'})
    >>> resolve_path_to_value(source_dict, 'third_key[h=qw"er]')
    (True, {'h': 'qw"er'})
    >>> resolve_path_to_value(source_dict, 'third_key[c=asdf].c')
    (True, 'asdf')
    >>> resolve_path_to_value(source_dict, 'third_key[c=asdf].b')
    (False, {'c': 'asdf'})
    >>> resolve_path_to_value(source_dict, 'fourth_key[d~g=6].e.f')
    (True, 7)
    >>> resolve_path_to_value(source_dict, r'fifth_key[b\.c=9\.a].b\.c')
    (True, '9.a')
    >>> resolve_path_to_value(source_dict, r'fifth_key[b\[c=9\[a].b\[c')
    (True, '9[a')
    >>> resolve_path_to_value(source_dict, r'fifth_key[b\]c=9\]a].b\]c')
    (True, '9]a')
    >>> resolve_path_to_value(source_dict, r'fifth_key[b\\c=9\\a].b\\c')
    (True, '9\\a')
    >>> resolve_path_to_value(source_dict, 'sixth_key.a[].b')
    (True, [6, 5, 4])
    >>> resolve_path_to_value(source_dict, 'sixth_key.c[].d.e')
    (True, [3, 2])
    >>> resolve_path_to_value(source_dict, 'sixth_key.c[].x')
    (False, [])
    >>> resolve_path_to_value(source_dict, 'sixth_key.f')
    (True, [])
    >>> resolve_path_to_value(source_dict, 'sixth_key.f[]')
    (True, [])
    >>> resolve_path_to_value(source_dict, 'sixth_key.f[].g')
    (False, [])
    >>> resolve_path_to_value(source_dict, 'seventh_key.bad_api.x')
    (True, 3)
    >>> results = resolve_path_to_value(source_dict, 'seventh_key.bad_api.a')
    >>> results[0]
    False
    >>> results[1] == {'x': 3, 'y': 2, 'z': 1}
    True
    >>> resolve_path_to_value(source_dict, 'seventh_key.bad_api[bad-squares]')
    Traceback (most recent call last):
        ...
    ValueError: Bad square brackets syntax on 'bad-squares'
    >>> resolve_path_to_value(source_dict, 'seventh_key.bad_api[a=b=c=]')
    Traceback (most recent call last):
        ...
    ValueError: too many unquoted equals signs in square brackets for 'a=b=c='
    >>> resolve_path_to_value(source_dict, 'seventh_key[0]')
    Traceback (most recent call last):
        ...
    ValueError: array expected at 'seventh_key', found dict-like object.
    >>> resolve_path_to_value(source_dict, 'seventh_key[]')
    Traceback (most recent call last):
        ...
    ValueError: array expected at 'seventh_key', found dict-like object.
    >>> resolve_path_to_value(source_dict, 'seventh_key.bad_json.z')
    Traceback (most recent call last):
        ...
    ValueError: string found when looking for dict-like object at 'seventh_key.bad_json'. failed to convert to json.
    :param source: potentially holds the desired value
    :type source: dict
    :param path: points to the desired value
    :type path: six.string_types
    :returns: a boolean indicating found status, the value that was found
    :rtype: tuple
    :raises ValueError: if we don't understand what went inside some square brackets.
    """
    mapped_value = source
    found_value = True
    path_parts_break = False
    # Walk the dot-separated parts one level at a time.
    path_parts = _non_quoted_split(PERIOD_SPLIT, path)
    for path_parts_index, path_part_raw in enumerate(path_parts):
        # split on non quoted open bracket
        parts = _non_quoted_split(OPEN_SQUARE_BRACKET_SPLIT, path_part_raw)
        key = parts[0]
        array = parts[1:]
        # future: when dropping python 2 support do this instead.
        # key, *array = _non_quoted_split(OPEN_SQUARE_BRACKET_SPLIT, path_part_raw)
        key = _un_slash_escape(key)
        found_value, mapped_value, path_parts_break = _get_next_mapped_value_for_key(
            mapped_value, key, found_value, path_parts, path_parts_index, path_parts_break
        )
        if path_parts_break:
            break
        # Apply each bracket expression attached to this part, in order.
        for array_part_raw in array:
            array_part = array_part_raw.strip(']')
            if array_part.isdigit():
                # [0] -- plain numeric index
                mapped_value = _array_part_is_digit(
                    mapped_value, array_part, key, path_parts, path_parts_index
                )
            elif '=' in array_part:
                # [Key=Value] / [Key~SubKey=Value] -- search within an array
                found_value, mapped_value, array_part_break, path_parts_break = _array_part_is_key_or_sub_key_equal(
                    found_value, mapped_value, array_part, path_parts_break
                )
                if array_part_break:
                    break
            elif array_part == '':
                # [] -- map the rest of the path over the whole array
                found_value, mapped_value = _array_part_is_whole_array(
                    found_value, mapped_value, key, path_parts, path_parts_index
                )
                path_parts_break = True  # break the outer loop, we are done here.
                break
            else:
                raise ValueError('Bad square brackets syntax on {!r}'.format(array_part))
        if path_parts_break:
            break
    return found_value, mapped_value
def resolve_mapping_to_dict(mapping, source):
    """Build a new dict by pulling values out of *source*.

    For each ``destination_key: path`` pair in *mapping*, resolve *path*
    inside *source* (see ``resolve_path_to_value`` for the supported path
    syntax) and, when it resolves, store the value under ``destination_key``
    in the returned dict. Paths that fail to resolve are simply omitted.

    >>> resolve_mapping_to_dict({'a': 'x.y', 'b': 'x.z'}, {'x': {'y': 1}})
    {'a': 1}
    >>> mapping = {
    ...     'a': 'x[type=other_type].aa',
    ...     'b': 'x[type=some_type].bb',
    ...     'c': 'x[type=other_type].cc',
    ... }
    >>> source = {
    ...     'x': [
    ...         {'type': 'some_type', 'aa': '4', 'bb': '5', 'cc': '6'},
    ...         {'type': 'other_type', 'aa': '1', 'bb': '2', 'cc': '3'}
    ...     ]
    ... }
    >>> resolve_mapping_to_dict(mapping, source) == {'a': '1', 'b': '5', 'c': '3'}
    True

    :param mapping: values are paths to find the corresponding value in `source`, keys are were to store said values
    :type mapping: dict
    :param source: potentially holds the desired values
    :type source: dict
    :returns: destination dict, containing any found values
    :rtype: dict
    """
    resolved = {}
    for target_key, source_path in mapping.items():
        was_found, value = resolve_path_to_value(source, source_path)
        if was_found:
            resolved[target_key] = value
    return resolved
| bsd-3-clause |
cctaylor/googleads-python-lib | examples/dfp/v201408/creative_service/create_creative_from_template.py | 4 | 3612 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates a new template creative for a given advertiser.
To determine which companies are advertisers, run get_advertisers.py.
To determine which creative templates exist, run
get_all_creative_templates.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: CreativeService.createCreative
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import base64
import os
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
# Set id of the advertiser (company) that the creative will be assigned to.
ADVERTISER_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
def main(client, advertiser_id):
  """Creates a template-based image creative for the given advertiser.

  Args:
    client: an initialized dfp.DfpClient used to make API calls.
    advertiser_id: str ID of the advertiser (company) that will own the
        creative.
  """
  # Initialize appropriate service.
  creative_service = client.GetService('CreativeService', version='v201408')

  # Use the image banner with optional third party tracking template.
  creative_template_id = '10000680'

  # Read the sample image. 'rb' is required: JPEG data is binary, so text
  # mode would corrupt it on Windows and raise a decode error on Python 3.
  # The context manager guarantees the file handle is closed.
  image_path = os.path.join(os.path.split(__file__)[0], '..', '..', 'data',
                            'medium_rectangle.jpg')
  with open(image_path, 'rb') as image_file:
    image_data = image_file.read()
  # The API expects the asset bytes base64-encoded.
  image_data = base64.encodestring(image_data)

  # Create creative from templates. The variable values must match the
  # uniqueNames defined by the chosen creative template.
  creative = {
      'xsi_type': 'TemplateCreative',
      'name': 'Template Creative #%s' % uuid.uuid4(),
      'advertiserId': advertiser_id,
      'size': {'width': '300', 'height': '250'},
      'creativeTemplateId': creative_template_id,
      'creativeTemplateVariableValues': [
          {
              'xsi_type': 'AssetCreativeTemplateVariableValue',
              'uniqueName': 'Imagefile',
              'assetByteArray': image_data,
              'fileName': 'image%s.jpg' % uuid.uuid4()
          },
          {
              'xsi_type': 'LongCreativeTemplateVariableValue',
              'uniqueName': 'Imagewidth',
              'value': '300'
          },
          {
              'xsi_type': 'LongCreativeTemplateVariableValue',
              'uniqueName': 'Imageheight',
              'value': '250'
          },
          {
              'xsi_type': 'UrlCreativeTemplateVariableValue',
              'uniqueName': 'ClickthroughURL',
              'value': 'www.google.com'
          },
          {
              'xsi_type': 'StringCreativeTemplateVariableValue',
              'uniqueName': 'Targetwindow',
              'value': '_blank'
          }
      ]
  }

  # Call service to create the creative.
  creative = creative_service.createCreative(creative)

  # Display results.
  print ('Template creative with id \'%s\', name \'%s\', and type \'%s\' was '
         'created and can be previewed at %s.'
         % (creative['id'], creative['name'], creative['Creative.Type'],
            creative['previewUrl']))
if __name__ == '__main__':
  # Initialize client object. Credentials and network settings are read
  # from the "googleads.yaml" file in the user's home directory.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client, ADVERTISER_ID)
| apache-2.0 |
CydarLtd/ansible | lib/ansible/modules/network/nxos/nxos_rollback.py | 45 | 3780 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_rollback
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Set a checkpoint or rollback to a checkpoint.
description:
- This module offers the ability to set a configuration checkpoint
file or rollback to a configuration checkpoint file on Cisco NXOS
switches.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- Sometimes C(transport=nxapi) may cause a timeout error.
options:
checkpoint_file:
description:
- Name of checkpoint file to create. Mutually exclusive
with rollback_to.
required: false
default: null
rollback_to:
description:
- Name of checkpoint file to rollback to. Mutually exclusive
with checkpoint_file.
required: false
default: null
'''
EXAMPLES = '''
- nxos_rollback:
checkpoint_file: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
- nxos_rollback:
rollback_to: backup.cfg
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
filename:
description: The filename of the checkpoint/rollback file.
returned: success
type: string
sample: 'backup.cfg'
status:
description: Which operation took place and whether it was successful.
returned: success
type: string
sample: 'rollback executed'
'''
from ansible.module_utils.nxos import nxos_argument_spec, run_commands
from ansible.module_utils.basic import AnsibleModule
def checkpoint(filename, module):
    """Create a named checkpoint file on the device without prompting."""
    # 'terminal dont-ask' suppresses the interactive overwrite confirmation.
    run_commands(module, [
        'terminal dont-ask',
        'checkpoint file %s' % filename,
    ])
def rollback(filename, module):
    """Roll the running configuration back to the given checkpoint file."""
    command = 'rollback running-config file %s' % filename
    run_commands(module, [command])
def main():
    """Ansible entry point: create a checkpoint or roll back to one."""
    argument_spec = dict(
        checkpoint_file=dict(required=False),
        rollback_to=dict(required=False),
        include_defaults=dict(default=True),
        config=dict(),
        save=dict(type='bool', default=False)
    )
    # Merge in the shared NX-OS connection arguments (host, transport, ...).
    argument_spec.update(nxos_argument_spec)
    # A single run either creates a checkpoint or rolls back, never both.
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['checkpoint_file',
                                                'rollback_to']],
                           supports_check_mode=False)
    checkpoint_file = module.params['checkpoint_file']
    rollback_to = module.params['rollback_to']
    status = None
    filename = None
    changed = False
    if checkpoint_file:
        checkpoint(checkpoint_file, module)
        status = 'checkpoint file created'
        # NOTE(review): 'changed' stays False here even though a checkpoint
        # file is written to the device -- confirm this is intentional.
    elif rollback_to:
        rollback(rollback_to, module)
        status = 'rollback executed'
        changed = True
    # Exactly one of the two is set thanks to mutually_exclusive above.
    filename = rollback_to or checkpoint_file
    module.exit_json(changed=changed, status=status, filename=filename)
if __name__ == '__main__':
    main()
| gpl-3.0 |
tech2free/kubernetes | examples/cluster-dns/images/frontend/client.py | 468 | 1227 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import requests
import socket
from urlparse import urlparse
def CheckServiceAddress(address):
  # Resolve the hostname portion of the URL through DNS and print the
  # resulting IP, verifying that cluster DNS can resolve the service name.
  hostname = urlparse(address).hostname
  service_address = socket.gethostbyname(hostname)
  print service_address
def GetServerResponse(address):
  # Issue an HTTP GET against the service and echo both the response object
  # and its body, demonstrating end-to-end connectivity.
  print 'Send request to:', address
  response = requests.get(address)
  print response
  print response.content
def Main():
  # Parse the single positional argument (the service URL), then exercise
  # DNS resolution followed by an HTTP round-trip against it.
  parser = argparse.ArgumentParser()
  parser.add_argument('address')
  args = parser.parse_args()
  CheckServiceAddress(args.address)
  GetServerResponse(args.address)
if __name__ == "__main__":
  Main()
| apache-2.0 |
rob356/SickRage | lib/cachecontrol/adapter.py | 59 | 4184 | import functools
from requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
    """Transport adapter that layers HTTP caching on requests' HTTPAdapter.

    GET responses are served from / stored into ``self.cache`` according to
    standard HTTP caching rules implemented by the controller; PUT and
    DELETE invalidate any cached entry for the URL.
    """
    # Methods whose successful responses must evict the cached entry.
    invalidating_methods = set(['PUT', 'DELETE'])
    def __init__(self, cache=None,
                 cache_etags=True,
                 controller_class=None,
                 serializer=None,
                 heuristic=None,
                 *args, **kw):
        # :param cache: cache backend; defaults to an in-memory DictCache.
        # :param cache_etags: whether to store/serve ETag-validated entries.
        # :param controller_class: override for the cache policy engine.
        # :param serializer: response (de)serializer for the cache backend.
        # :param heuristic: optional freshness heuristic applied to responses.
        super(CacheControlAdapter, self).__init__(*args, **kw)
        self.cache = cache or DictCache()
        self.heuristic = heuristic
        controller_factory = controller_class or CacheController
        self.controller = controller_factory(
            self.cache,
            cache_etags=cache_etags,
            serializer=serializer,
        )
    def send(self, request, **kw):
        """
        Send a request. Use the request information to see if it
        exists in the cache and cache the response if we need to and can.
        """
        if request.method == 'GET':
            cached_response = self.controller.cached_request(request)
            if cached_response:
                # Cache hit: short-circuit without touching the network.
                return self.build_response(request, cached_response,
                                           from_cache=True)
            # check for etags and add headers if appropriate
            # (e.g. If-None-Match / If-Modified-Since for revalidation)
            request.headers.update(
                self.controller.conditional_headers(request)
            )
        resp = super(CacheControlAdapter, self).send(request, **kw)
        return resp
    def build_response(self, request, response, from_cache=False):
        """
        Build a response by making a request or using the cache.
        This will end up calling send and returning a potentially
        cached response
        """
        if not from_cache and request.method == 'GET':
            # apply any expiration heuristics
            if response.status == 304:
                # We must have sent an ETag request. This could mean
                # that we've been expired already or that we simply
                # have an etag. In either case, we want to try and
                # update the cache if that is the case.
                cached_response = self.controller.update_cached_response(
                    request, response
                )
                if cached_response is not response:
                    from_cache = True
                # We are done with the server response, read a
                # possible response body (compliant servers will
                # not return one, but we cannot be 100% sure) and
                # release the connection back to the pool.
                response.read(decode_content=False)
                response.release_conn()
                response = cached_response
            # We always cache the 301 responses
            elif response.status == 301:
                self.controller.cache_response(request, response)
            else:
                # Check for any heuristics that might update headers
                # before trying to cache.
                if self.heuristic:
                    response = self.heuristic.apply(response)
                # Wrap the response file with a wrapper that will cache the
                # response when the stream has been consumed.
                response._fp = CallbackFileWrapper(
                    response._fp,
                    functools.partial(
                        self.controller.cache_response,
                        request,
                        response,
                    )
                )
        resp = super(CacheControlAdapter, self).build_response(
            request, response
        )
        # See if we should invalidate the cache.
        if request.method in self.invalidating_methods and resp.ok:
            cache_url = self.controller.cache_url(request.url)
            self.cache.delete(cache_url)
        # Give the request a from_cache attr to let people use it
        resp.from_cache = from_cache
        return resp
    def close(self):
        # Release cache backend resources before closing the adapter itself.
        self.cache.close()
        super(CacheControlAdapter, self).close()
| gpl-3.0 |
ASCrookes/django | tests/messages_tests/test_session.py | 368 | 1870 | from django.contrib.messages import constants
from django.contrib.messages.storage.base import Message
from django.contrib.messages.storage.session import SessionStorage
from django.test import TestCase
from django.utils.safestring import SafeData, mark_safe
from .base import BaseTests
def set_session_data(storage, messages):
    """Store *messages* in the backend's session and drop its loaded-data cache.

    The cache (``_loaded_data``) is removed so the storage re-reads the
    session on its next access.
    """
    serialized = storage.serialize_messages(messages)
    storage.request.session[storage.session_key] = serialized
    try:
        delattr(storage, '_loaded_data')
    except AttributeError:
        pass
def stored_session_messages_count(storage):
    """Return how many messages are currently stored in the backend's session."""
    raw = storage.request.session.get(storage.session_key, [])
    messages = storage.deserialize_messages(raw)
    return len(messages)
class SessionTest(BaseTests, TestCase):
    """Runs the shared message-storage test suite against SessionStorage."""
    storage_class = SessionStorage
    def get_request(self):
        # Attach a plain dict as the session so tests can inspect it directly.
        self.session = {}
        request = super(SessionTest, self).get_request()
        request.session = self.session
        return request
    def stored_messages_count(self, storage, response):
        # The response is irrelevant for the session backend; count what is
        # actually persisted in the session.
        return stored_session_messages_count(storage)
    def test_get(self):
        storage = self.storage_class(self.get_request())
        # Set initial data.
        example_messages = ['test', 'me']
        set_session_data(storage, example_messages)
        # Test that the message actually contains what we expect.
        self.assertEqual(list(storage), example_messages)
    def test_safedata(self):
        """
        Tests that a message containing SafeData is keeping its safe status when
        retrieved from the message storage.
        """
        storage = self.get_storage()
        message = Message(constants.DEBUG, mark_safe("<b>Hello Django!</b>"))
        set_session_data(storage, [message])
        self.assertIsInstance(list(storage)[0].message, SafeData)
| bsd-3-clause |
sonaht/ansible | lib/ansible/modules/cloud/ovirt/ovirt_users.py | 45 | 4894 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_users
short_description: Module to manage users in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage users in oVirt/RHV."
options:
name:
description:
- "Name of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
required: true
state:
description:
- "Should the user be present/absent."
choices: ['present', 'absent']
default: present
authz_name:
description:
- "Authorization provider of the user. In previous versions of oVirt/RHV known as domain."
required: true
aliases: ['domain']
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Add user user1 from authorization provider example.com-authz
ovirt_users:
name: user1
domain: example.com-authz
# Add user user1 from authorization provider example.com-authz
# In case of Active Directory specify UPN:
ovirt_users:
name: user1@ad2.example.com
domain: example.com-authz
# Remove user user1 with authorization provider example.com-authz
ovirt_users:
state: absent
name: user1
authz_name: example.com-authz
'''
RETURN = '''
id:
description: ID of the user which is managed
returned: On success if user is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
user:
description: "Dictionary of all the user attributes. User attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/user."
returned: On success if user is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
ovirt_full_argument_spec,
)
def username(module):
    """Build the oVirt login name in ``user@authz-provider`` form."""
    return '@'.join([module.params['name'], module.params['authz_name']])
class UsersModule(BaseModule):
    """Maps Ansible module parameters onto an oVirt User API entity."""

    def build_entity(self):
        """Construct the otypes.User payload sent to the users service."""
        params = self._module.params
        domain = otypes.Domain(name=params['authz_name'])
        return otypes.User(
            domain=domain,
            user_name=username(self._module),
            principal=params['name'],
            namespace=params['namespace'],
        )
def main():
    """Ansible entry point: ensure an oVirt user is present or absent."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(required=True),
        authz_name=dict(required=True, aliases=['domain']),
        namespace=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    # Verify the oVirt SDK is importable and required params are sane.
    check_sdk(module)
    check_params(module)
    try:
        auth = module.params.pop('auth')
        # NOTE(review): if create_connection raises, 'connection' (and 'auth'
        # if the pop fails) is unbound in the finally block below, masking the
        # original error with a NameError -- worth confirming upstream.
        connection = create_connection(auth)
        users_service = connection.system_service().users_service()
        users_module = UsersModule(
            connection=connection,
            module=module,
            service=users_service,
        )
        state = module.params['state']
        if state == 'present':
            # Users are matched by their fully-qualified 'usrname'.
            ret = users_module.create(
                search_params={
                    'usrname': username(module),
                }
            )
        elif state == 'absent':
            ret = users_module.remove(
                search_params={
                    'usrname': username(module),
                }
            )
        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only log out if we authenticated with credentials, not a token.
        connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
    main()
| gpl-3.0 |
gwh59/cloud-custodian | tests/test_policy.py | 1 | 7158 | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime, timedelta
import json
import shutil
import tempfile
from c7n import policy, manager
from c7n.resources.ec2 import EC2
from c7n.utils import dumps
from .common import BaseTest, Config
class DummyResource(manager.ResourceManager):
    """Minimal in-memory resource manager used to exercise policy execution."""

    def resources(self):
        """Return a fixed pair of fake resource records."""
        return [{'abc': 123}, {'def': 456}]

    @property
    def actions(self):
        """Expose two fake actions wrapping plain functions."""

        class _Action(object):
            """Adapter giving a bare function the minimal action interface."""

            def __init__(self, func):
                self.f = func

            def name(self):
                # Reports the wrapped function's __name__ (p1 / p2).
                return self.f.__name__

            def process(self, resources):
                return self.f(resources)

        def p1(resources):
            # Replaces the resource set with altered records.
            return [{'abc': 456}, {'def': 321}]

        def p2(resources):
            # Identity action: passes resources through unchanged.
            return resources

        return [_Action(p1), _Action(p2)]
class TestPolicy(BaseTest):
    """Exercises policy collection loading, filtering and metrics retrieval.

    Network-facing tests replay recorded AWS responses ("flight data"), so
    the expected metric payloads below are exact fixtures.
    """
    def test_policy_name_filtering(self):
        # Glob patterns select policies by name from a loaded collection.
        collection = self.load_policy_set(
            {'policies': [
                {'name': 's3-remediate',
                 'resource': 's3'},
                {'name': 's3-global-grants',
                 'resource': 's3'},
                {'name': 'ec2-tag-compliance-stop',
                 'resource': 'ec2'},
                {'name': 'ec2-tag-compliance-kill',
                 'resource': 'ec2'},
                {'name': 'ec2-tag-compliance-remove',
                 'resource': 'ec2'}]},
            )
        self.assertEqual(
            [p.name for p in collection.policies('s3*')],
            ['s3-remediate', 's3-global-grants'])
        self.assertEqual(
            [p.name for p in collection.policies('ec2*')],
            ['ec2-tag-compliance-stop',
             'ec2-tag-compliance-kill',
             'ec2-tag-compliance-remove'])
    def test_file_not_found(self):
        # Loading a nonexistent policy file must raise, not fail silently.
        self.assertRaises(
            ValueError, policy.load, Config.empty(), "/asdf12")
    def test_lambda_policy_metrics(self):
        # Lambda-mode policies report invocation/error/throttle metrics.
        session_factory = self.replay_flight_data('test_lambda_policy_metrics')
        p = self.load_policy({
            'name': 'ec2-tag-compliance-v6',
            'resource': 'ec2',
            'mode': {
                'type': 'ec2-instance-state',
                'events': ['running']},
            'role': "arn:aws:iam::619193117841:role/CustodianDemoRole",
            'filters': [
                {"tag:custodian_status": 'absent'},
                {'or': [
                    {"tag:App": 'absent'},
                    {"tag:Env": 'absent'},
                    {"tag:Owner": 'absent'}]}]},
            session_factory=session_factory)
        end = datetime.utcnow()
        start = end - timedelta(14)
        period = 24 * 60 * 60 * 14
        # Round-trip through JSON to normalize types before comparison.
        self.assertEqual(
            json.loads(dumps(p.get_metrics(start, end, period), indent=2)),
            {u'Durations': [],
             u'Errors': [{u'Sum': 0.0,
                          u'Timestamp': u'2016-05-30T10:50:00',
                          u'Unit': u'Count'}],
             u'Invocations': [{u'Sum': 4.0,
                               u'Timestamp': u'2016-05-30T10:50:00',
                               u'Unit': u'Count'}],
             u'ResourceCount': [{u'Average': 1.0,
                                 u'Sum': 2.0,
                                 u'Timestamp': u'2016-05-30T10:50:00',
                                 u'Unit': u'Count'}],
             u'Throttles': [{u'Sum': 0.0,
                             u'Timestamp': u'2016-05-30T10:50:00',
                             u'Unit': u'Count'}]})
    def test_policy_metrics(self):
        # Poll-mode policies report action/resource timing and counters.
        session_factory = self.replay_flight_data('test_policy_metrics')
        p = self.load_policy(
            {'name': 's3-encrypt-keys',
             'resource': 's3',
             'actions': [
                 {'type': 'encrypt-keys'}]},
            session_factory=session_factory)
        end = datetime.now().replace(hour=0, minute=0, microsecond=0)
        start = end - timedelta(14)
        period = 24 * 60 * 60 * 14
        self.maxDiff = None
        self.assertEqual(
            json.loads(dumps(p.get_metrics(start, end, period), indent=2)),
            {
                "ActionTime": [
                    {
                        "Timestamp": "2016-05-30T00:00:00",
                        "Average": 8541.752702140668,
                        "Sum": 128126.29053211001,
                        "Unit": "Seconds"
                    }
                ],
                "Total Keys": [
                    {
                        "Timestamp": "2016-05-30T00:00:00",
                        "Average": 1575708.7333333334,
                        "Sum": 23635631.0,
                        "Unit": "Count"
                    }
                ],
                "ResourceTime": [
                    {
                        "Timestamp": "2016-05-30T00:00:00",
                        "Average": 8.682969363532667,
                        "Sum": 130.24454045299,
                        "Unit": "Seconds"
                    }
                ],
                "ResourceCount": [
                    {
                        "Timestamp": "2016-05-30T00:00:00",
                        "Average": 23.6,
                        "Sum": 354.0,
                        "Unit": "Count"
                    }
                ],
                "Unencrypted": [
                    {
                        "Timestamp": "2016-05-30T00:00:00",
                        "Average": 10942.266666666666,
                        "Sum": 164134.0,
                        "Unit": "Count"
                    }
                ]})
    def test_get_resource_manager(self):
        collection = self.load_policy_set(
            {'policies': [
                {'name': 'query-instances',
                 'resource': 'ec2',
                 'filters': [
                     {'tag-key': 'CMDBEnvironment'}
                 ]}]})
        p = collection.policies()[0]
        self.assertTrue(
            isinstance(p.get_resource_manager(), EC2))
    def xtest_policy_run(self):
        # Prefixed with 'x' so the runner skips it; exercises a full policy
        # run against the DummyResource manager defined above.
        manager.resources.register('dummy', DummyResource)
        self.addCleanup(manager.resources.unregister, 'dummy')
        self.output_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.output_dir)
        collection = self.load_policy_set(
            {'policies': [
                {'name': 'process-instances',
                 'resource': 'dummy'}]},
            {'output_dir': self.output_dir})
        p = collection.policies()[0]
        p()
        self.assertEqual(len(p.ctx.metrics.data), 3)
| apache-2.0 |
gquirozbogner/contentbox-master | third_party/unidecode/x071.py | 4 | 4928 | data = (
'Hu ', # 0x00
'Xi ', # 0x01
'Shu ', # 0x02
'He ', # 0x03
'Xun ', # 0x04
'Ku ', # 0x05
'Jue ', # 0x06
'Xiao ', # 0x07
'Xi ', # 0x08
'Yan ', # 0x09
'Han ', # 0x0a
'Zhuang ', # 0x0b
'Jun ', # 0x0c
'Di ', # 0x0d
'Xie ', # 0x0e
'Ji ', # 0x0f
'Wu ', # 0x10
'[?] ', # 0x11
'[?] ', # 0x12
'Han ', # 0x13
'Yan ', # 0x14
'Huan ', # 0x15
'Men ', # 0x16
'Ju ', # 0x17
'Chou ', # 0x18
'Bei ', # 0x19
'Fen ', # 0x1a
'Lin ', # 0x1b
'Kun ', # 0x1c
'Hun ', # 0x1d
'Tun ', # 0x1e
'Xi ', # 0x1f
'Cui ', # 0x20
'Wu ', # 0x21
'Hong ', # 0x22
'Ju ', # 0x23
'Fu ', # 0x24
'Wo ', # 0x25
'Jiao ', # 0x26
'Cong ', # 0x27
'Feng ', # 0x28
'Ping ', # 0x29
'Qiong ', # 0x2a
'Ruo ', # 0x2b
'Xi ', # 0x2c
'Qiong ', # 0x2d
'Xin ', # 0x2e
'Zhuo ', # 0x2f
'Yan ', # 0x30
'Yan ', # 0x31
'Yi ', # 0x32
'Jue ', # 0x33
'Yu ', # 0x34
'Gang ', # 0x35
'Ran ', # 0x36
'Pi ', # 0x37
'Gu ', # 0x38
'[?] ', # 0x39
'Sheng ', # 0x3a
'Chang ', # 0x3b
'Shao ', # 0x3c
'[?] ', # 0x3d
'[?] ', # 0x3e
'[?] ', # 0x3f
'[?] ', # 0x40
'Chen ', # 0x41
'He ', # 0x42
'Kui ', # 0x43
'Zhong ', # 0x44
'Duan ', # 0x45
'Xia ', # 0x46
'Hui ', # 0x47
'Feng ', # 0x48
'Lian ', # 0x49
'Xuan ', # 0x4a
'Xing ', # 0x4b
'Huang ', # 0x4c
'Jiao ', # 0x4d
'Jian ', # 0x4e
'Bi ', # 0x4f
'Ying ', # 0x50
'Zhu ', # 0x51
'Wei ', # 0x52
'Tuan ', # 0x53
'Tian ', # 0x54
'Xi ', # 0x55
'Nuan ', # 0x56
'Nuan ', # 0x57
'Chan ', # 0x58
'Yan ', # 0x59
'Jiong ', # 0x5a
'Jiong ', # 0x5b
'Yu ', # 0x5c
'Mei ', # 0x5d
'Sha ', # 0x5e
'Wei ', # 0x5f
'Ye ', # 0x60
'Xin ', # 0x61
'Qiong ', # 0x62
'Rou ', # 0x63
'Mei ', # 0x64
'Huan ', # 0x65
'Xu ', # 0x66
'Zhao ', # 0x67
'Wei ', # 0x68
'Fan ', # 0x69
'Qiu ', # 0x6a
'Sui ', # 0x6b
'Yang ', # 0x6c
'Lie ', # 0x6d
'Zhu ', # 0x6e
'Jie ', # 0x6f
'Gao ', # 0x70
'Gua ', # 0x71
'Bao ', # 0x72
'Hu ', # 0x73
'Yun ', # 0x74
'Xia ', # 0x75
'[?] ', # 0x76
'[?] ', # 0x77
'Bian ', # 0x78
'Gou ', # 0x79
'Tui ', # 0x7a
'Tang ', # 0x7b
'Chao ', # 0x7c
'Shan ', # 0x7d
'N ', # 0x7e
'Bo ', # 0x7f
'Huang ', # 0x80
'Xie ', # 0x81
'Xi ', # 0x82
'Wu ', # 0x83
'Xi ', # 0x84
'Yun ', # 0x85
'He ', # 0x86
'He ', # 0x87
'Xi ', # 0x88
'Yun ', # 0x89
'Xiong ', # 0x8a
'Nai ', # 0x8b
'Shan ', # 0x8c
'Qiong ', # 0x8d
'Yao ', # 0x8e
'Xun ', # 0x8f
'Mi ', # 0x90
'Lian ', # 0x91
'Ying ', # 0x92
'Wen ', # 0x93
'Rong ', # 0x94
'Oozutsu ', # 0x95
'[?] ', # 0x96
'Qiang ', # 0x97
'Liu ', # 0x98
'Xi ', # 0x99
'Bi ', # 0x9a
'Biao ', # 0x9b
'Zong ', # 0x9c
'Lu ', # 0x9d
'Jian ', # 0x9e
'Shou ', # 0x9f
'Yi ', # 0xa0
'Lou ', # 0xa1
'Feng ', # 0xa2
'Sui ', # 0xa3
'Yi ', # 0xa4
'Tong ', # 0xa5
'Jue ', # 0xa6
'Zong ', # 0xa7
'Yun ', # 0xa8
'Hu ', # 0xa9
'Yi ', # 0xaa
'Zhi ', # 0xab
'Ao ', # 0xac
'Wei ', # 0xad
'Liao ', # 0xae
'Han ', # 0xaf
'Ou ', # 0xb0
'Re ', # 0xb1
'Jiong ', # 0xb2
'Man ', # 0xb3
'[?] ', # 0xb4
'Shang ', # 0xb5
'Cuan ', # 0xb6
'Zeng ', # 0xb7
'Jian ', # 0xb8
'Xi ', # 0xb9
'Xi ', # 0xba
'Xi ', # 0xbb
'Yi ', # 0xbc
'Xiao ', # 0xbd
'Chi ', # 0xbe
'Huang ', # 0xbf
'Chan ', # 0xc0
'Ye ', # 0xc1
'Qian ', # 0xc2
'Ran ', # 0xc3
'Yan ', # 0xc4
'Xian ', # 0xc5
'Qiao ', # 0xc6
'Zun ', # 0xc7
'Deng ', # 0xc8
'Dun ', # 0xc9
'Shen ', # 0xca
'Jiao ', # 0xcb
'Fen ', # 0xcc
'Si ', # 0xcd
'Liao ', # 0xce
'Yu ', # 0xcf
'Lin ', # 0xd0
'Tong ', # 0xd1
'Shao ', # 0xd2
'Fen ', # 0xd3
'Fan ', # 0xd4
'Yan ', # 0xd5
'Xun ', # 0xd6
'Lan ', # 0xd7
'Mei ', # 0xd8
'Tang ', # 0xd9
'Yi ', # 0xda
'Jing ', # 0xdb
'Men ', # 0xdc
'[?] ', # 0xdd
'[?] ', # 0xde
'Ying ', # 0xdf
'Yu ', # 0xe0
'Yi ', # 0xe1
'Xue ', # 0xe2
'Lan ', # 0xe3
'Tai ', # 0xe4
'Zao ', # 0xe5
'Can ', # 0xe6
'Sui ', # 0xe7
'Xi ', # 0xe8
'Que ', # 0xe9
'Cong ', # 0xea
'Lian ', # 0xeb
'Hui ', # 0xec
'Zhu ', # 0xed
'Xie ', # 0xee
'Ling ', # 0xef
'Wei ', # 0xf0
'Yi ', # 0xf1
'Xie ', # 0xf2
'Zhao ', # 0xf3
'Hui ', # 0xf4
'Tatsu ', # 0xf5
'Nung ', # 0xf6
'Lan ', # 0xf7
'Ru ', # 0xf8
'Xian ', # 0xf9
'Kao ', # 0xfa
'Xun ', # 0xfb
'Jin ', # 0xfc
'Chou ', # 0xfd
'Chou ', # 0xfe
'Yao ', # 0xff
)
| apache-2.0 |
Nidylei/azure-linux-automation | remote-scripts/E2E-SIEGE-TEST.py | 8 | 2963 | #!/usr/bin/python
import re
import time
import imp
import sys
import argparse
import os
import linecache
from azuremodules import *
# Parse the command-line options supplied by the automation harness.
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--user', help='specifies user to login', required=True, type= str )
parser.add_argument('-p', '--password', help='specifies which password should be used to login', required=True, type= str)
parser.add_argument('-l', '--url', help='Datrader scenario url for siege test', required=True, type= str )
parser.add_argument('-t', '--siegetime', help='time for siege test', required=True, type= str )
parser.add_argument('-n', '--numofusers', help='number of users for siege test', required=True, type= str )
args = parser.parse_args()
# Module-level settings consumed by the functions below.
vm_username = args.user
vm_password = args.password
daytrader_scenario_url = args.url
siegetime= args.siegetime
numofusers = args.numofusers
# Populated by DetectDistro() at the bottom of the script.
current_distro="unknown"
distro_version="unknown"
def EndOfTheScript():
print FileGetContents("/home/"+vm_username+"/Runtime.log")
exit()
def CollectLogs():
    """Gather test logs into ./logs and pack them into logs.tar.gz."""
    commands = [
        "mkdir logs",
        "cp -f /tmp/*.log logs/",
        "cp -f *.txt logs/",
        "tar -czvf logs.tar.gz logs/",
    ]
    ExecMultiCmdsLocalSudo(commands)
def SiegeTest():
    """Install the siege load-testing tool for the detected distro, then run it.

    Installation path depends on the distro family: RPM-based systems install
    a bundled siege*.rpm; Ubuntu tries apt and falls back to a bundled .deb.
    The actual siege run writes its console output to SiegeConsoleOutput.txt.
    """
    # assumes DetectDistro() returns these exact distro strings -- note the
    # mixed casing of "openSUSE" vs. the lowercase others; TODO confirm.
    if (current_distro == "centos" or current_distro == "rhel" or current_distro == "oracle" or current_distro == "ol"):
        ExecMultiCmdsLocalSudo(["yum update -y ","yum install -y tar wget gcc make"])
        Run("echo '"+vm_password+"' | sudo -S rpm -ivh siege*.rpm")
    elif(current_distro == "openSUSE" or current_distro == "sles"):
        ExecMultiCmdsLocalSudo(["zypper update -y ","zypper install -y gcc make"])
        Run("echo '"+vm_password+"' | sudo -S rpm -ivh siege*.rpm")
    elif(current_distro == "ubuntu"):
        ExecMultiCmdsLocalSudo(["apt-get update -y","apt-get install -y gcc make siege"])
        # If apt did not provide a working siege binary, install the bundled .deb.
        siegeinfo = Run("echo '"+vm_password+"' | sudo -S siege -V 2>&1")
        if((siegeinfo.rfind("SIEGE ") != -1) and (siegeinfo.rfind("Copyright ") != -1)):
            RunLog.info("Siege tool Available ..")
        else:
            Run("echo '"+vm_password+"' | sudo -S dpkg -i siege*.deb")
    else:
        RunLog.error( "\ndetected distro not in the list and for more details check logs...")
    # Verify the install actually produced a runnable siege binary.
    siegeinfo = Run("echo '"+vm_password+"' | sudo -S siege -V 2>&1")
    if((siegeinfo.rfind("SIEGE ") != -1) and (siegeinfo.rfind("Copyright ") != -1)):
        RunLog.info("Siege Setup Completed Successfully")
        #Siege test start here..
        command = "siege "+ daytrader_scenario_url +" -t"+siegetime+" -c"+numofusers+" > SiegeConsoleOutput.txt 2>&1"
        RunLog.info( "fcmd:'" +command+"'")
        Run("echo SIEGE_TEST_STARTED > siegeteststatus.txt")
        Run(command)
        RunLog.info("siege fininshed")
        # NOTE(review): the next line only logs; it looks like it was meant to
        # append a FINISHED marker to siegeteststatus.txt -- confirm intent.
        RunLog.info("siege fininshed >> siegeteststatus.txt")
    else:
        RunLog.info("Siege Setup Failed")
        EndOfTheScript()
# Test starts here: detect the distro, run the siege load test, collect the
# resulting logs, then print the runtime log and exit.
[current_distro, distro_version] = DetectDistro()
SiegeTest()
CollectLogs()
EndOfTheScript()
meyerson/luigi | luigi/s3.py | 2 | 18771 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implementation of Simple Storage Service support.
:py:class:`S3Target` is a subclass of the Target class to support S3 file system operations
"""
from __future__ import division
import itertools
import logging
import os
import os.path
try:
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
import warnings
try:
from ConfigParser import NoSectionError
except ImportError:
from configparser import NoSectionError
from luigi import six
from luigi.six.moves import range
from luigi import configuration
from luigi.format import get_default_format
from luigi.parameter import Parameter
from luigi.target import FileAlreadyExists, FileSystem, FileSystemException, FileSystemTarget, AtomicLocalFile, MissingParentDirectory
from luigi.task import ExternalTask
logger = logging.getLogger('luigi-interface')
try:
import boto
from boto.s3.key import Key
except ImportError:
logger.warning("Loading s3 module without boto installed. Will crash at "
"runtime if s3 functionality is used.")
# two different ways of marking a directory
# with a suffix in S3
S3_DIRECTORY_MARKER_SUFFIX_0 = '_$folder$'
S3_DIRECTORY_MARKER_SUFFIX_1 = '/'
class InvalidDeleteException(FileSystemException):
    """Raised for delete operations that are not allowed, e.g. deleting a
    bucket root or a non-recursive delete of a directory."""
    pass
class FileNotFoundException(FileSystemException):
    """Raised when an S3 object that was expected to exist cannot be found."""
    pass
class S3Client(FileSystem):
"""
boto-powered S3 client.
"""
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 **kwargs):
        # Merge luigi's [s3] config section with explicit keyword overrides.
        options = self._get_s3_config()
        options.update(kwargs)
        # Removing the explicit key args would break backwards compatibility,
        # so keep them and only fall back to config values when unset.
        if not aws_access_key_id:
            aws_access_key_id = options.get('aws_access_key_id')
        if not aws_secret_access_key:
            aws_secret_access_key = options.get('aws_secret_access_key')
        # Strip the credentials out of options so they are not passed to
        # boto.connect_s3 twice (once positionally, once via **options).
        for key in ['aws_access_key_id', 'aws_secret_access_key']:
            if key in options:
                options.pop(key)
        self.s3 = boto.connect_s3(aws_access_key_id,
                                  aws_secret_access_key,
                                  **options)
def exists(self, path):
"""
Does provided path exist on S3?
"""
(bucket, key) = self._path_to_bucket_and_key(path)
# grab and validate the bucket
s3_bucket = self.s3.get_bucket(bucket, validate=True)
# root always exists
if self._is_root(key):
return True
# file
s3_key = s3_bucket.get_key(key)
if s3_key:
return True
if self.isdir(path):
return True
logger.debug('Path %s does not exist', path)
return False
    def remove(self, path, recursive=True):
        """
        Remove a file or directory from S3.

        Returns True if something was deleted, False if the path did not
        exist. Raises InvalidDeleteException for the bucket root or for a
        non-recursive delete of a directory.
        """
        if not self.exists(path):
            logger.debug('Could not delete %s; path does not exist', path)
            return False
        (bucket, key) = self._path_to_bucket_and_key(path)
        # root
        if self._is_root(key):
            raise InvalidDeleteException(
                'Cannot delete root of bucket at path %s' % path)
        # grab and validate the bucket
        s3_bucket = self.s3.get_bucket(bucket, validate=True)
        # file
        s3_key = s3_bucket.get_key(key)
        if s3_key:
            s3_bucket.delete_key(s3_key)
            logger.debug('Deleting %s from bucket %s', key, bucket)
            return True
        if self.isdir(path) and not recursive:
            raise InvalidDeleteException(
                'Path %s is a directory. Must use recursive delete' % path)
        # Directory delete: list every key under the prefix and remove them
        # in one batch call.
        delete_key_list = [
            k for k in s3_bucket.list(self._add_path_delimiter(key))]
        # delete the directory marker file if it exists
        s3_dir_with_suffix_key = s3_bucket.get_key(key + S3_DIRECTORY_MARKER_SUFFIX_0)
        if s3_dir_with_suffix_key:
            delete_key_list.append(s3_dir_with_suffix_key)
        if len(delete_key_list) > 0:
            for k in delete_key_list:
                logger.debug('Deleting %s from bucket %s', k, bucket)
            s3_bucket.delete_keys(delete_key_list)
            return True
        return False
def get_key(self, path):
(bucket, key) = self._path_to_bucket_and_key(path)
s3_bucket = self.s3.get_bucket(bucket, validate=True)
return s3_bucket.get_key(key)
def put(self, local_path, destination_s3_path):
"""
Put an object stored locally to an S3 path.
"""
(bucket, key) = self._path_to_bucket_and_key(destination_s3_path)
# grab and validate the bucket
s3_bucket = self.s3.get_bucket(bucket, validate=True)
# put the file
s3_key = Key(s3_bucket)
s3_key.key = key
s3_key.set_contents_from_filename(local_path)
def put_string(self, content, destination_s3_path):
"""
Put a string to an S3 path.
"""
(bucket, key) = self._path_to_bucket_and_key(destination_s3_path)
# grab and validate the bucket
s3_bucket = self.s3.get_bucket(bucket, validate=True)
# put the content
s3_key = Key(s3_bucket)
s3_key.key = key
s3_key.set_contents_from_string(content)
def put_multipart(self, local_path, destination_s3_path, part_size=67108864):
    """
    Put an object stored locally to an S3 path
    using S3 multi-part upload (for files > 5GB).

    Falls back to a single plain ``put`` when the file fits in one part.

    :param local_path: Path to source local file
    :param destination_s3_path: URL for target S3 location
    :param part_size: Part size in bytes. Default: 67108864 (64MB), must be >= 5MB and <= 5 GB.
    """
    # calculate number of parts to upload
    # based on the size of the file
    source_size = os.stat(local_path).st_size
    if source_size <= part_size:
        # fallback to standard, non-multipart strategy
        return self.put(local_path, destination_s3_path)
    (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)
    # grab and validate the bucket
    s3_bucket = self.s3.get_bucket(bucket, validate=True)
    # calculate the number of parts (int division).
    # use modulo to avoid float precision issues
    # for exactly-sized fits
    num_parts = \
        (source_size // part_size) \
        if source_size % part_size == 0 \
        else (source_size // part_size) + 1
    mp = None
    try:
        mp = s3_bucket.initiate_multipart_upload(key)
        for i in range(num_parts):
            # upload a part at a time to S3
            offset = part_size * i
            bytes = min(part_size, source_size - offset)
            # re-open per part; each handle seeks to its own offset
            with open(local_path, 'rb') as fp:
                part_num = i + 1
                logger.info('Uploading part %s/%s to %s', part_num, num_parts, destination_s3_path)
                fp.seek(offset)
                mp.upload_part_from_file(fp, part_num=part_num, size=bytes)
        # finish the upload, making the file available in S3
        mp.complete_upload()
    except BaseException:
        if mp:
            logger.info('Canceling multipart s3 upload for %s', destination_s3_path)
            # cancel the upload so we don't get charged for
            # storage consumed by uploaded parts
            mp.cancel_upload()
        raise
def get(self, s3_path, destination_local_path):
    """
    Get an object stored in S3 and write it to a local path.

    :param s3_path: s3:// URL of the object to fetch.
    :param destination_local_path: local filename to write to.
    """
    (bucket, key) = self._path_to_bucket_and_key(s3_path)
    # grab and validate the bucket
    s3_bucket = self.s3.get_bucket(bucket, validate=True)
    # download the file
    s3_key = Key(s3_bucket)
    s3_key.key = key
    s3_key.get_contents_to_filename(destination_local_path)
def get_as_string(self, s3_path):
    """
    Get the contents of an object stored in S3 as a string.

    :param s3_path: s3:// URL of the object to read.
    :returns: the object's full body.
    """
    (bucket, key) = self._path_to_bucket_and_key(s3_path)
    # grab and validate the bucket
    s3_bucket = self.s3.get_bucket(bucket, validate=True)
    # get the content
    s3_key = Key(s3_bucket)
    s3_key.key = key
    contents = s3_key.get_contents_as_string()
    return contents
def copy(self, source_path, destination_path):
    """
    Copy an object from one S3 location to another.

    Directories are copied key-by-key under the destination prefix.
    """
    (src_bucket, src_key) = self._path_to_bucket_and_key(source_path)
    (dst_bucket, dst_key) = self._path_to_bucket_and_key(destination_path)
    s3_bucket = self.s3.get_bucket(dst_bucket, validate=True)
    if self.isdir(source_path):
        src_prefix = self._add_path_delimiter(src_key)
        dst_prefix = self._add_path_delimiter(dst_key)
        # list() yields keys relative to the source directory
        for key in self.list(source_path):
            s3_bucket.copy_key(dst_prefix + key,
                               src_bucket,
                               src_prefix + key)
    else:
        s3_bucket.copy_key(dst_key, src_bucket, src_key)
def rename(self, source_path, destination_path):
    """
    Rename/move an object from one S3 location to another.

    Implemented as a copy followed by removal of the source.
    """
    self.copy(source_path, destination_path)
    self.remove(source_path)
def listdir(self, path):
    """
    Get an iterable with S3 folder contents.

    Yields full s3:// paths (the queried path re-joined with each key's
    remainder after the prefix).
    """
    (bucket, key) = self._path_to_bucket_and_key(path)
    # grab and validate the bucket
    s3_bucket = self.s3.get_bucket(bucket, validate=True)
    key_path = self._add_path_delimiter(key)
    key_path_len = len(key_path)
    for item in s3_bucket.list(prefix=key_path):
        # strip the bucket-relative prefix and re-anchor under ``path``
        yield self._add_path_delimiter(path) + item.key[key_path_len:]
def list(self, path):
    # Backwards-compatible wrapper around listdir(): yields each entry
    # relative to ``path`` by stripping the delimited prefix.
    prefix_length = len(self._add_path_delimiter(path))
    for entry in self.listdir(path):
        yield entry[prefix_length:]
def isdir(self, path):
    """
    Is the parameter S3 path a directory?

    True for the bucket root, for an explicit directory-marker object,
    or when any key exists under the delimited prefix.
    """
    (bucket, key) = self._path_to_bucket_and_key(path)
    # grab and validate the bucket
    s3_bucket = self.s3.get_bucket(bucket, validate=True)
    # root is a directory
    if self._is_root(key):
        return True
    # explicit directory marker objects
    for suffix in (S3_DIRECTORY_MARKER_SUFFIX_0,
                   S3_DIRECTORY_MARKER_SUFFIX_1):
        s3_dir_with_suffix_key = s3_bucket.get_key(key + suffix)
        if s3_dir_with_suffix_key:
            return True
    # files with this prefix (only need to see one)
    key_path = self._add_path_delimiter(key)
    s3_bucket_list_result = \
        list(itertools.islice(
            s3_bucket.list(prefix=key_path),
            1))
    if s3_bucket_list_result:
        return True
    return False

is_dir = isdir  # compatibility with old version.
def mkdir(self, path, parents=True, raise_if_exists=False):
    """
    Create a directory at ``path``, materialised as an empty marker object.

    :param parents: when False, require the parent directory to exist.
    :param raise_if_exists: raise FileAlreadyExists if the directory exists.
    :raises MissingParentDirectory: parents=False and the parent is absent.
    """
    if raise_if_exists and self.isdir(path):
        raise FileAlreadyExists()
    _, key = self._path_to_bucket_and_key(path)
    if self._is_root(key):
        return  # isdir raises if the bucket doesn't exist; nothing to do here.
    key = self._add_path_delimiter(key)
    if not parents and not self.isdir(os.path.dirname(key)):
        raise MissingParentDirectory()
    return self.put_string("", self._add_path_delimiter(path))
def _get_s3_config(self, key=None):
    """
    Read the ``[s3]`` section of the luigi configuration.

    :param key: if given, return just that option's value; otherwise return
        the whole section as a dict ({} when the section is missing).
    """
    try:
        config = dict(configuration.get_config().items('s3'))
    except NoSectionError:
        return {}
    # coerce numeric-looking option values (ports etc.) to int so callers
    # don't have to declare types for every option; leave the rest as str
    for k, v in six.iteritems(config):
        try:
            config[k] = int(v)
        except ValueError:
            pass
    if key:
        return config.get(key)
    return config
def _path_to_bucket_and_key(self, path):
(scheme, netloc, path, query, fragment) = urlsplit(path)
path_without_initial_slash = path[1:]
return netloc, path_without_initial_slash
def _is_root(self, key):
return (len(key) == 0) or (key == '/')
def _add_path_delimiter(self, key):
return key if key[-1:] == '/' else key + '/'
class AtomicS3File(AtomicLocalFile):
    """
    An S3 file that writes to a temp file and put to S3 on close.
    """

    def __init__(self, path, s3_client):
        # keep the client around so the temp file can be uploaded on close
        self.s3_client = s3_client
        super(AtomicS3File, self).__init__(path)

    def move_to_final_destination(self):
        # multipart put falls back to a plain put for small files
        self.s3_client.put_multipart(self.tmp_path, self.path)
class ReadableS3File(object):
    """
    Read-only, file-like wrapper around a boto S3 key, usable as a
    context manager and as a line iterator.
    """

    def __init__(self, s3_key):
        self.s3_key = s3_key
        # holds partial-line fragments between chunks during iteration
        self.buffer = []
        self.closed = False
        self.finished = False

    def read(self, size=0):
        """Read up to ``size`` bytes; returns b'' forever once exhausted."""
        f = self.s3_key.read(size=size)

        # boto will loop on the key forever and it's not what is expected by
        # the python io interface
        # boto/boto#2805
        if f == b'':
            self.finished = True
        if self.finished:
            return b''

        return f

    def close(self):
        self.s3_key.close()
        self.closed = True

    def __del__(self):
        self.close()

    def __exit__(self, exc_type, exc, traceback):
        self.close()

    def __enter__(self):
        return self

    def _add_to_buffer(self, line):
        # stash a partial line until its terminating newline arrives
        self.buffer.append(line)

    def _flush_buffer(self):
        # join and reset the partial-line buffer
        output = b''.join(self.buffer)
        self.buffer = []
        return output

    def readable(self):
        return True

    def writable(self):
        return False

    def seekable(self):
        return False

    def __iter__(self):
        """Yield complete lines, stitching lines that span chunk boundaries."""
        key_iter = self.s3_key.__iter__()

        has_next = True
        while has_next:
            try:
                # grab the next chunk
                chunk = next(key_iter)

                # split on newlines, preserving the newline
                for line in chunk.splitlines(True):

                    if not line.endswith(os.linesep):
                        # no newline, so store in buffer
                        self._add_to_buffer(line)
                    else:
                        # newline found, send it out
                        if self.buffer:
                            self._add_to_buffer(line)
                            yield self._flush_buffer()
                        else:
                            yield line
            except StopIteration:
                # send out anything we have left in the buffer
                output = self._flush_buffer()
                if output:
                    yield output
                has_next = False

        self.close()
class S3Target(FileSystemTarget):
    """
    Luigi Target for a single object in S3.
    """

    # placeholder; replaced per-instance in __init__ with an S3Client
    fs = None

    def __init__(self, path, format=None, client=None):
        super(S3Target, self).__init__(path)
        if format is None:
            format = get_default_format()

        self.path = path
        self.format = format
        self.fs = client or S3Client()

    def open(self, mode='r'):
        """
        Open the target for reading ('r') or writing ('w').

        :raises ValueError: for any other mode.
        :raises FileNotFoundException: when reading a missing key.
        """
        if mode not in ('r', 'w'):
            raise ValueError("Unsupported open mode '%s'" % mode)

        if mode == 'r':
            s3_key = self.fs.get_key(self.path)
            if not s3_key:
                raise FileNotFoundException("Could not find file at %s" % self.path)

            fileobj = ReadableS3File(s3_key)
            return self.format.pipe_reader(fileobj)
        else:
            # writes go through a local temp file uploaded to S3 on close
            return self.format.pipe_writer(AtomicS3File(self.path, self.fs))
class S3FlagTarget(S3Target):
    """
    Defines a target directory with a flag-file (defaults to `_SUCCESS`) used
    to signify job success.

    This checks for two things:

    * the path exists (just like the S3Target)
    * the _SUCCESS file exists within the directory.

    Because Hadoop outputs into a directory and not a single file,
    the path is assumed to be a directory.

    This is meant to be a handy alternative to AtomicS3File.

    The AtomicFile approach can be burdensome for S3 since there are no directories, per se.

    If we have 1,000,000 output files, then we have to rename 1,000,000 objects.
    """

    # placeholder; replaced per-instance in __init__ with an S3Client
    fs = None

    def __init__(self, path, format=None, client=None, flag='_SUCCESS'):
        """
        Initializes a S3FlagTarget.

        :param path: the directory where the files are stored.
        :type path: str
        :param client: client to talk to S3 with; defaults to a new S3Client.
        :type client: S3Client
        :param flag: name of the file that marks success inside the directory.
        :type flag: str
        """
        if format is None:
            format = get_default_format()
        if path[-1] != "/":
            raise ValueError("S3FlagTarget requires the path to be to a "
                             "directory. It must end with a slash ( / ).")
        super(S3FlagTarget, self).__init__(path)
        self.format = format
        self.fs = client or S3Client()
        self.flag = flag

    def exists(self):
        # success is signalled by the presence of the flag file in the dir
        hadoopSemaphore = self.path + self.flag
        return self.fs.exists(hadoopSemaphore)
class S3EmrTarget(S3FlagTarget):
    """
    Deprecated. Use :py:class:`S3FlagTarget`
    """

    def __init__(self, *args, **kwargs):
        # warn on every construction, then behave exactly like S3FlagTarget
        warnings.warn("S3EmrTarget is deprecated. Please use S3FlagTarget")
        super(S3EmrTarget, self).__init__(*args, **kwargs)
class S3PathTask(ExternalTask):
    """
    An external task that requires the existence of a path in S3.
    """
    path = Parameter()

    def output(self):
        return S3Target(self.path)
class S3EmrTask(ExternalTask):
    """
    An external task that requires the existence of EMR output in S3.
    """
    path = Parameter()

    def output(self):
        # S3EmrTarget is deprecated; kept for backwards compatibility
        return S3EmrTarget(self.path)
class S3FlagTask(ExternalTask):
    """
    An external task that requires the existence of EMR output in S3,
    signalled by a flag file (S3FlagTarget's default is '_SUCCESS').
    """
    path = Parameter()
    flag = Parameter(default=None)

    def output(self):
        # Forward ``flag`` only when it was explicitly provided. Passing the
        # default None straight through would override S3FlagTarget's
        # '_SUCCESS' default and make exists() concatenate ``path + None``,
        # raising a TypeError.
        if self.flag is None:
            return S3FlagTarget(self.path)
        return S3FlagTarget(self.path, flag=self.flag)
| apache-2.0 |
hbradlow/autograde | autograde/forms.py | 1 | 1440 | from django import forms
from django.utils.translation import ugettext_lazy as _, ugettext
from django.forms.widgets import SplitDateTimeWidget,DateTimeInput
from autograde.models import *
from autograde.utils import *
class ProjectMetaForm(forms.ModelForm):
    """ModelForm for ProjectMeta; renders due/release dates with split
    date + time widgets."""
    def __init__(self,*args,**kwargs):
        super(ProjectMetaForm,self).__init__(*args,**kwargs)
        # swap the default datetime inputs for separate date/time fields
        self.fields['due_date'].widget = SplitDateTimeWidget()
        self.fields['release_date'].widget = SplitDateTimeWidget()

    class Meta:
        model = ProjectMeta
        exclude = ("project",)
class ProjectCreateForm(forms.ModelForm):
    """ModelForm for creating a Project that also accepts optional
    uploaded "project_files" from the request's FILES multi-dict and
    attaches them (plus a ProjectMeta row) on save."""
    def __init__(self,*args,**kwargs):
        # Always define project_files so save() cannot fail with an
        # AttributeError: the original only assigned it when the form was
        # constructed with a files argument (len(args) >= 2).
        self.project_files = []
        if len(args) >= 2 and args[1] is not None:
            self.project_files = args[1].getlist("project_files", [])
        super(ProjectCreateForm,self).__init__(*args,**kwargs)

    def save(self,*args,**kwargs):
        super(ProjectCreateForm,self).save(*args,**kwargs)
        ProjectMeta.objects.create(project=self.instance)
        # persist each uploaded file against the freshly saved project
        p = self.instance
        for file in self.project_files:
            ProjectFile.objects.create(project=p, file=file)

    class Meta:
        model = Project
        exclude = ("instructors",)
class TestCaseForm(forms.ModelForm):
    """ModelForm exposing a test case's file and its expected results."""
    class Meta:
        model = TestCase
        fields = ("file","expected_results",)
class ProjectFileForm(forms.ModelForm):
    """ModelForm for a project file and its student-visibility flag."""
    class Meta:
        model = ProjectFile
        fields = ("file","is_student_viewable")
| gpl-3.0 |
jaggu303619/asylum-v2.0 | openerp/report/render/makohtml2html/__init__.py | 76 | 1120 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from makohtml2html import parseNode
#.apidoc title: MAKO to HTML engine
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nachandr/cfme_tests | cfme/tests/automate/test_smoke.py | 2 | 1220 | """This test contains necessary smoke tests for the Automate."""
import pytest
from cfme import test_requirements
# Module-wide markers: automate requirement, smoke suite, tier 2, plus an
# ignore_stream entry covering upstream streams and the RedHat-domain
# parametrization — NOTE(review): confirm ignore_stream's tuple semantics.
pytestmark = [
    test_requirements.automate,
    pytest.mark.smoke,
    pytest.mark.tier(2),
    pytest.mark.ignore_stream(("upstream", {"domain_name": "RedHat"}))
]
@pytest.mark.rhel_testing
@pytest.mark.parametrize("domain_name", ["ManageIQ", "RedHat"])
def test_domain_present(domain_name, soft_assert, appliance):
    """This test verifies presence of domains that are included in the appliance.

    Polarion:
        assignee: dgaikwad
        casecomponent: Automate
        caseimportance: critical
        initialEstimate: 1/60h
        testtype: functional
        tags: automate
        testSteps:
            1. Clean appliance.
            2. Open the Automate Explorer.
            3. Verify that all of the required domains are present.
    """
    domain = appliance.collections.domains.instantiate(name=domain_name)
    # each built-in domain must exist, be locked, and be enabled;
    # soft_assert records failures without aborting the test early
    soft_assert(domain.exists, f"Domain {domain_name} does not exist!")
    soft_assert(domain.locked, f"Domain {domain_name} is not locked!")
    soft_assert(
        appliance.check_domain_enabled(
            domain_name), f"Domain {domain_name} is not enabled!")
| gpl-2.0 |
nvoron23/hue | desktop/core/ext-py/Django-1.6.10/tests/extra_regress/models.py | 114 | 1365 | from __future__ import unicode_literals
import copy
import datetime
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class RevisionableModel(models.Model):
    # self-reference: each revision points at the first revision in its chain
    base = models.ForeignKey('self', null=True)
    title = models.CharField(blank=True, max_length=255)
    when = models.DateTimeField(default=datetime.datetime.now)

    def __str__(self):
        # assumes base is set (save() guarantees it for saved rows)
        return "%s (%s, %s)" % (self.title, self.id, self.base.id)

    def save(self, *args, **kwargs):
        """Save, then make a freshly created row its own base revision."""
        super(RevisionableModel, self).save(*args, **kwargs)
        if not self.base:
            self.base = self
            # the second save is an update, so drop any insert/update forcing
            kwargs.pop('force_insert', None)
            kwargs.pop('force_update', None)
            super(RevisionableModel, self).save(*args, **kwargs)

    def new_revision(self):
        """Return an unsaved shallow copy that gets a fresh pk on save."""
        new_revision = copy.copy(self)
        new_revision.pk = None
        return new_revision
class Order(models.Model):
    # minimal order model used by the extra() regression tests
    created_by = models.ForeignKey(User)
    text = models.TextField()
@python_2_unicode_compatible
class TestObject(models.Model):
    # three plain char fields exercised by extra()/values() queries
    first = models.CharField(max_length=20)
    second = models.CharField(max_length=20)
    third = models.CharField(max_length=20)

    def __str__(self):
        return 'TestObject: %s,%s,%s' % (self.first,self.second,self.third)
| apache-2.0 |
CloverHealth/airflow | airflow/example_dags/example_docker_operator.py | 20 | 1937 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
from airflow import DAG
from airflow.operators import BashOperator
from datetime import datetime, timedelta
from airflow.operators.docker_operator import DockerOperator
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime.utcnow(),
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5)
}
dag = DAG(
'docker_sample', default_args=default_args, schedule_interval=timedelta(minutes=10))
t1 = BashOperator(
task_id='print_date',
bash_command='date',
dag=dag)
t2 = BashOperator(
task_id='sleep',
bash_command='sleep 5',
retries=3,
dag=dag)
t3 = DockerOperator(api_version='1.19',
docker_url='tcp://localhost:2375', #Set your docker URL
command='/bin/sleep 30',
image='centos:latest',
network_mode='bridge',
task_id='docker_op_tester',
dag=dag)
t4 = BashOperator(
task_id='print_hello',
bash_command='echo "hello world!!!"',
dag=dag)
t1.set_downstream(t2)
t1.set_downstream(t3)
t3.set_downstream(t4)
"""
| apache-2.0 |
EliotBerriot/django-modelstats | tests/test_datasets.py | 1 | 6592 | import datetime
from .base import TestBase
from modelstats import models
from modelstats import datasets
class TestDateDataSet(TestBase):
    """Exercises datasets.DateDataSet over User.date_joined: plain
    processing, year/month/day lookups, grouping, reverse sorting, and
    missing-date filling."""

    def test_datedataset(self):
        # default fixture: one bucket per distinct join date
        self.create_users()
        queryset = self.user_model.objects.all()
        dataset = datasets.DateDataSet(field='date_joined', fill_missing_dates=False, queryset=queryset).process()
        for i, date_data in enumerate(self.default_dates_joined):
            date, quantity = date_data
            self.assertEqual(dataset.data[i]['key'], date.strftime('%Y-%m-%d'))
            self.assertEqual(dataset.data[i]['value'], quantity)

    def test_datedataset_lookup_year(self):
        dates_joined = [
            (datetime.datetime(2016, 1, 1, 12, 12), 11),
            (datetime.datetime(2016, 2, 1, 12, 12), 9),
            (datetime.datetime(2017, 1, 2, 12, 12), 8),
            (datetime.datetime(2018, 1, 5, 12, 12), 7),
        ]
        self.create_users(dates_joined)
        queryset = self.user_model.objects.all()
        dataset = datasets.DateDataSet(field='date_joined', queryset=queryset, fill_missing_dates=False, year=2016).process()
        # only the two 2016 buckets should be counted: 11 + 9
        self.assertEqual(sum([d['value'] for d in dataset.data]), 20)

    def test_datedataset_lookup_month(self):
        dates_joined = [
            (datetime.datetime(2016, 1, 1, 12, 12), 11),
            (datetime.datetime(2016, 2, 1, 12, 12), 9),
            (datetime.datetime(2017, 1, 2, 12, 12), 8),
            (datetime.datetime(2018, 1, 1, 12, 12), 8),
            (datetime.datetime(2019, 2, 1, 12, 12), 5),
        ]
        self.create_users(dates_joined)
        queryset = self.user_model.objects.all()
        dataset = datasets.DateDataSet(field='date_joined', queryset=queryset, fill_missing_dates=False, month=2).process()
        # only February buckets across all years: 9 + 5
        self.assertEqual(sum([d['value'] for d in dataset.data]), 14)

    def test_datedataset_lookup_day(self):
        dates_joined = [
            (datetime.datetime(2016, 1, 1, 12, 12), 11),
            (datetime.datetime(2016, 2, 2, 12, 12), 9),
            (datetime.datetime(2017, 1, 1, 12, 12), 8),
            (datetime.datetime(2018, 1, 4, 12, 12), 8),
            (datetime.datetime(2019, 2, 5, 12, 12), 5),
        ]
        self.create_users(dates_joined)
        queryset = self.user_model.objects.all()
        dataset = datasets.DateDataSet(field='date_joined', queryset=queryset, fill_missing_dates=False, day=1).process()
        # only day-of-month == 1 buckets: 11 + 8
        self.assertEqual(sum([d['value'] for d in dataset.data]), 19)

    def test_datedataset_group_by_month(self):
        # default fixture dates all fall in January 2015, so grouping by
        # month collapses them into a single bucket
        self.create_users()
        queryset = self.user_model.objects.all()
        dataset = datasets.DateDataSet(field='date_joined', group_by='month', fill_missing_dates=False, queryset=queryset).process()
        total_quantity = sum([quantity for date, quantity in self.default_dates_joined])
        self.assertEqual(dataset.data[0]['key'], '2015-01-01')
        self.assertEqual(dataset.data[0]['value'], total_quantity)

    def test_datedataset_group_by_year(self):
        # same collapse as above, but grouped by year
        self.create_users()
        queryset = self.user_model.objects.all()
        dataset = datasets.DateDataSet(field='date_joined', group_by='year', fill_missing_dates=False, queryset=queryset).process()
        total_quantity = sum([quantity for date, quantity in self.default_dates_joined])
        self.assertEqual(dataset.data[0]['key'], '2015-01-01')
        self.assertEqual(dataset.data[0]['value'], total_quantity)

    def test_datedataset_sort_reverse(self):
        dates_joined = [
            (datetime.datetime(2015, 1, 1, 12, 12), 11),
            (datetime.datetime(2015, 1, 2, 12, 12), 8),
            (datetime.datetime(2015, 1, 3, 12, 12), 7),
        ]
        users = self.create_users(dates_joined)
        queryset = self.user_model.objects.all()
        dataset = datasets.DateDataSet(field='date_joined', sort='reverse', fill_missing_dates=False, queryset=queryset).process()
        # newest date first
        self.assertEqual(dataset.data[0]['key'], '2015-01-03')
        self.assertEqual(dataset.data[1]['key'], '2015-01-02')
        self.assertEqual(dataset.data[2]['key'], '2015-01-01')

    def test_datedataset_fill_missing_dates_day(self):
        dates_joined = [
            (datetime.datetime(2015, 1, 1, 12, 12), 11),
            (datetime.datetime(2015, 1, 2, 12, 12), 8),
            (datetime.datetime(2015, 1, 5, 12, 12), 7),
        ]
        users = self.create_users(dates_joined)
        queryset = self.user_model.objects.all()
        dataset = datasets.DateDataSet(field='date_joined', fill_missing_dates=True, queryset=queryset).process()
        # the gap days (3rd and 4th) are inserted with zero values
        self.assertEqual(dataset.data[0]['key'], '2015/01/01')
        self.assertEqual(dataset.data[1]['key'], '2015/01/02')
        self.assertEqual(dataset.data[2]['key'], '2015/01/03')
        self.assertEqual(dataset.data[3]['key'], '2015/01/04')
        self.assertEqual(dataset.data[4]['key'], '2015/01/05')
        self.assertEqual(dataset.data[2]['value'], 0)

    def test_datedataset_fill_missing_dates_month(self):
        dates_joined = [
            (datetime.datetime(2015, 1, 1, 12, 12), 11),
            (datetime.datetime(2015, 3, 1, 12, 12), 8),
            (datetime.datetime(2015, 5, 1, 12, 12), 7),
        ]
        users = self.create_users(dates_joined)
        queryset = self.user_model.objects.all()
        dataset = datasets.DateDataSet(field='date_joined', group_by='month', fill_missing_dates=True, queryset=queryset).process()
        # missing months (Feb, Apr) are inserted in order
        self.assertEqual(dataset.data[0]['key'], '2015/01')
        self.assertEqual(dataset.data[1]['key'], '2015/02')
        self.assertEqual(dataset.data[2]['key'], '2015/03')
        self.assertEqual(dataset.data[3]['key'], '2015/04')
        self.assertEqual(dataset.data[4]['key'], '2015/05')

    def test_datedataset_fill_missing_dates_year(self):
        dates_joined = [
            (datetime.datetime(2015, 1, 1, 12, 12), 11),
            (datetime.datetime(2017, 3, 1, 12, 12), 8),
            (datetime.datetime(2020, 5, 1, 12, 12), 7),
        ]
        users = self.create_users(dates_joined)
        queryset = self.user_model.objects.all()
        dataset = datasets.DateDataSet(field='date_joined', group_by='year', fill_missing_dates=True, queryset=queryset).process()
        # every year between the first and last data point is present
        self.assertEqual(dataset.data[0]['key'], '2015')
        self.assertEqual(dataset.data[1]['key'], '2016')
        self.assertEqual(dataset.data[2]['key'], '2017')
        self.assertEqual(dataset.data[3]['key'], '2018')
        self.assertEqual(dataset.data[4]['key'], '2019')
        self.assertEqual(dataset.data[5]['key'], '2020')
| bsd-3-clause |
disabler/isida3 | lib/chardet/euckrfreq.py | 3 | 45956 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Idea Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ration = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0

# size of the char-to-frequency-order table below — presumably the number
# of meaningful (detection-relevant) entries; verify against the table
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table ,
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
#Everything below is of no interest for detection purpose
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
| gpl-3.0 |
archf/ansible | test/units/modules/network/iosxr/test_iosxr_system.py | 62 | 3738 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from .iosxr_module import TestIosxrModule, load_fixture, set_module_args
from ansible.modules.network.iosxr import iosxr_system
class TestIosxrSystemModule(TestIosxrModule):
    """Unit tests for the iosxr_system Ansible module.

    get_config/load_config are patched so no device connection is made:
    the "running config" is served from the iosxr_system_config.cfg
    fixture and generated commands are captured instead of being sent.
    """

    module = iosxr_system

    def setUp(self):
        # Patch the module's config accessors; started mocks are stopped
        # again in tearDown.
        self.mock_get_config = patch('ansible.modules.network.iosxr.iosxr_system.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible.modules.network.iosxr.iosxr_system.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None):
        """Serve the canned running-config for every test in this class."""
        self.get_config.return_value = load_fixture('iosxr_system_config.cfg')
        self.load_config.return_value = dict(diff=None, session='session')

    def test_iosxr_system_hostname_changed(self):
        set_module_args(dict(hostname='foo'))
        commands = ['hostname foo']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_domain_name(self):
        set_module_args(dict(domain_name='test.com'))
        commands = ['domain name test.com']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_domain_search(self):
        set_module_args(dict(domain_search=['ansible.com', 'redhat.com']))
        commands = ['domain list ansible.com', 'no domain list cisco.com']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_lookup_source(self):
        set_module_args(dict(lookup_source='Ethernet1'))
        commands = ['domain lookup source-interface Ethernet1']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_lookup_enabled(self):
        set_module_args(dict(lookup_enabled=True))
        commands = ['no domain lookup disable']
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_name_servers(self):
        name_servers = ['8.8.8.8', '8.8.4.4', '1.1.1.1']
        set_module_args(dict(name_servers=name_servers))
        # The add/remove ordering of name-server commands is not
        # deterministic, so only the `changed` flag is asserted here
        # (the previously computed `commands` list was never used).
        self.execute_module(changed=True)

    def test_iosxr_system_state_absent(self):
        set_module_args(dict(state='absent'))
        commands = [
            'no hostname',
            'no domain name',
            'no domain lookup disable',
            'no domain lookup source-interface MgmtEth0/0/CPU0/0',
            'no domain list redhat.com',
            'no domain list cisco.com',
            'no domain name-server 8.8.8.8',
            'no domain name-server 8.8.4.4'
        ]
        self.execute_module(changed=True, commands=commands)

    def test_iosxr_system_no_change(self):
        # Args matching the fixture must produce no commands and changed=False.
        set_module_args(dict(hostname='iosxr01', domain_name='eng.ansible.com'))
        self.execute_module()
| gpl-3.0 |
cdrooom/odoo | addons/base_setup/res_config.py | 3 | 6242 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import re
from openerp.report.render.rml2pdf import customfonts
class base_config_settings(osv.osv_memory):
    """Transient wizard behind Settings > General Settings.

    Each ``module_*`` boolean triggers (un)installation of the matching
    addon via the res.config.settings machinery; the remaining fields
    tweak company-wide defaults (report font, partner sharing rule).
    """
    _name = 'base.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'module_multi_company': fields.boolean('Manage multiple companies',
            help='Work in multi-company environments, with appropriate security access between companies.\n'
                '-This installs the module multi_company.'),
        'module_share': fields.boolean('Allow documents sharing',
            help="""Share or embbed any screen of Odoo."""),
        'module_portal': fields.boolean('Activate the customer portal',
            help="""Give your customers access to their documents."""),
        'module_auth_oauth': fields.boolean('Use external authentication providers, sign in with google, facebook, ...'),
        'module_base_import': fields.boolean("Allow users to import data from CSV files"),
        'module_google_drive': fields.boolean('Attach Google documents to any record',
            help="""This installs the module google_docs."""),
        'module_google_calendar': fields.boolean('Allow the users to synchronize their calendar with Google Calendar',
            help="""This installs the module google_calendar."""),
        'font': fields.many2one('res.font', string="Report Font", domain=[('mode', 'in', ('Normal', 'Regular', 'all', 'Book'))],
            help="Set the font into the report header, it will be used as default font in the RML reports of the user company"),
        'module_inter_company_rules': fields.boolean('Manage Inter Company',
            help="""This installs the module inter_company_rules.\n Configure company rules to automatically create SO/PO when one of your company sells/buys to another of your company."""),
        'company_share_partner': fields.boolean('Share partners to all companies',
            help="Share your partners to all companies defined in your instance.\n"
                " * Checked : Partners are visible for every companies, even if a company is defined on the partner.\n"
                " * Unchecked : Each company can see only its partner (partners where company is defined). Partners not related to a company are visible for all companies."),
    }
    _defaults= {
        # Default report font: the current user's company font.
        'font': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.font.id,
    }
    def open_company(self, cr, uid, ids, context=None):
        """Open the current user's company form in the current window."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        return {
            'type': 'ir.actions.act_window',
            'name': 'Your Company',
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'res.company',
            'res_id': user.company_id.id,
            'target': 'current',
        }
    def _change_header(self, header,font):
        """ Replace default fontname use in header and setfont tag """
        # NOTE(review): replacement strings use '\g<1>' in non-raw literals;
        # valid here but would raise invalid-escape warnings on Python 3.
        default_para = re.sub('fontName.?=.?".*"', 'fontName="%s"'% font,header)
        return re.sub('(<setFont.?name.?=.?)(".*?")(.)', '\g<1>"%s"\g<3>'% font,default_para)
    def set_base_defaults(self, cr, uid, ids, context=None):
        """Apply the chosen font to the company's three RML headers."""
        # NOTE(review): ir_model_data is fetched but never used — dead local;
        # confirm before removing.
        ir_model_data = self.pool.get('ir.model.data')
        wizard = self.browse(cr, uid, ids, context)[0]
        if wizard.font:
            user = self.pool.get('res.users').browse(cr, uid, uid, context)
            font_name = wizard.font.name
            user.company_id.write({'font': wizard.font.id,'rml_header': self._change_header(user.company_id.rml_header,font_name), 'rml_header2': self._change_header(user.company_id.rml_header2, font_name), 'rml_header3': self._change_header(user.company_id.rml_header3, font_name)})
        return {}
    def act_discover_fonts(self, cr, uid, ids, context=None):
        """Rescan the system for fonts usable in RML reports."""
        return self.pool.get("res.font").font_scan(cr, uid, context=context)
    def get_default_company_share_partner(self, cr, uid, ids, fields, context=None):
        """Partners are shared when the per-company record rule is disabled."""
        partner_rule = self.pool['ir.model.data'].xmlid_to_object(cr, uid, 'base.res_partner_rule', context=context)
        return {
            'company_share_partner': not bool(partner_rule.active)
        }
    def set_default_company_share_partner(self, cr, uid, ids, context=None):
        """Toggle the base.res_partner_rule record rule to (un)share partners."""
        partner_rule = self.pool['ir.model.data'].xmlid_to_object(cr, uid, 'base.res_partner_rule', context=context)
        for wizard in self.browse(cr, uid, ids, context=context):
            self.pool['ir.rule'].write(cr, uid, [partner_rule.id], {'active': not bool(wizard.company_share_partner)}, context=context)
# Preferences wizard for Sales & CRM.
# It is defined here because it is inherited independently in modules sale, crm.
class sale_config_settings(osv.osv_memory):
    """Shared Sales & CRM settings wizard.

    Declared here (base_setup) so that the sale and crm modules can each
    inherit and extend it independently.
    """
    _name = 'sale.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'module_web_linkedin': fields.boolean('Get contacts automatically from linkedIn',
            help="""When you create a new contact (person or company), you will be able to load all the data from LinkedIn (photos, address, etc)."""),
        'module_crm': fields.boolean('CRM'),
        'module_sale' : fields.boolean('SALE'),
    }
| agpl-3.0 |
shadowsocksR-private/shadowsocksR | shadowsocks/encrypt_test.py | 20 | 1045 | from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks.crypto import rc4_md5
from shadowsocks.crypto import openssl
from shadowsocks.crypto import sodium
from shadowsocks.crypto import table
def run(func):
    """Invoke ``func()`` best-effort, swallowing (but reporting) failures.

    Used for optional ciphers whose backing library may be missing.  The
    previous bare ``except:`` also silenced SystemExit/KeyboardInterrupt
    and hid the reason for the failure; only ``Exception`` is caught now
    and the error is printed so it stays visible.
    """
    try:
        func()
    except Exception as e:
        print(e)
def run_n(func, name):
    """Invoke ``func(name)`` best-effort, swallowing (but reporting) failures.

    Same semantics as run(): catches ``Exception`` only (the bare
    ``except:`` previously also hid SystemExit/KeyboardInterrupt) and
    prints the error instead of discarding it silently.
    """
    try:
        func(name)
    except Exception as e:
        print(e)
def main():
    """Smoke-test every cipher backend, printing a header before each.

    The first three ciphers are mandatory — a failure there propagates
    and aborts the script.  The remaining ones are optional and are
    executed through run()/run_n(), which tolerate missing backends.
    """
    mandatory = (
        ("rc4_md5", rc4_md5.test),
        ("aes-256-cfb", openssl.test_aes_256_cfb),
        ("aes-128-cfb", openssl.test_aes_128_cfb),
    )
    # (label, callable, run_method-name-or-None); a name means the cipher
    # is exercised via openssl.run_method(name) through run_n().
    optional = (
        ("bf-cfb", openssl.test_bf_cfb, None),
        ("camellia-128-cfb", openssl.run_method, "camellia-128-cfb"),
        ("cast5-cfb", openssl.run_method, "cast5-cfb"),
        ("idea-cfb", openssl.run_method, "idea-cfb"),
        ("seed-cfb", openssl.run_method, "seed-cfb"),
        ("salsa20", sodium.test_salsa20, None),
        ("chacha20", sodium.test_chacha20, None),
    )
    for label, test_fn in mandatory:
        print("\n" + label)
        test_fn()
    for label, test_fn, method_name in optional:
        print("\n" + label)
        if method_name is None:
            run(test_fn)
        else:
            run_n(test_fn, method_name)
if __name__ == '__main__':
    main()
| apache-2.0 |
raymondxyang/tensorflow | tensorflow/python/framework/op_def_library.py | 60 | 31657 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to hold a library of OpDefs and use it to create Brain operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import tf_contextlib
def _Attr(op_def, name):
for attr in op_def.attr:
if attr.name == name:
return attr
raise TypeError("Inconsistent OpDef for '%s', missing attr '%s'" %
(op_def.name, name))
def _AttrValue(attr_protos, name):
if name in attr_protos:
return attr_protos[name]
raise TypeError("Inconsistent OpDef, missing attr '%s' from '%s'." %
(name, attr_protos))
def _SatisfiesTypeConstraint(dtype, attr_def, param_name):
if attr_def.HasField("allowed_values"):
allowed_list = attr_def.allowed_values.list.type
if dtype not in allowed_list:
raise TypeError(
"Value passed to parameter '%s' has DataType %s not in list of "
"allowed values: %s" %
(param_name, dtypes.as_dtype(dtype).name,
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
def _IsListParameter(arg):
if arg.number_attr:
return True
elif arg.type_list_attr:
return True
return False
def _NumTypeFields(arg):
  """Counts how many of the three type fields are set on `arg` (0-3)."""
  return ((arg.type != types_pb2.DT_INVALID)
          + bool(arg.type_attr)
          + bool(arg.type_list_attr))
def _IsListValue(v):
return isinstance(v, (list, tuple))
def _Flatten(l):
"""Converts [1, 2, [3, 4], [5]] to [1, 2, 3, 4, 5]."""
# [1, 2, [3, 4], [5]] -> [[1], [2], [3, 4], [5]]
l_of_l = [x if _IsListValue(x) else [x] for x in l]
# [[1], [2], [3, 4], [5]] -> [1, 2, 3, 4, 5]
return [item for sublist in l_of_l for item in sublist]
def _Restructure(l, structure):
"""Returns the elements of list l structured according to the given structure.
A structure is represented by a list whose elements are either
`None` or a non-negative integer. `None` corresponds to a single
element in the output list, and an integer N corresponds to a nested
list of length N.
The function returns a data structure whose shape is given by
`structure`, and whose elements are taken from `l`. If `structure`
is a singleton, the function returns the single data structure
implied by the 0th element of `structure`. For example:
_Restructure(["foo", "bar", "baz", "qux"], [None, 2, None])
-> ["foo", ["bar", "baz"], "qux"]
_Restructure(["foo"], [None]) -> "foo"
_Restructure(["foo"], [1]) -> ["foo"]
_Restructure([], [0]) -> []
Args:
l: A list.
structure: A list whose elements are either `None` or a non-negative
integer.
Returns:
The elements of `l`, restructured according to `structure`. If
`structure` is a list of length 1, this function returns the
single data structure implied by `structure[0]`.
"""
result = []
current_index = 0
for element in structure:
if element is None:
result.append(l[current_index])
current_index += 1
else:
result.append(l[current_index:current_index+element])
current_index += element
if len(result) == 1:
return result[0]
else:
return tuple(result)
def _MakeFloat(v, arg_name):
  """Coerces `v` to float, raising TypeError for non-real values."""
  if isinstance(v, compat.real_types):
    return float(v)
  raise TypeError("Expected float for argument '%s' not %s." %
                  (arg_name, repr(v)))
def _MakeInt(v, arg_name):
  """Coerces `v` to int; string values are rejected even if numeric."""
  def _type_error():
    return TypeError("Expected int for argument '%s' not %s." %
                     (arg_name, repr(v)))
  if isinstance(v, six.string_types):
    raise _type_error()
  try:
    return int(v)
  except (ValueError, TypeError):
    raise _type_error()
def _MakeStr(v, arg_name):
  """Coerces `v` to bytes (unicode strings are encoded), rejecting others."""
  if isinstance(v, compat.bytes_or_text_types):
    return compat.as_bytes(v)  # Convert unicode strings to bytes.
  raise TypeError("Expected string for argument '%s' not %s." %
                  (arg_name, repr(v)))
def _MakeBool(v, arg_name):
if not isinstance(v, bool):
raise TypeError("Expected bool for argument '%s' not %s." %
(arg_name, repr(v)))
return v
def _MakeType(v, attr_def):
  """Converts `v` to a DataType enum value, checking attr_def's constraints."""
  try:
    base = dtypes.as_dtype(v).base_dtype
  except TypeError:
    raise TypeError("Expected DataType for argument '%s' not %s." %
                    (attr_def.name, repr(v)))
  enum_value = base.as_datatype_enum
  _SatisfiesTypeConstraint(enum_value, attr_def, param_name=attr_def.name)
  return enum_value
def _MakeShape(v, arg_name):
  """Convert v into a TensorShapeProto.

  Args:
    v: A TensorShapeProto, a list of ints, or a tensor_shape.TensorShape.
    arg_name: String, for error messages.

  Returns:
    A TensorShapeProto.

  Raises:
    TypeError: if v cannot be converted to a TensorShape.
    ValueError: if v describes an invalid shape.
  """
  if isinstance(v, tensor_shape_pb2.TensorShapeProto):
    for d in v.dim:
      if d.name:
        # Dimension names cannot be preserved; warn once and pass through.
        logging.warning("Warning: TensorShapeProto with a named dimension: %s",
                        str(v))
        break
    return v
  try:
    return tensor_shape.as_shape(v).as_proto()
  except TypeError as e:
    raise TypeError("Error converting %s to a TensorShape: %s" % (arg_name, e))
  except ValueError as e:
    raise ValueError("Error converting %s to a TensorShape: %s" % (arg_name, e))
def _MakeTensor(v, arg_name):
  """Validates that `v` is already a TensorProto and passes it through."""
  if not isinstance(v, tensor_pb2.TensorProto):
    raise TypeError(
        "Don't know how to convert %s to a TensorProto for argument '%s'" %
        (repr(v), arg_name))
  return v
class _OpInfo(object):
  """All per-Op state we would like to precompute/validate."""
  def __init__(self, op_def):
    """Validates `op_def` and stores it.

    Raises:
      TypeError: if any input/output arg does not have exactly one type
        field set, or references an attr whose declared attr-type does not
        match its role (type_attr/type_list_attr/number_attr).
    """
    self.op_def = op_def
    # TODO(josh11b): SWIG the ValidateOpDef() function from C++ and call it
    # here, instead of these checks.
    for arg in list(op_def.input_arg) + list(op_def.output_arg):
      num_type_fields = _NumTypeFields(arg)
      if num_type_fields != 1:
        raise TypeError("Arg '%s' of '%s' must have one type field not %d" %
                        (arg.name, op_def.name, num_type_fields))
      if arg.type_attr:
        attr_type = _Attr(op_def, arg.type_attr).type
        if attr_type != "type":
          raise TypeError("Attr '%s' of '%s' used as a type_attr "
                          "but has type %s" %
                          (arg.type_attr, op_def.name, attr_type))
      if arg.type_list_attr:
        attr_type = _Attr(op_def, arg.type_list_attr).type
        if attr_type != "list(type)":
          # NOTE(review): this message reports arg.type_attr rather than
          # arg.type_list_attr — likely a copy/paste typo; confirm upstream.
          raise TypeError(
              "Attr '%s' of '%s' used as a type_list_attr but has type %s" %
              (arg.type_attr, op_def.name, attr_type))
      if arg.number_attr:
        attr_type = _Attr(op_def, arg.number_attr).type
        if attr_type != "int":
          raise TypeError(
              "Attr '%s' of '%s' used as a number_attr but has type %s" %
              (arg.number_attr, op_def.name, attr_type))
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _MaybeColocateWith(inputs):
  """A context manager for (maybe) colocating with a list of input tensors.

  With an empty list this is a no-op context; otherwise it recursively
  stacks one `ops.colocate_with` scope per input.

  Args:
    inputs: A list of `Tensor` or `Operation` objects.

  Returns:
    A context manager.
  """
  if not inputs:
    yield
  else:
    # NOTE(mrry): The `ops.colocate_with()` function accepts only a single
    # op or tensor, so we create one context manager per element in the list.
    with ops.colocate_with(inputs[0]), _MaybeColocateWith(inputs[1:]):
      yield
# pylint: enable=g-doc-return-or-yield
class OpDefLibrary(object):
  """Holds a collection of OpDefs, can add the corresponding Ops to a graph."""
  def __init__(self):
    self._ops = {}  # Maps op name (string) -> _OpInfo for every registered OpDef.
  # pylint: disable=invalid-name
  def add_op(self, op_def):
    """Register an OpDef. May call apply_op with the name afterwards."""
    if not isinstance(op_def, op_def_pb2.OpDef):
      raise TypeError("%s is %s, not an op_def_pb2.OpDef" %
                      (op_def, type(op_def)))
    if not op_def.name:
      raise ValueError("%s missing name." % op_def)
    if op_def.name in self._ops:
      raise RuntimeError("Op name %s registered twice." % op_def.name)
    self._ops[op_def.name] = _OpInfo(op_def)
  def add_op_list(self, op_list):
    """Register the OpDefs from an OpList."""
    if not isinstance(op_list, op_def_pb2.OpList):
      raise TypeError("%s is %s, not an op_def_pb2.OpList" %
                      (op_list, type(op_list)))
    for op_def in op_list.op:
      self.add_op(op_def)
  def apply_op(self, op_type_name, name=None, **keywords):
    # pylint: disable=g-doc-args
    """Add a node invoking a registered Op to a graph.

    Example usage:
      # input1 and input2 can be Tensors or anything ops.convert_to_tensor()
      # will convert to a Tensor.
      op_def_library.apply_op("op", input1=input1, input2=input2)
      # Can specify a node name.
      op_def_library.apply_op("op", input1=input1, name="node_name")
      # Must use keyword arguments, with the names specified in the OpDef.
      op_def_library.apply_op("op", input_name=input, attr_name=attr)

    All attrs must either be inferred from an input or specified.
    (If inferred, the attr must not be specified.) If an attr has a default
    value specified in the Op's OpDef, then you may pass None as the value
    of that attr to get the default.

    Args:
      op_type_name: string. Must match the name field of a registered Op.
      name: string. Optional name of the created op.
      **keywords: input Tensor and attr arguments specified by name,
        and optional parameters to pass when constructing the Operation.

    Returns:
      The Tensor(s) representing the output of the operation, or the Operation
      itself if there are no outputs.

    Raises:
      RuntimeError: On some errors.
      TypeError: On some errors.
      ValueError: On some errors.
    """
    output_structure, is_stateful, op = self._apply_op_helper(
        op_type_name, name, **keywords)
    if output_structure:
      outputs = op.outputs
      res = _Restructure(ops.convert_n_to_tensor(outputs), output_structure)
      # A stateful op with declared-but-empty outputs is more useful to the
      # caller as the Operation itself (so it can be run for its effect).
      if isinstance(res, list) and not res and is_stateful:
        return op
      else:
        return res
    else:
      return op
  def _apply_op_helper(self, op_type_name, name=None, **keywords):
    """Implementation of apply_op that returns output_structure, op.

    Performs input type inference, attr validation/conversion to AttrValue
    protos, output type computation, and finally creates the Operation in
    the graph inferred from the inputs.
    """
    op_info = self._ops.get(op_type_name, None)
    if op_info is None:
      raise RuntimeError("Unrecognized Op name " + op_type_name)
    op_def = op_info.op_def
    # Determine the graph context.
    try:
      # Need to flatten all the arguments into a list.
      # pylint: disable=protected-access
      g = ops._get_graph_from_inputs(_Flatten(keywords.values()))
      # pylint: enable=protected-access
    except AssertionError as e:
      raise RuntimeError(
          "Cannot determine graph for Op '%s' due to: %s"
          % (op_type_name, e.message))
    # Default name if not specified.
    if name is None:
      name = op_type_name
    # Check for deprecation
    deprecation_version = op_def.deprecation.version
    if deprecation_version:
      producer = g.graph_def_versions.producer
      if producer >= deprecation_version:
        raise NotImplementedError(
            ("Op %s is not available in GraphDef version %d. "
             "It has been removed in version %d. %s.") %
            (op_type_name, producer, deprecation_version,
             op_def.deprecation.explanation))
    # Fill in the list of default types for all "type" attrs. This
    # will be used to choose a preferred dtype to convert to in the
    # absence of input type information.
    #
    # TODO(b/31302892): Currently the defaults don't work in the right
    # way if you have two inputs, one of whose type resolution depends
    # on the other. Handling this will require restructuring this code
    # significantly.
    default_type_attr_map = {}
    for attr_def in op_def.attr:
      if attr_def.type != "type":
        continue
      key = attr_def.name
      if attr_def.HasField("default_value"):
        default_type_attr_map[key] = dtypes.as_dtype(
            attr_def.default_value.type)
    # Requires that op_def has passed validation (using the C++
    # ValidateOpDef() from ../framework/op_def_util.h).
    attrs = {}
    inputs = []
    input_types = []
    with g.as_default(), ops.name_scope(name) as scope:
      # Perform input type inference
      inferred_from = {}
      for input_arg in op_def.input_arg:
        input_name = input_arg.name
        if input_name in keywords:
          values = keywords.pop(input_name)
        elif input_name + "_" in keywords:
          # Handle the case where the name is a keyword or built-in
          # for Python so we use the name + _ instead.
          input_name += "_"
          values = keywords.pop(input_name)
        else:
          raise TypeError("No argument for input " + input_name)
        # Goals:
        # * Convert values to Tensors if it contains constants.
        # * Verify that values is a list if that matches the input_arg's
        #   type.
        # * If the input_arg's type is determined by attrs, either set
        #   those attrs and validate those attr values are legal (if
        #   they have not yet been set) or validate the input matches
        #   the type indicated by the attrs (if they have already been
        #   inferred via an earlier input).
        # * If the input_arg has an explicit type, make sure the input
        #   conforms.
        if _IsListParameter(input_arg):
          if not _IsListValue(values):
            raise TypeError(
                "Expected list for '%s' argument to '%s' Op, not %s." %
                (input_name, op_type_name, values))
          # In cases where we expect all elements of the list to have the
          # same dtype, try to cast non-Tensor elements to that type.
          dtype = None
          default_dtype = None
          if input_arg.type != types_pb2.DT_INVALID:
            dtype = input_arg.type
          elif input_arg.number_attr:
            if input_arg.type_attr in attrs:
              dtype = attrs[input_arg.type_attr]
            else:
              for t in values:
                if isinstance(t, ops.Tensor):
                  dtype = t.dtype
                  break
            # dtype still not found, prefer using the default dtype
            # from the attr.
            if dtype is None and input_arg.type_attr in default_type_attr_map:
              default_dtype = default_type_attr_map[input_arg.type_attr]
          try:
            if not input_arg.is_ref and dtype:
              dtype = dtypes.as_dtype(dtype).base_dtype
            values = ops.internal_convert_n_to_tensor(
                values,
                name=input_arg.name,
                dtype=dtype if dtype else None,
                preferred_dtype=default_dtype,
                as_ref=input_arg.is_ref)
            if input_arg.number_attr and len(
                set(v.dtype.base_dtype for v in values)) > 1:
              raise TypeError()  # All types should match.
          except (TypeError, ValueError):
            # What types does the conversion function think values have?
            observed_types = []
            for value in values:
              try:
                converted_value = ops.internal_convert_to_tensor(
                    value, as_ref=input_arg.is_ref)
                observed_types.append(converted_value.dtype.base_dtype.name)
              except (TypeError, ValueError):
                observed_types.append("<NOT CONVERTIBLE TO TENSOR>")
            observed = ", ".join(observed_types)
            prefix = (
                "Tensors in list passed to '%s' of '%s' Op have types [%s]" %
                (input_name, op_type_name, observed))
            if input_arg.number_attr:
              if input_arg.type != types_pb2.DT_INVALID:
                raise TypeError("%s that do not match expected type %s." %
                                (prefix, dtype.name))
              elif input_arg.type_attr in attrs:
                raise TypeError("%s that do not match type %s inferred from "
                                "earlier arguments." %
                                (prefix, dtype.name))
              else:
                raise TypeError("%s that don't all match." % prefix)
            else:
              raise TypeError("%s that are invalid." % prefix)
          types = [x.dtype for x in values]
          inputs.extend(values)
        else:
          # In cases where we have an expected type, try to convert non-Tensor
          # arguments to that type.
          dtype = None
          default_dtype = None
          if input_arg.type != types_pb2.DT_INVALID:
            dtype = input_arg.type
          elif input_arg.type_attr in attrs:
            dtype = attrs[input_arg.type_attr]
          elif input_arg.type_attr in default_type_attr_map:
            # The dtype could not be inferred solely from the inputs,
            # so we prefer the attr's default, so code that adds a new attr
            # with a default is backwards compatible.
            default_dtype = default_type_attr_map[input_arg.type_attr]
          try:
            values = ops.internal_convert_to_tensor(
                values,
                name=input_arg.name,
                dtype=dtype,
                as_ref=input_arg.is_ref,
                preferred_dtype=default_dtype)
          except TypeError as err:
            if dtype is None:
              raise err
            else:
              raise TypeError(
                  "Expected %s passed to parameter '%s' of op '%s', got %s of "
                  "type '%s' instead." %
                  (dtypes.as_dtype(dtype).name, input_arg.name, op_type_name,
                   repr(values), type(values).__name__))
          except ValueError:
            # What type does convert_to_tensor think it has?
            try:
              observed = ops.internal_convert_to_tensor(
                  values, as_ref=input_arg.is_ref).dtype.name
            except ValueError as err:
              raise ValueError(
                  "Tried to convert '%s' to a tensor and failed. Error: %s" %
                  (input_name, err))
            prefix = ("Input '%s' of '%s' Op has type %s that does not match" %
                      (input_name, op_type_name, observed))
            if input_arg.type != types_pb2.DT_INVALID:
              raise TypeError("%s expected type of %s." %
                              (prefix, dtypes.as_dtype(input_arg.type).name))
            else:
              # Update the maps with the default, if needed.
              k = input_arg.type_attr
              if k in default_type_attr_map:
                if k not in attrs:
                  attrs[k] = default_type_attr_map[k]
                if k not in inferred_from:
                  inferred_from[k] = "Default in OpDef"
              raise TypeError(
                  "%s type %s of argument '%s'." %
                  (prefix, dtypes.as_dtype(attrs[input_arg.type_attr]).name,
                   inferred_from[input_arg.type_attr]))
          types = [values.dtype]
          inputs.append(values)
        base_types = [x.base_dtype for x in types]
        if input_arg.number_attr:
          # <number-attr> * <type> or <number-attr> * <type-attr>
          if input_arg.number_attr in attrs:
            if len(values) != attrs[input_arg.number_attr]:
              raise ValueError(
                  "List argument '%s' to '%s' Op with length %d must match "
                  "length %d of argument '%s'." %
                  (input_name, op_type_name, len(values),
                   attrs[input_arg.number_attr],
                   inferred_from[input_arg.number_attr]))
          else:
            attrs[input_arg.number_attr] = len(values)
            inferred_from[input_arg.number_attr] = input_name
            num_attr = _Attr(op_def, input_arg.number_attr)
            if num_attr.has_minimum and len(values) < num_attr.minimum:
              raise ValueError(
                  "List argument '%s' to '%s' Op with length %d shorter "
                  "than minimum length %d." %
                  (input_name, op_type_name, len(values), num_attr.minimum))
          # All tensors must have the same base type.
          if any([bt != base_types[0] for bt in base_types]):
            raise TypeError(
                "All tensors passed to '%s' of '%s' Op "
                "must have the same type." %
                (input_name, op_type_name))
          if input_arg.type != types_pb2.DT_INVALID:
            # <number-attr> * <type> case
            if base_types and base_types[0] != input_arg.type:
              assert False, "Unreachable"
          elif input_arg.type_attr in attrs:
            # <number-attr> * <type-attr> case, where <type-attr> already
            # has an inferred value.
            if base_types and base_types[0] != attrs[input_arg.type_attr]:
              assert False, "Unreachable"
          else:
            # <number-attr> * <type-attr> case, where we are now setting
            # the <type-attr> based on this input
            if not base_types:
              raise TypeError(
                  "Don't know how to infer type variable from empty input "
                  "list passed to input '%s' of '%s' Op." %
                  (input_name, op_type_name))
            attrs[input_arg.type_attr] = base_types[0]
            inferred_from[input_arg.type_attr] = input_name
            type_attr = _Attr(op_def, input_arg.type_attr)
            _SatisfiesTypeConstraint(base_types[0], type_attr,
                                     param_name=input_name)
        elif input_arg.type_attr:
          # <type-attr>
          attr_value = base_types[0]
          if input_arg.type_attr in attrs:
            if attrs[input_arg.type_attr] != attr_value:
              assert False, "Unreachable"
          else:
            for base_type in base_types:
              _SatisfiesTypeConstraint(base_type,
                                       _Attr(op_def, input_arg.type_attr),
                                       param_name=input_name)
            attrs[input_arg.type_attr] = attr_value
            inferred_from[input_arg.type_attr] = input_name
        elif input_arg.type_list_attr:
          # <type-list-attr>
          attr_value = base_types
          if input_arg.type_list_attr in attrs:
            if attrs[input_arg.type_list_attr] != attr_value:
              raise TypeError(
                  "Input '%s' of '%s' Op has type list of %s that does not "
                  "match type list %s of argument '%s'." %
                  (input_name, op_type_name,
                   ", ".join(dtypes.as_dtype(x).name for x in attr_value),
                   ", ".join(dtypes.as_dtype(x).name
                             for x in attrs[input_arg.type_list_attr]),
                   inferred_from[input_arg.type_list_attr]))
          else:
            for base_type in base_types:
              _SatisfiesTypeConstraint(base_type,
                                       _Attr(op_def, input_arg.type_list_attr),
                                       param_name=input_name)
            attrs[input_arg.type_list_attr] = attr_value
            inferred_from[input_arg.type_list_attr] = input_name
        else:
          # single Tensor with specified type
          if base_types[0] != input_arg.type:
            assert False, "Unreachable"
        if input_arg.is_ref:
          if not all(x._is_ref_dtype for x in types):  # pylint: disable=protected-access
            raise TypeError(
                ("'%s' Op requires that input '%s' be a mutable tensor "
                 "(e.g.: a tf.Variable)") % (op_type_name, input_name))
          input_types.extend(types)
        else:
          input_types.extend(base_types)
      # Process remaining attrs
      for attr in op_def.attr:
        # Skip attrs that have already had their values inferred
        if attr.name in attrs:
          if attr.name in keywords:
            raise TypeError(
                "Should not specify value for inferred attr '%s'." % attr.name)
          continue
        if attr.name in keywords:
          attrs[attr.name] = keywords.pop(attr.name)
        elif attr.name + "_" in keywords:
          # Attrs whose names match Python keywords have an extra '_'
          # appended, so we must check for that as well.
          attrs[attr.name] = keywords.pop(attr.name + "_")
        else:
          raise TypeError("No argument for attr " + attr.name)
      # Convert attr values to AttrValue protos.
      attr_protos = {}
      for attr_def in op_def.attr:
        key = attr_def.name
        value = attrs[key]
        attr_value = attr_value_pb2.AttrValue()
        if attr_def.HasField("default_value") and value is None:
          attr_value.CopyFrom(attr_def.default_value)
          attr_protos[key] = attr_value
          continue
        if attr_def.type.startswith("list("):
          if not _IsListValue(value):
            raise TypeError("Expected list for attr " + key)
          if attr_def.has_minimum:
            if len(value) < attr_def.minimum:
              raise ValueError("Attr '%s' of '%s' Op passed list of length %d "
                               "less than minimum %d." %
                               (key, op_type_name, len(value),
                                attr_def.minimum))
          attr_value.list.SetInParent()
        if attr_def.type == "string":
          attr_value.s = _MakeStr(value, key)
          if attr_def.HasField("allowed_values"):
            if attr_value.s not in attr_def.allowed_values.list.s:
              raise ValueError(
                  "Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
                  (key, op_type_name, compat.as_text(attr_value.s),
                   '", "'.join(map(compat.as_text,
                                   attr_def.allowed_values.list.s))))
        elif attr_def.type == "list(string)":
          attr_value.list.s.extend([_MakeStr(x, key) for x in value])
          if attr_def.HasField("allowed_values"):
            for x in attr_value.list.s:
              if x not in attr_def.allowed_values.list.s:
                raise ValueError(
                    "Attr '%s' of '%s' Op passed string '%s' not in: \"%s\"." %
                    (key, op_type_name, compat.as_text(x),
                     '", "'.join(map(compat.as_text,
                                     attr_def.allowed_values.list.s))))
        elif attr_def.type == "int":
          attr_value.i = _MakeInt(value, key)
          if attr_def.has_minimum:
            if attr_value.i < attr_def.minimum:
              raise ValueError(
                  "Attr '%s' of '%s' Op passed %d less than minimum %d." %
                  (key, op_type_name, attr_value.i, attr_def.minimum))
        elif attr_def.type == "list(int)":
          attr_value.list.i.extend([_MakeInt(x, key) for x in value])
        elif attr_def.type == "float":
          attr_value.f = _MakeFloat(value, key)
        elif attr_def.type == "list(float)":
          attr_value.list.f.extend([_MakeFloat(x, key) for x in value])
        elif attr_def.type == "bool":
          attr_value.b = _MakeBool(value, key)
        elif attr_def.type == "list(bool)":
          attr_value.list.b.extend([_MakeBool(x, key) for x in value])
        elif attr_def.type == "type":
          attr_value.type = _MakeType(value, attr_def)
        elif attr_def.type == "list(type)":
          attr_value.list.type.extend(
              [_MakeType(x, attr_def) for x in value])
        elif attr_def.type == "shape":
          attr_value.shape.CopyFrom(_MakeShape(value, key))
        elif attr_def.type == "list(shape)":
          attr_value.list.shape.extend(
              [_MakeShape(x, key) for x in value])
        elif attr_def.type == "tensor":
          attr_value.tensor.CopyFrom(_MakeTensor(value, key))
        elif attr_def.type == "list(tensor)":
          attr_value.list.tensor.extend(
              [_MakeTensor(x, key) for x in value])
        elif attr_def.type == "func":
          if isinstance(value, attr_value_pb2.NameAttrList):
            attr_value.func.CopyFrom(value)
          elif isinstance(value, compat.bytes_or_text_types):
            attr_value.func.name = value
          else:
            value.add_to_graph(ops.get_default_graph())
            attr_value.func.name = value.name
        else:
          raise TypeError("Unrecognized Attr type " + attr_def.type)
        attr_protos[key] = attr_value
      del attrs  # attrs is no longer authoritative, use attr_protos instead
      # Determine output types (possibly using attrs)
      output_types = []
      output_structure = []
      for arg in op_def.output_arg:
        types = []
        if arg.number_attr:
          n = _AttrValue(attr_protos, arg.number_attr).i
          if arg.type_attr:
            types = [_AttrValue(attr_protos, arg.type_attr).type] * n
          else:
            types = [arg.type] * n
          output_structure.append(n)
        elif arg.type_attr:
          t = _AttrValue(attr_protos, arg.type_attr)
          types = [t.type]
          output_structure.append(None)
        elif arg.type_list_attr:
          t = _AttrValue(attr_protos, arg.type_list_attr)
          types = t.list.type
          output_structure.append(len(types))
        else:
          types = [arg.type]
          output_structure.append(None)
        if arg.is_ref:
          types = [dtypes.as_dtype(x)._as_ref for x in types]  # pylint: disable=protected-access
        output_types.extend(types)
      if keywords:
        raise TypeError("apply_op() got unexpected keyword arguments: " +
                        ", ".join(sorted(keywords.keys())))
      # NOTE(mrry): We add an explicit colocation constraint between
      # the newly created op and any of its reference-typed inputs.
      must_colocate_inputs = [val for arg, val in zip(op_def.input_arg, inputs)
                              if arg.is_ref]
      with _MaybeColocateWith(must_colocate_inputs):
        # Add Op to graph
        op = g.create_op(op_type_name, inputs, output_types, name=scope,
                         input_types=input_types, attrs=attr_protos,
                         op_def=op_def)
      return output_structure, op_def.is_stateful, op
  # pylint: enable=invalid-name
| apache-2.0 |
b29308188/MMAI_final | src/utils.py | 1 | 3563 | import sys
sys.path.append(".")
import csv
import numpy as np
from datasets import Photo
import cv2
def f1_score(precision, recall):
    """Return the F1 score (harmonic mean) of *precision* and *recall*.

    Returns 0.0 when both precision and recall are zero, instead of
    raising ZeroDivisionError as the previous version did (the standard
    convention for a degenerate F1).
    """
    if precision + recall == 0:
        return 0.0
    return 2*precision*recall/(precision+recall)
def detect_faces(detector, image):
    """Run an OpenCV cascade face detector over *image*.

    detector: a cv2 cascade classifier, e.g.
        cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
    image: a BGR image as loaded by cv2.
    Returns the detected face rectangles as (x, y, width, height).
    """
    # The cascade works on intensity values, so drop the color channels first.
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Run the multi-scale sliding-window detection with the tuned parameters.
    return detector.detectMultiScale(
        grayscale,
        scaleFactor=1.05,
        minNeighbors=8,
        minSize=(10, 10),
        flags=cv2.CASCADE_SCALE_IMAGE)
def build_photos_from_csv(file_path):
    """
    Read the CSV and return a list of photos with labeled faces.

    Each CSV row describes one face as (image_ID, x, y, w, h, tag); a row
    whose image_ID field literally reads "image_ID" is a header and is
    skipped.  Photos are returned in first-appearance order.

    Fixes over the previous version: the file handle is closed (it was
    leaked before), and each face is attached to the photo matching its
    image_ID — previously faces were appended to the most recently created
    photo, which misattributed rows when rows for one image were not
    contiguous.
    """
    photos = []          # result list, preserves first-appearance order
    photo_by_id = {}     # image_ID -> Photo, for O(1) lookup per row
    with open(file_path) as csv_file:
        for (image_ID, x, y, w, h, tag) in csv.reader(csv_file, delimiter=","):
            if image_ID == "image_ID":  # header row
                continue
            if image_ID not in photo_by_id:
                photo = Photo(image_ID)  # new a photo
                photo_by_id[image_ID] = photo
                photos.append(photo)
            photo_by_id[image_ID].add_face(x, y, w, h, tag)  # add this face
    return photos
def weighted_precision_recall(trueY, predY, sample_weight = None):
    """
    Calculate the weighted precisions and recalls for each kind of label.

    trueY, predY: parallel sequences of labels drawn from {0, 1, 2}.
    sample_weight: optional per-sample weights; defaults to all ones.
    Returns [p0, r0, p1, r1, p2, r2].  A precision (or recall) whose
    denominator is zero — no sample predicted as (or carrying) that
    label — is reported as 0.
    """
    if sample_weight is None:
        sample_weight = np.array([1.0 for i in range(len(trueY))])
    scores = []
    for label in [0, 1, 2]:
        hit = 0.0     # weight of samples predicted as `label` and correct
        deno_r = 0.0  # recall denominator: total weight with true label
        deno_p = 0.0  # precision denominator: total weight predicted as label
        for index, (ty, py) in enumerate(zip(trueY, predY)):
            if ty == label:
                deno_r += sample_weight[index]
            if py == label:
                deno_p += sample_weight[index]
                if py == ty:
                    hit += sample_weight[index]
        # Catch only the division-by-zero case; the previous bare `except`
        # silently swallowed every exception (even KeyboardInterrupt).
        try:
            p = hit / deno_p
        except ZeroDivisionError:
            p = 0
        try:
            r = hit / deno_r
        except ZeroDivisionError:
            r = 0
        scores.extend([p, r])
    return scores
def read_training_data(csv_path, image_prefix = None, category = False, sample_weight = False):
    """
    Read training data from csv file and extract features.

    image_prefix : the folder that contains the images
    category : whether to return the category(image_ID) for each face - used for labelKFold
    sample_weight : whether to return the sample weights of each face
    """
    photos = build_photos_from_csv(csv_path)
    if image_prefix is not None:
        for photo in photos:
            photo.read_image(image_prefix + "/" + photo.image_ID)
    feature_rows = []
    label_rows = []
    face_categories = []
    face_weights = []
    # Flatten every photo's faces into parallel feature/label lists.
    for photo in photos:
        photo.extract_features()
        for face in photo.faces:
            feature_rows.append(face.feature)
            label_rows.append(face.label)
            # Keep the photo's ID so labelKFold never splits one image's faces.
            face_categories.append(photo.image_ID)
            # More faces in an image -> each face counts for less.
            face_weights.append(1.0 / len(photo.faces))
    X = np.array(feature_rows)
    Y = np.array(label_rows)
    weights = np.array(face_weights)
    if category is False and sample_weight is False:
        return (X, Y)
    elif category is False:
        return (X, Y, weights)
    elif sample_weight is False:
        return (X, Y, face_categories)
    else:
        return (X, Y, face_categories, weights)
| gpl-2.0 |
howaboutudance/qweechat | src/qweechat/chat.py | 1 | 4891 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# chat.py - chat area
#
# Copyright (C) 2011-2013 Sebastien Helleu <flashcode@flashtux.org>
#
# This file is part of QWeeChat, a Qt remote GUI for WeeChat.
#
# QWeeChat is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# QWeeChat is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QWeeChat. If not, see <http://www.gnu.org/licenses/>.
#
import datetime
import qt_compat
QtCore = qt_compat.import_module('QtCore')
QtGui = qt_compat.import_module('QtGui')
import config
import weechat.color as color
class ChatTextEdit(QtGui.QTextEdit):
    """Chat area.

    A read-oriented QTextEdit that renders WeeChat messages, interpreting
    inline color/attribute escape sequences of the form ``\x01(<action><code>)``
    produced by ``weechat.color.Color.convert()``.
    """

    def __init__(self, debug, *args):
        QtGui.QTextEdit.__init__(*(self,) + args)
        self.debug = debug
        # NOTE(review): plain attribute assignment, not self.setReadOnly(True)
        # — presumably intended to make the widget read-only; confirm.
        self.readOnly = True
        self.setFocusPolicy(QtCore.Qt.NoFocus)
        self.setFontFamily('monospace')
        # Remember the widget defaults so color codes can be reset later.
        self._textcolor = self.textColor()
        self._bgcolor = QtGui.QColor('#FFFFFF')
        # 'F' = foreground, 'B' = background: (setter, default value).
        self._setcolorcode = { 'F': (self.setTextColor, self._textcolor),
                               'B': (self.setTextBackgroundColor, self._bgcolor) }
        # Attribute code -> Qt font setter ('*' bold, '_' underline, '/' italic).
        self._setfont = { '*': self.setFontWeight,
                          '_': self.setFontUnderline,
                          '/': self.setFontItalic }
        # Per-attribute setter argument for the off (False) / on (True) states.
        self._fontvalues = { False: { '*': QtGui.QFont.Normal, '_': False, '/': False },
                             True: { '*': QtGui.QFont.Bold, '_': True, '/': True } }
        self._color = color.Color(config.color_options(), self.debug)

    def display(self, time, prefix, text, forcecolor=None):
        """Append one message line: "HH:MM prefix text".

        time: epoch timestamp (0 means "now"); prefix/text: raw strings
        possibly carrying WeeChat color codes; forcecolor: optional
        foreground color applied to the whole line.
        """
        if time == 0:
            d = datetime.datetime.now()
        else:
            d = datetime.datetime.fromtimestamp(float(time))
        # Timestamp rendered in a fixed dim gray.
        self.setTextColor(QtGui.QColor('#999999'))
        self.insertPlainText(d.strftime('%H:%M '))
        # Translate WeeChat color escapes into this widget's \x01(...) codes.
        prefix = self._color.convert(prefix)
        text = self._color.convert(text)
        if forcecolor:
            if prefix:
                prefix = '\x01(F%s)%s' % (forcecolor, prefix)
            text = '\x01(F%s)%s' % (forcecolor, text)
        if prefix:
            # Python 2: decode byte strings to unicode before insertion.
            self._display_with_colors(str(prefix).decode('utf-8') + ' ')
        if text:
            self._display_with_colors(str(text).decode('utf-8'))
            # Guarantee exactly one trailing newline per message.
            if text[-1:] != '\n':
                self.insertPlainText('\n')
        else:
            self.insertPlainText('\n')
        self.scroll_bottom()

    def _display_with_colors(self, string):
        """Insert *string*, interpreting embedded \x01(...) color codes.

        Each \x01 starts a code segment "(<action><code>)" followed by
        literal text.  Actions: '+' set attribute, '-' clear attribute,
        'F'/'B' set foreground/background color (optionally prefixed by
        attribute toggles, or 'r' for full reset).
        """
        # Start every line from the widget defaults.
        self.setTextColor(self._textcolor)
        self.setTextBackgroundColor(self._bgcolor)
        self._reset_attributes()
        items = string.split('\x01')
        for i, item in enumerate(items):
            # items[0] precedes any \x01 and is always literal text.
            if i > 0 and item.startswith('('):
                pos = item.find(')')
                if pos >= 2:
                    action = item[1]
                    code = item[2:pos]
                    if action == '+':
                        # set attribute
                        self._set_attribute(code[0], True)
                    elif action == '-':
                        # remove attribute
                        self._set_attribute(code[0], False)
                    else:
                        # reset attributes and color
                        if code == 'r':
                            self._reset_attributes()
                            self._setcolorcode[action][0](self._setcolorcode[action][1])
                        else:
                            # set attributes + color: leading toggle chars
                            # are consumed one by one, the remainder (if
                            # any) is the color itself.
                            while code.startswith(('*', '!', '/', '_', '|', 'r')):
                                if code[0] == 'r':
                                    self._reset_attributes()
                                elif code[0] in self._setfont:
                                    # Toggle: flip the attribute's current state.
                                    self._set_attribute(code[0], not self._font[code[0]])
                                code = code[1:]
                            if code:
                                self._setcolorcode[action][0](QtGui.QColor(code))
                    # Remaining text after the ')' is literal.
                    item = item[pos+1:]
            if len(item) > 0:
                self.insertPlainText(item)

    def _reset_attributes(self):
        """Clear bold/underline/italic back to their off state."""
        self._font = {}
        for attr in self._setfont:
            self._set_attribute(attr, False)

    def _set_attribute(self, attr, value):
        """Record attribute *attr* as *value* and apply it to the font."""
        self._font[attr] = value
        self._setfont[attr](self._fontvalues[self._font[attr]][attr])

    def scroll_bottom(self):
        """Scroll the view to the most recent (bottom) line."""
        bar = self.verticalScrollBar()
        bar.setValue(bar.maximum())
| gpl-3.0 |
milodky/kernel_for_nexus7 | Documentation/target/tcm_mod_builder.py | 3119 | 42754 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Root of the kernel source tree (set from the -p/--proto-ident options path).
tcm_dir = ""
# Function-pointer lines harvested from target_core_fabric_ops.h by
# tcm_mod_scan_fabric_ops(); consumed by tcm_mod_dump_fabric_ops().
fabric_ops = []
# Output directory for the generated fabric module sources.
fabric_mod_dir = ""
# Target/initiator port member prefixes ("lport"/"nport" for FC,
# "tport"/"iport" for SAS and iSCSI); set by tcm_mod_build_*_include().
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
	"""Print *msg* and abort the generator with exit status 1."""
	print msg
	sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h for a Fibre Channel fabric.

	Emits the nacl/tpg/lport structure definitions into the module
	directory and sets the module-level port naming globals to the FC
	conventions ("lport" for the target side, "nport" for the initiator).
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f

	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)

	# Header defines plus the three generated C structures: the per-initiator
	# node ACL, the target portal group, and the local FC port.
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
	buf += " u64 nport_wwpn;\n"
	buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
	buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += " struct se_node_acl se_node_acl;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += " /* FC lport target portal group tag for TCM */\n"
	buf += " u16 lport_tpgt;\n"
	buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
	buf += " struct " + fabric_mod_name + "_lport *lport;\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += " struct se_portal_group se_tpg;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_lport {\n"
	buf += " /* SCSI protocol the lport is providing */\n"
	buf += " u8 lport_proto_id;\n"
	buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
	buf += " u64 lport_wwpn;\n"
	buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
	buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
	buf += " struct se_wwn lport_wwn;\n"
	buf += "};\n"

	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	# Record the FC naming convention for the later generator stages.
	fabric_mod_port = "lport"
	fabric_mod_init_port = "nport"

	return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h for a SAS fabric.

	Emits the nacl/tpg/tport structure definitions into the module
	directory and sets the module-level port naming globals to the SAS
	conventions ("tport" for the target side, "iport" for the initiator).
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f

	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)

	# Header defines plus the three generated C structures: the per-initiator
	# node ACL, the target portal group, and the SAS target port.
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
	buf += " u64 iport_wwpn;\n"
	buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
	buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += " struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += " /* SAS port target portal group tag for TCM */\n"
	buf += " u16 tport_tpgt;\n"
	buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += " struct " + fabric_mod_name + "_tport *tport;\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += " struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += " /* SCSI protocol the tport is providing */\n"
	buf += " u8 tport_proto_id;\n"
	buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
	buf += " u64 tport_wwpn;\n"
	buf += " /* ASCII formatted WWPN for SAS Target port */\n"
	buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += " struct se_wwn tport_wwn;\n"
	buf += "};\n"

	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	# Record the SAS naming convention for the later generator stages.
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"

	return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_base.h for an iSCSI fabric.

	Like the FC/SAS variants but using ASCII IQN names instead of binary
	WWPNs.  Sets the module-level port naming globals to "tport"/"iport".
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print "Writing file: " + f

	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)

	# Header defines plus the three generated C structures: the per-initiator
	# node ACL, the target portal group, and the iSCSI target port.
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += " /* ASCII formatted InitiatorName */\n"
	buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += " struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += " /* iSCSI target portal group tag for TCM */\n"
	buf += " u16 tport_tpgt;\n"
	buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += " struct " + fabric_mod_name + "_tport *tport;\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += " struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += " /* SCSI protocol the tport is providing */\n"
	buf += " u8 tport_proto_id;\n"
	buf += " /* ASCII formatted TargetName for IQN */\n"
	buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += " struct se_wwn tport_wwn;\n"
	buf += "};\n"

	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)

	p.close()

	# Record the iSCSI naming convention for the later generator stages.
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"

	return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
	"""Dispatch to the protocol-specific _base.h generator.

	proto_ident must be one of "FC", "SAS" or "iSCSI"; any other value
	aborts the script with exit status 1.
	"""
	if proto_ident == "FC":
		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "SAS":
		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "iSCSI":
		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
	else:
		# Consistency fix: use the script's standard fatal-error helper
		# (same print + sys.exit(1) behavior as the inline version it
		# replaces) instead of hand-rolled print/exit.
		tcm_mod_err("Unsupported proto_ident: " + proto_ident)

	return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_configfs.c.

	Writes the complete configfs glue for the new fabric module: node ACL
	and TPG make/drop callbacks, WWN make/drop callbacks, the
	target_core_fabric_ops table, configfs (de)registration, and the
	module init/exit entry points.  FC/SAS fabrics additionally get
	WWPN-parsing placeholders that iSCSI (ASCII IQN based) does not.
	"""
	buf = ""

	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
	print "Writing file: " + f

	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)

	# --- generated file header: kernel and TCM includes ---
	buf = "#include <linux/module.h>\n"
	buf += "#include <linux/moduleparam.h>\n"
	buf += "#include <linux/version.h>\n"
	buf += "#include <generated/utsrelease.h>\n"
	buf += "#include <linux/utsname.h>\n"
	buf += "#include <linux/init.h>\n"
	buf += "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/configfs.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_transport.h>\n"
	buf += "#include <target/target_core_fabric_ops.h>\n"
	buf += "#include <target/target_core_fabric_configfs.h>\n"
	buf += "#include <target/target_core_fabric_lib.h>\n"
	buf += "#include <target/target_core_device.h>\n"
	buf += "#include <target/target_core_tpg.h>\n"
	buf += "#include <target/target_core_configfs.h>\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/configfs_macros.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"

	# --- <name>_make_nodeacl(): create an initiator node ACL ---
	buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
	buf += " struct se_portal_group *se_tpg,\n"
	buf += " struct config_group *group,\n"
	buf += " const char *name)\n"
	buf += "{\n"
	buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
	buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
	# FC/SAS identify initiators by binary WWPN; iSCSI uses the ASCII name.
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " u64 wwpn = 0;\n"
	buf += " u32 nexus_depth;\n\n"
	buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += " return ERR_PTR(-EINVAL); */\n"
	buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
	buf += " if (!(se_nacl_new))\n"
	buf += " return ERR_PTR(-ENOMEM);\n"
	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
	buf += " nexus_depth = 1;\n"
	buf += " /*\n"
	buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += " * when converting a NodeACL from demo mode -> explict\n"
	buf += " */\n"
	buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
	buf += " name, nexus_depth);\n"
	buf += " if (IS_ERR(se_nacl)) {\n"
	buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
	buf += " return se_nacl;\n"
	buf += " }\n"
	buf += " /*\n"
	buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
	buf += " */\n"
	buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
		buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += " return se_nacl;\n"
	buf += "}\n\n"

	# --- <name>_drop_nodeacl(): release an initiator node ACL ---
	buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
	buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
	buf += " kfree(nacl);\n"
	buf += "}\n\n"

	# --- <name>_make_tpg() / <name>_drop_tpg(): portal group lifecycle ---
	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
	buf += " struct se_wwn *wwn,\n"
	buf += " struct config_group *group,\n"
	buf += " const char *name)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
	buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
	buf += " unsigned long tpgt;\n"
	buf += " int ret;\n\n"
	buf += " if (strstr(name, \"tpgt_\") != name)\n"
	buf += " return ERR_PTR(-EINVAL);\n"
	buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
	buf += " return ERR_PTR(-EINVAL);\n\n"
	buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
	buf += " if (!(tpg)) {\n"
	buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
	buf += " return ERR_PTR(-ENOMEM);\n"
	buf += " }\n"
	buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
	buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
	buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
	buf += " &tpg->se_tpg, (void *)tpg,\n"
	buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
	buf += " if (ret < 0) {\n"
	buf += " kfree(tpg);\n"
	buf += " return NULL;\n"
	buf += " }\n"
	buf += " return &tpg->se_tpg;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
	buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
	buf += " core_tpg_deregister(se_tpg);\n"
	buf += " kfree(tpg);\n"
	buf += "}\n\n"

	# --- <name>_make_<port>() / <name>_drop_<port>(): WWN lifecycle ---
	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
	buf += " struct target_fabric_configfs *tf,\n"
	buf += " struct config_group *group,\n"
	buf += " const char *name)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " u64 wwpn = 0;\n\n"
	buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += " return ERR_PTR(-EINVAL); */\n\n"
	buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
	buf += " if (!(" + fabric_mod_port + ")) {\n"
	buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
	buf += " return ERR_PTR(-ENOMEM);\n"
	buf += " }\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
		# NOTE(review): "__NAMELEN" (double underscore) below looks like a
		# typo for "_NAMELEN", but it sits inside a commented-out C line in
		# the generated source — confirm against upstream before changing.
		buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
	buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
	buf += "{\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
	buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
	buf += " kfree(" + fabric_mod_port + ");\n"
	buf += "}\n\n"

	# --- configfs "version" attribute under the fabric's wwn directory ---
	buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
	buf += " struct target_fabric_configfs *tf,\n"
	buf += " char *page)\n"
	buf += "{\n"
	buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += " utsname()->machine);\n"
	buf += "}\n\n"
	buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
	buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
	buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
	buf += " NULL,\n"
	buf += "};\n\n"

	# --- the fabric ops table wiring every callback generated above plus
	# --- the stubs emitted by tcm_mod_dump_fabric_ops() ---
	buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
	buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
	buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
	buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
	buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
	buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
	buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
	buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
	buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
	buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
	buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
	buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
	buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
	buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
	buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
	buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
	buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
	buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
	buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
	buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
	buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
	buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
	buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
	buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
	buf += " .sess_get_initiator_sid = NULL,\n"
	buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
	buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
	buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
	buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
	buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
	buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
	buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
	buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
	buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
	buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
	buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
	buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
	buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
	buf += " /*\n"
	buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
	buf += " */\n"
	buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
	buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
	buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
	buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
	buf += " .fabric_post_link = NULL,\n"
	buf += " .fabric_pre_unlink = NULL,\n"
	buf += " .fabric_make_np = NULL,\n"
	buf += " .fabric_drop_np = NULL,\n"
	buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
	buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
	buf += "};\n\n"

	# --- configfs registration / deregistration helpers ---
	buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
	buf += "{\n"
	buf += " struct target_fabric_configfs *fabric;\n"
	buf += " int ret;\n\n"
	buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += " utsname()->machine);\n"
	buf += " /*\n"
	buf += " * Register the top level struct config_item_type with TCM core\n"
	buf += " */\n"
	buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
	buf += " if (!(fabric)) {\n"
	buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
	buf += " return -ENOMEM;\n"
	buf += " }\n"
	buf += " /*\n"
	buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
	buf += " */\n"
	buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
	buf += " /*\n"
	buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
	buf += " */\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
	buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
	buf += " /*\n"
	buf += " * Register the fabric for use within TCM\n"
	buf += " */\n"
	buf += " ret = target_fabric_configfs_register(fabric);\n"
	buf += " if (ret < 0) {\n"
	buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
	buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
	buf += " return ret;\n"
	buf += " }\n"
	buf += " /*\n"
	buf += " * Setup our local pointer to *fabric\n"
	buf += " */\n"
	buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
	buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += " return 0;\n"
	buf += "};\n\n"
	buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
	buf += "{\n"
	buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
	buf += " return;\n\n"
	buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
	buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
	buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "};\n\n"

	# --- module init/exit entry points and module metadata ---
	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
	buf += "{\n"
	buf += " int ret;\n\n"
	buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
	buf += " if (ret < 0)\n"
	buf += " return ret;\n\n"
	buf += " return 0;\n"
	buf += "};\n\n"
	buf += "static void " + fabric_mod_name + "_exit(void)\n"
	buf += "{\n"
	buf += " " + fabric_mod_name + "_deregister_configfs();\n"
	buf += "};\n\n"
	buf += "#ifdef MODULE\n"
	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
	buf += "MODULE_LICENSE(\"GPL\");\n"
	buf += "module_init(" + fabric_mod_name + "_init);\n"
	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
	buf += "#endif\n"

	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)

	p.close()
	return
def tcm_mod_scan_fabric_ops(tcm_dir):
	"""Harvest function-pointer member lines from target_core_fabric_ops.h.

	Appends each line containing a "(*" function-pointer declaration to
	the module-level ``fabric_ops`` list, which tcm_mod_dump_fabric_ops()
	later turns into stub implementations.
	"""
	fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"

	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	# process_fo acts as a tiny state flag: 0 = not yet collecting,
	# 1 = collecting "(*" lines.
	process_fo = 0;

	p = open(fabric_ops_api, 'r')

	line = p.readline()
	while line:
		# NOTE(review): this state machine flips to "collecting" on the
		# first line that is NOT the struct header, so in practice it
		# collects "(*" lines from the whole header file rather than only
		# from inside struct target_core_fabric_ops — confirm this matches
		# the intended upstream behavior before changing it.
		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue

		if process_fo == 0:
			process_fo = 1;
			line = p.readline()
			# Search for function pointer
			if not re.search('\(\*', line):
				continue

			fabric_ops.append(line.rstrip())
			continue

		line = p.readline()
		# Search for function pointer
		if not re.search('\(\*', line):
			continue

		fabric_ops.append(line.rstrip())

	p.close()
	return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
	"""Write the new module's Makefile (kbuild fragment): the two object
	files making up the module plus the obj-$(CONFIG_...) target."""
	buf = ""
	f = fabric_mod_dir_var + "/Makefile"
	print "Writing file: " + f
	p = open(f, 'w')
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
	buf += "					   " + fabric_mod_name + "_configfs.o\n"
	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
	# NOTE(review): file.write() returns None on Python 2, so this
	# error check never fires.
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
	"""Write the new module's Kconfig entry: a tristate option that
	depends on TARGET_CORE && CONFIGFS_FS and defaults to n."""
	buf = ""
	f = fabric_mod_dir_var + "/Kconfig"
	print "Writing file: " + f
	p = open(f, 'w')
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	buf = "config " + fabric_mod_name.upper() + "\n"
	buf += "	tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
	buf += "	depends on TARGET_CORE && CONFIGFS_FS\n"
	buf += "	default n\n"
	buf += "	---help---\n"
	buf += "	Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
	# NOTE(review): file.write() returns None on Python 2, so this
	# error check never fires.
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
	"""Append the new fabric module's kbuild entry to the top-level
	drivers/target/Makefile so the build descends into its directory."""
	makefile = tcm_dir + "/drivers/target/Makefile"
	entry = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
	handle = open(makefile, 'a')
	handle.write(entry)
	handle.close()
	return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
	"""Append a 'source' line for the new module's Kconfig to the
	top-level drivers/target/Kconfig."""
	kconfig_path = tcm_dir + "/drivers/target/Kconfig"
	entry = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
	handle = open(kconfig_path, 'a')
	handle.write(entry)
	handle.close()
	return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
# Command-line handling. Both -m/--modulename and -p/--protoident are
# mandatory; option parsing runs at import time, main() only under the
# __main__ guard below.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
		action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
		action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
	if not opts.__dict__[m]:
		print "mandatory option is missing\n"
		parser.print_help()
		exit(-1)
if __name__ == "__main__":
	main(str(opts.modname), opts.protoident)
| gpl-2.0 |
asimurzin/hybridFlu | hybridFlu/examples/test_icoFoam_piso_2.py | 1 | 7583 | #!/usr/bin/env python
#---------------------------------------------------------------------------
## Copyright (C) 2010- Alexey Petrov
## Copyright (C) 2009-2010 Pebble Bed Modular Reactor (Pty) Limited (PBMR)
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## See http://sourceforge.net/projects/pythonflu
##
## Author : Ivor CLIFFORD
##
#---------------------------------------------------------------------------
"""
Example icoFoam PISO algorithm
"""
from salome_version import getVersion as SalomeVersion
if SalomeVersion() > '5.1.4':
import os
print "Not supported Salome version. Use Salome 5.1.4 or 5.1.3"
os._exit( os.EX_OK )
pass
from Foam import ref, man
from Foam import fvm, fvc
from Tkinter import *
import Pmw
class pyIcoFoam:
    """Incompressible laminar solver (icoFoam) driven from Python through
    pythonFlu: PISO pressure-velocity coupling, one time step per call
    to step(). The OpenFOAM objects (runTime, fields, phi) are created
    by the caller and shared with this class."""
    def __init__(self, runTime, U, p, phi, transportProperties, pRefCell=0, pRefValue=0.0):
        # Handles to the caller-owned OpenFOAM objects.
        self.runTime = runTime
        self.U = U
        self.p = p
        self.phi = phi
        self.transportProperties = transportProperties
        self.pRefCell=pRefCell
        self.pRefValue=pRefValue
        # Initial residuals of the most recent pressure/momentum solves;
        # updated by step() and read by the plotting code.
        self.pressureRes = 0.0
        self.velocityRes = 0.0
    def step(self, nCorr=1, nNonOrthCorr=1):
        """Advance one time step: momentum predictor followed by nCorr
        PISO correctors, each with nNonOrthCorr non-orthogonal pressure
        corrections."""
        U_ = self.U
        p_ = self.p
        phi_ = self.phi
        runTime_ = self.runTime
        mesh_ = U_.mesh()
        runTime_.increment()
        # Read transport properties
        nu = ref.dimensionedScalar(self.transportProperties.lookup(ref.word("nu")))
        # Momentum predictor: solve ddt(U) + div(phi,U) - laplacian(nu,U) = -grad(p).
        tmp_UEqn = ( ref.fvm.ddt( U_ ) + ref.fvm.div( phi_, U_ ) - ref.fvm.laplacian( nu, U_ ) )
        UEqn = tmp_UEqn()
        self.velocityRes = ref.solve( UEqn == -ref.fvc.grad( p_ ) ).initialResidual()
        # --- PISO loop
        for corr in range(nCorr):
            tmp_rUA = 1.0 / UEqn.A()
            rUA = tmp_rUA()
            U_ << rUA * UEqn.H()
            phi_ << ( ref.fvc.interpolate(U_) & mesh_.Sf() )
            for nonOrth in range(nNonOrthCorr):
                tmp_pEqn = ( ref.fvm.laplacian( rUA, p_ ) == ref.fvc.div( phi_ ) )
                pEqn = tmp_pEqn()
                pEqn.setReference( self.pRefCell, self.pRefValue )
                pressureRes = pEqn.solve().initialResidual()
                if nonOrth == 0:
                    self.pressureRes = pressureRes
                # NOTE(review): nonOrth ranges over [0, nNonOrthCorr), so
                # this condition is never true and the flux correction
                # never runs — likely meant nNonOrthCorr - 1; confirm
                # against the stock icoFoam solver.
                if nonOrth == nNonOrthCorr:
                    phi_ -= pEqn.flux()
            # Continuity errors
            tmp_contErr = ref.fvc.div( phi_ );
            contErr = tmp_contErr()
            sumLocalContErr = (
                runTime_.deltaT().value()
                * contErr.mag().weightedAverage( mesh_.V() ).value()
                )
            globalContErr = (
                runTime_.deltaT().value()
                * contErr.weightedAverage( mesh_.V() ).value()
                )
            print "time step continuity errors : sum local = " + str(sumLocalContErr) + ", global = " + str(globalContErr)
            # Correct velocity
            U_-= rUA * ref.fvc.grad( p_ )
            U_.correctBoundaryConditions()
# Create root and case: the case directory lives under the package's
# examples directory (HYBRIDFLU_ROOT_DIR must be set in the environment).
import os
root = ref.fileName( os.path.join( os.environ[ "HYBRIDFLU_ROOT_DIR" ], 'hybridFlu', 'examples' ) )
case = ref.fileName( "case_icoFoam_piso" )
# Create time; override start/end/deltaT from the case's controlDict
# (remove then re-add, then re-read the dictionary).
runTime = man.Time(ref.word("controlDict"), root, case)
runTime.controlDict().remove(ref.word("startTime"))
runTime.controlDict().remove(ref.word("endTime"))
runTime.controlDict().remove(ref.word("deltaT"))
runTime.controlDict().add(ref.word("startTime"), 0)
runTime.controlDict().add(ref.word("endTime"), 0.5)
runTime.controlDict().add(ref.word("deltaT"), 0.005)
runTime.read()
# Create mesh
mesh = man.fvMesh( man.IOobject( ref.word("region0"),
                                 ref.fileName(runTime.timeName()),
                                 runTime,
                                 ref.IOobject.MUST_READ,
                                 ref.IOobject.NO_WRITE))
# Create transport properties; kinematic viscosity is then overridden
# to 0.05 in memory.
transportProperties = ref.IOdictionary(ref.IOobject( ref.word("transportProperties"),
                                                     ref.fileName(runTime.constant()),
                                                     mesh,
                                                     ref.IOobject.MUST_READ,
                                                     ref.IOobject.AUTO_WRITE))
nu = ref.dimensionedScalar(transportProperties.lookup(ref.word("nu")))
nu.setValue(0.05)
# Create pressure field: read
p = man.volScalarField( man.IOobject( ref.word("p"),
                                      ref.fileName(runTime.timeName()),
                                      mesh,
                                      ref.IOobject.MUST_READ,
                                      ref.IOobject.AUTO_WRITE ),
                        mesh )
# Create velocity field: read
U = man.volVectorField( man.IOobject( ref.word("U"),
                                      ref.fileName(runTime.timeName()),
                                      mesh,
                                      ref.IOobject.MUST_READ,
                                      ref.IOobject.AUTO_WRITE ),
                        mesh)
phi = ref.createPhi( runTime, mesh, U )
print "Time: " + str(runTime.timeName())
solver = pyIcoFoam(runTime, U, p, phi, transportProperties, 0, 0.0)
pRes = [] #initial pressure residual
uRes = [] #initial velocity residual
it = []
iteration = []
iteration.append(0)
# Graphics related stuff
master = Tk()
g = Pmw.Blt.Graph(master)
g.pack(expand=1,fill='both')
# Graph related commands: one line per residual series, log-scale y axis.
g.line_create("p-Residual", xdata=iteration[0], ydata=None)
g.element_configure("p-Residual", color = "red", dashes = 1,
                    symbol = "", linewidth = 1)
g.line_create("u-Residual", xdata=iteration[0], ydata=None)
g.element_configure("u-Residual", color = "blue", dashes = 1,
                    symbol = "", linewidth = 1)
g.axis_configure("y",logscale = 1)
# Main iterate function
def iterate(niter):
    """Run niter solver steps, appending each step's initial pressure and
    velocity residuals to the module-level pRes/uRes lists and refreshing
    the Pmw.Blt residual plot after every step. iteration[0] keeps a
    running count so repeated calls continue the x axis."""
    for i in xrange(niter):
        # Offset the plot abscissa by iterations done in earlier calls.
        i += iteration[0]
        it.append(i)
        solver.step(2,1)
        runTime.value()
        pRes.append(solver.pressureRes)
        uRes.append(solver.velocityRes)
        pResTpl = tuple(pRes)
        uResTpl = tuple(uRes)
        # Update residual plot
        g.axis_configure("y",logscale = 1)
        g.element_configure("p-Residual", xdata=tuple(it), ydata = pResTpl,
                            color = "red", dashes = 0,
                            symbol = "", linewidth = 1)
        g.element_configure("u-Residual", xdata=tuple(it), ydata = uResTpl,
                            color = "blue", dashes = 0,
                            symbol = "", linewidth = 1)
        master.update_idletasks()
    iteration[0] += niter
# Run ten outer iterations, then force the final fields to be written.
iterate(10)
runTime.writeNow()
#--------------------------------------------------------------------------------------
| gpl-3.0 |
wangyum/mxnet | python/mxnet/gluon/rnn/rnn_layer.py | 8 | 23101 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=no-member, invalid-name, protected-access, no-self-use
# pylint: disable=too-many-branches, too-many-arguments, no-self-use
# pylint: disable=too-many-lines, arguments-differ
"""Definition of various recurrent neural network layers."""
from __future__ import print_function
__all__ = ['RNN', 'LSTM', 'GRU']
from ... import ndarray
from .. import Block
from . import rnn_cell
class _RNNLayer(Block):
"""Implementation of recurrent layers."""
def __init__(self, hidden_size, num_layers, layout,
dropout, bidirectional, input_size,
i2h_weight_initializer, h2h_weight_initializer,
i2h_bias_initializer, h2h_bias_initializer,
mode, **kwargs):
super(_RNNLayer, self).__init__(**kwargs)
assert layout == 'TNC' or layout == 'NTC', \
"Invalid layout %s; must be one of ['TNC' or 'NTC']"%layout
self._hidden_size = hidden_size
self._num_layers = num_layers
self._mode = mode
self._layout = layout
self._dropout = dropout
self._dir = 2 if bidirectional else 1
self._input_size = input_size
self._i2h_weight_initializer = i2h_weight_initializer
self._h2h_weight_initializer = h2h_weight_initializer
self._i2h_bias_initializer = i2h_bias_initializer
self._h2h_bias_initializer = h2h_bias_initializer
self._gates = {'rnn_relu': 1, 'rnn_tanh': 1, 'lstm': 4, 'gru': 3}[mode]
self.i2h_weight = []
self.h2h_weight = []
self.i2h_bias = []
self.h2h_bias = []
ng, ni, nh = self._gates, input_size, hidden_size
for i in range(num_layers):
for j in (['l', 'r'] if self._dir == 2 else ['l']):
self.i2h_weight.append(
self.params.get('%s%d_i2h_weight'%(j, i), shape=(ng*nh, ni),
init=i2h_weight_initializer,
allow_deferred_init=True))
self.h2h_weight.append(
self.params.get('%s%d_h2h_weight'%(j, i), shape=(ng*nh, nh),
init=h2h_weight_initializer,
allow_deferred_init=True))
self.i2h_bias.append(
self.params.get('%s%d_i2h_bias'%(j, i), shape=(ng*nh,),
init=i2h_bias_initializer,
allow_deferred_init=True))
self.h2h_bias.append(
self.params.get('%s%d_h2h_bias'%(j, i), shape=(ng*nh,),
init=h2h_bias_initializer,
allow_deferred_init=True))
ni = nh * self._dir
self._unfused = self._unfuse()
def __repr__(self):
s = '{name}({mapping}, {_layout}'
if self._num_layers != 1:
s += ', num_layers={_num_layers}'
if self._dropout != 0:
s += ', dropout={_dropout}'
if self._dir == 2:
s += ', bidirectional'
s += ')'
mapping = ('{_input_size} -> {_hidden_size}'.format(**self.__dict__) if self._input_size
else self._hidden_size)
return s.format(name=self.__class__.__name__,
mapping=mapping,
**self.__dict__)
def state_info(self, batch_size=0):
raise NotImplementedError
def _unfuse(self):
"""Unfuses the fused RNN in to a stack of rnn cells."""
get_cell = {'rnn_relu': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size,
activation='relu',
**kwargs),
'rnn_tanh': lambda **kwargs: rnn_cell.RNNCell(self._hidden_size,
activation='tanh',
**kwargs),
'lstm': lambda **kwargs: rnn_cell.LSTMCell(self._hidden_size,
**kwargs),
'gru': lambda **kwargs: rnn_cell.GRUCell(self._hidden_size,
**kwargs)}[self._mode]
stack = rnn_cell.SequentialRNNCell(prefix=self.prefix, params=self.params)
with stack.name_scope():
ni = self._input_size
for i in range(self._num_layers):
kwargs = {'input_size': ni,
'i2h_weight_initializer': self._i2h_weight_initializer,
'h2h_weight_initializer': self._h2h_weight_initializer,
'i2h_bias_initializer': self._i2h_bias_initializer,
'h2h_bias_initializer': self._h2h_bias_initializer}
if self._dir == 2:
stack.add(rnn_cell.BidirectionalCell(
get_cell(prefix='l%d_'%i, **kwargs),
get_cell(prefix='r%d_'%i, **kwargs)))
else:
stack.add(get_cell(prefix='l%d_'%i, **kwargs))
if self._dropout > 0 and i != self._num_layers - 1:
stack.add(rnn_cell.DropoutCell(self._dropout))
ni = self._hidden_size * self._dir
return stack
def begin_state(self, batch_size=0, func=ndarray.zeros, **kwargs):
"""Initial state for this cell.
Parameters
----------
batch_size: int
Only required for `NDArray` API. Size of the batch ('N' in layout).
Dimension of the input.
func : callable, default `ndarray.zeros`
Function for creating initial state.
For Symbol API, func can be `symbol.zeros`, `symbol.uniform`,
`symbol.var` etc. Use `symbol.var` if you want to directly
feed input as states.
For NDArray API, func can be `ndarray.zeros`, `ndarray.ones`, etc.
**kwargs :
Additional keyword arguments passed to func. For example
`mean`, `std`, `dtype`, etc.
Returns
-------
states : nested list of Symbol
Starting states for the first RNN step.
"""
states = []
for i, info in enumerate(self.state_info(batch_size)):
if info is not None:
info.update(kwargs)
else:
info = kwargs
states.append(func(name='%sh0_%d'%(self.prefix, i), **info))
return states
def forward(self, inputs, states=None):
batch_size = inputs.shape[self._layout.find('N')]
skip_states = states is None
if skip_states:
states = self.begin_state(batch_size, ctx=inputs.context)
if isinstance(states, ndarray.NDArray):
states = [states]
for state, info in zip(states, self.state_info(batch_size)):
if state.shape != info['shape']:
raise ValueError(
"Invalid recurrent state shape. Expecting %s, got %s."%(
str(info['shape']), str(state.shape)))
if self._input_size == 0:
for i in range(self._dir):
self.i2h_weight[i].shape = (self._gates*self._hidden_size, inputs.shape[2])
self.i2h_weight[i]._finish_deferred_init()
if inputs.context.device_type == 'gpu':
out = self._forward_gpu(inputs, states)
else:
out = self._forward_cpu(inputs, states)
# out is (output, state)
return out[0] if skip_states else out
def _forward_cpu(self, inputs, states):
ns = len(states)
axis = self._layout.find('T')
states = sum(zip(*((j for j in i) for i in states)), ())
outputs, states = self._unfused.unroll(
inputs.shape[axis], inputs, states,
layout=self._layout, merge_outputs=True)
new_states = []
for i in range(ns):
state = ndarray.concat(*(j.reshape((1,)+j.shape) for j in states[i::ns]), dim=0)
new_states.append(state)
return outputs, new_states
    def _forward_gpu(self, inputs, states):
        """Run the fused ``ndarray.RNN`` operator (GPU path)."""
        if self._layout == 'NTC':
            # The fused RNN operator expects time-major (TNC) input.
            inputs = ndarray.swapaxes(inputs, dim1=0, dim2=1)
        ctx = inputs.context
        # Flatten all parameters into the single blob the fused op consumes:
        # interleaved i2h/h2h weights first, then interleaved i2h/h2h biases.
        params = sum(zip(self.i2h_weight, self.h2h_weight), ())
        params += sum(zip(self.i2h_bias, self.h2h_bias), ())
        params = (i.data(ctx).reshape((-1,)) for i in params)
        params = ndarray.concat(*params, dim=0)

        rnn = ndarray.RNN(inputs, params, *states, state_size=self._hidden_size,
                          num_layers=self._num_layers, bidirectional=self._dir == 2,
                          p=self._dropout, state_outputs=True, mode=self._mode)

        if self._mode == 'lstm':
            outputs, states = rnn[0], [rnn[1], rnn[2]]  # (output, [h, c])
        else:
            outputs, states = rnn[0], [rnn[1]]

        if self._layout == 'NTC':
            outputs = ndarray.swapaxes(outputs, dim1=0, dim2=1)

        return outputs, states
class RNN(_RNNLayer):
    r"""Applies a multi-layer Elman RNN with `tanh` or `ReLU` non-linearity to an input sequence.

    For each element in the input sequence, each layer computes the following
    function:

    .. math::
        h_t = \tanh(w_{ih} * x_t + b_{ih} + w_{hh} * h_{(t-1)} + b_{hh})

    where :math:`h_t` is the hidden state at time `t`, and :math:`x_t` is the hidden
    state of the previous layer at time `t` or :math:`input_t` for the first layer.
    If nonlinearity='relu', then `ReLU` is used instead of `tanh`.

    Parameters
    ----------
    hidden_size: int
        The number of features in the hidden state h.
    num_layers: int, default 1
        Number of recurrent layers.
    activation: {'relu' or 'tanh'}, default 'relu'
        The activation function to use.
    layout : str, default 'TNC'
        The format of input and output tensors. T, N and C stand for
        sequence length, batch size, and feature dimensions respectively.
    dropout: float, default 0
        If non-zero, introduces a dropout layer on the outputs of each
        RNN layer except the last layer.
    bidirectional: bool, default False
        If `True`, becomes a bidirectional RNN.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the linear
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the linear
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    input_size: int, default 0
        The number of expected features in the input x.
        If not specified, it will be inferred from input.
    prefix : str or None
        Prefix of this `Block`.
    params : ParameterDict or None
        Shared Parameters for this `Block`.


    Inputs:
        - **data**: input tensor with shape `(sequence_length, batch_size, input_size)`
          when `layout` is "TNC". For other layouts dimensions are permuted accordingly.
        - **states**: initial recurrent state tensor with shape
          `(num_layers, batch_size, num_hidden)`. If `bidirectional` is True,
          shape will instead be `(2*num_layers, batch_size, num_hidden)`. If
          `states` is None, zeros will be used as default begin states.

    Outputs:
        - **out**: output tensor with shape `(sequence_length, batch_size, num_hidden)`
          when `layout` is "TNC". If `bidirectional` is True, output shape will instead
          be `(sequence_length, batch_size, 2*num_hidden)`
        - **out_states**: output recurrent state tensor with the same shape as `states`.
          If `states` is None `out_states` will not be returned.


    Examples
    --------
    >>> layer = mx.gluon.rnn.RNN(100, 3)
    >>> layer.initialize()
    >>> input = mx.nd.random_uniform(shape=(5, 3, 10))
    >>> # by default zeros are used as begin state
    >>> output = layer(input)
    >>> # manually specify begin state.
    >>> h0 = mx.nd.random_uniform(shape=(3, 3, 100))
    >>> output, hn = layer(input, h0)
    """
    def __init__(self, hidden_size, num_layers=1, activation='relu',
                 layout='TNC', dropout=0, bidirectional=False,
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 input_size=0, **kwargs):
        super(RNN, self).__init__(hidden_size, num_layers, layout,
                                  dropout, bidirectional, input_size,
                                  i2h_weight_initializer, h2h_weight_initializer,
                                  i2h_bias_initializer, h2h_bias_initializer,
                                  'rnn_'+activation, **kwargs)

    def state_info(self, batch_size=0):
        # One hidden-state tensor; the leading 'L' axis spans layers
        # (doubled when bidirectional).
        return [{'shape': (self._num_layers * self._dir, batch_size, self._hidden_size),
                 '__layout__': 'LNC'}]
class LSTM(_RNNLayer):
    r"""Applies a multi-layer long short-term memory (LSTM) RNN to an input sequence.

    For each element in the input sequence, each layer computes the following
    function:

    .. math::
        \begin{array}{ll}
        i_t = sigmoid(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\
        f_t = sigmoid(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\
        g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{(t-1)} + b_{hg}) \\
        o_t = sigmoid(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\
        c_t = f_t * c_{(t-1)} + i_t * g_t \\
        h_t = o_t * \tanh(c_t)
        \end{array}

    where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the
    cell state at time `t`, :math:`x_t` is the hidden state of the previous
    layer at time `t` or :math:`input_t` for the first layer, and :math:`i_t`,
    :math:`f_t`, :math:`g_t`, :math:`o_t` are the input, forget, cell, and
    out gates, respectively.

    Parameters
    ----------
    hidden_size: int
        The number of features in the hidden state h.
    num_layers: int, default 1
        Number of recurrent layers.
    layout : str, default 'TNC'
        The format of input and output tensors. T, N and C stand for
        sequence length, batch size, and feature dimensions respectively.
    dropout: float, default 0
        If non-zero, introduces a dropout layer on the outputs of each
        RNN layer except the last layer.
    bidirectional: bool, default False
        If `True`, becomes a bidirectional RNN.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the linear
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the linear
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer, default 'zeros'
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    input_size: int, default 0
        The number of expected features in the input x.
        If not specified, it will be inferred from input.
    prefix : str or None
        Prefix of this `Block`.
    params : `ParameterDict` or `None`
        Shared Parameters for this `Block`.


    Inputs:
        - **data**: input tensor with shape `(sequence_length, batch_size, input_size)`
          when `layout` is "TNC". For other layouts dimensions are permuted accordingly.
        - **states**: a list of two initial recurrent state tensors. Each has shape
          `(num_layers, batch_size, num_hidden)`. If `bidirectional` is True,
          shape will instead be `(2*num_layers, batch_size, num_hidden)`. If
          `states` is None, zeros will be used as default begin states.

    Outputs:
        - **out**: output tensor with shape `(sequence_length, batch_size, num_hidden)`
          when `layout` is "TNC". If `bidirectional` is True, output shape will instead
          be `(sequence_length, batch_size, 2*num_hidden)`
        - **out_states**: a list of two output recurrent state tensors with the same
          shape as in `states`. If `states` is None `out_states` will not be returned.


    Examples
    --------
    >>> layer = mx.gluon.rnn.LSTM(100, 3)
    >>> layer.initialize()
    >>> input = mx.nd.random_uniform(shape=(5, 3, 10))
    >>> # by default zeros are used as begin state
    >>> output = layer(input)
    >>> # manually specify begin state.
    >>> h0 = mx.nd.random_uniform(shape=(3, 3, 100))
    >>> c0 = mx.nd.random_uniform(shape=(3, 3, 100))
    >>> output, hn = layer(input, [h0, c0])
    """
    def __init__(self, hidden_size, num_layers=1, layout='TNC',
                 dropout=0, bidirectional=False, input_size=0,
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 **kwargs):
        super(LSTM, self).__init__(hidden_size, num_layers, layout,
                                   dropout, bidirectional, input_size,
                                   i2h_weight_initializer, h2h_weight_initializer,
                                   i2h_bias_initializer, h2h_bias_initializer,
                                   'lstm', **kwargs)

    def state_info(self, batch_size=0):
        # Two states: hidden state h and cell state c, both layer-major.
        return [{'shape': (self._num_layers * self._dir, batch_size, self._hidden_size),
                 '__layout__': 'LNC'},
                {'shape': (self._num_layers * self._dir, batch_size, self._hidden_size),
                 '__layout__': 'LNC'}]
class GRU(_RNNLayer):
    r"""Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.

    For each element in the input sequence, each layer computes the following
    function:

    .. math::
        \begin{array}{ll}
        r_t = sigmoid(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
        i_t = sigmoid(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\
        n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\
        h_t = (1 - i_t) * n_t + i_t * h_{(t-1)} \\
        \end{array}

    where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the hidden
    state of the previous layer at time `t` or :math:`input_t` for the first layer,
    and :math:`r_t`, :math:`i_t`, :math:`n_t` are the reset, input, and new gates, respectively.

    Parameters
    ----------
    hidden_size: int
        The number of features in the hidden state h.
    num_layers: int, default 1
        Number of recurrent layers.
    layout : str, default 'TNC'
        The format of input and output tensors. T, N and C stand for
        sequence length, batch size, and feature dimensions respectively.
    dropout: float, default 0
        If non-zero, introduces a dropout layer on the outputs of each
        RNN layer except the last layer.
    bidirectional: bool, default False
        If True, becomes a bidirectional RNN.
    i2h_weight_initializer : str or Initializer
        Initializer for the input weights matrix, used for the linear
        transformation of the inputs.
    h2h_weight_initializer : str or Initializer
        Initializer for the recurrent weights matrix, used for the linear
        transformation of the recurrent state.
    i2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    h2h_bias_initializer : str or Initializer
        Initializer for the bias vector.
    input_size: int, default 0
        The number of expected features in the input x.
        If not specified, it will be inferred from input.
    prefix : str or None
        Prefix of this `Block`.
    params : ParameterDict or None
        Shared Parameters for this `Block`.


    Inputs:
        - **data**: input tensor with shape `(sequence_length, batch_size, input_size)`
          when `layout` is "TNC". For other layouts dimensions are permuted accordingly.
        - **states**: initial recurrent state tensor with shape
          `(num_layers, batch_size, num_hidden)`. If `bidirectional` is True,
          shape will instead be `(2*num_layers, batch_size, num_hidden)`. If
          `states` is None, zeros will be used as default begin states.

    Outputs:
        - **out**: output tensor with shape `(sequence_length, batch_size, num_hidden)`
          when `layout` is "TNC". If `bidirectional` is True, output shape will instead
          be `(sequence_length, batch_size, 2*num_hidden)`
        - **out_states**: output recurrent state tensor with the same shape as `states`.
          If `states` is None `out_states` will not be returned.


    Examples
    --------
    >>> layer = mx.gluon.rnn.GRU(100, 3)
    >>> layer.initialize()
    >>> input = mx.nd.random_uniform(shape=(5, 3, 10))
    >>> # by default zeros are used as begin state
    >>> output = layer(input)
    >>> # manually specify begin state.
    >>> h0 = mx.nd.random_uniform(shape=(3, 3, 100))
    >>> output, hn = layer(input, h0)
    """
    def __init__(self, hidden_size, num_layers=1, layout='TNC',
                 dropout=0, bidirectional=False, input_size=0,
                 i2h_weight_initializer=None, h2h_weight_initializer=None,
                 i2h_bias_initializer='zeros', h2h_bias_initializer='zeros',
                 **kwargs):
        super(GRU, self).__init__(hidden_size, num_layers, layout,
                                  dropout, bidirectional, input_size,
                                  i2h_weight_initializer, h2h_weight_initializer,
                                  i2h_bias_initializer, h2h_bias_initializer,
                                  'gru', **kwargs)

    def state_info(self, batch_size=0):
        # Single hidden state, layer-major ('L' axis, doubled if bidirectional).
        return [{'shape': (self._num_layers * self._dir, batch_size, self._hidden_size),
                 '__layout__': 'LNC'}]
| apache-2.0 |
gangadharkadam/letzfrappe | frappe/desk/report_dump.py | 39 | 2844 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import json
import copy
@frappe.whitelist()
def get_data(doctypes, last_modified):
    """Dump report data for the requested doctypes.

    :param doctypes: JSON-encoded list of doctype identifiers. An identifier
        may carry a ``[...]`` suffix; everything before the first ``[`` is
        the real doctype name.
    :param last_modified: JSON-encoded dict mapping doctype -> timestamp of
        the previous dump. Doctypes listed here get a partial (incremental)
        dump containing only rows modified since that timestamp.
    :return: dict keyed by doctype, each value containing ``columns``,
        ``data`` (list of rows), ``last_modified`` and, for full dumps,
        ``links`` -- where linked values in ``data`` are replaced by the row
        index into the linked doctype's data.
    """
    data_map = {}
    for dump_report_map in frappe.get_hooks().dump_report_map:
        data_map.update(frappe.get_attr(dump_report_map))

    out = {}
    doctypes = json.loads(doctypes)
    last_modified = json.loads(last_modified)

    for d in doctypes:
        args = copy.deepcopy(data_map[d])
        dt = d.find("[") != -1 and d[:d.find("[")] or d
        out[dt] = {}

        # When data comes from an explicit "from" clause (a join), the
        # modified column lives on the child table aliased as "item".
        if args.get("from"):
            modified_table = "item."
        else:
            modified_table = ""

        conditions = order_by = ""
        table = args.get("from") or ("`tab%s`" % dt)

        if d in last_modified:
            if not args.get("conditions"):
                args['conditions'] = []
            # SECURITY(review): the request-supplied timestamp is spliced
            # directly into SQL here; it should be passed as a bound value
            # like in the sql_list() call below.
            args['conditions'].append(modified_table + "modified > '" + last_modified[d] + "'")
            out[dt]["modified_names"] = frappe.db.sql_list("""select %sname from %s
                where %smodified > %s""" % (modified_table, table, modified_table, "%s"),
                last_modified[d])

        if args.get("force_index"):
            conditions = " force index (%s) " % args["force_index"]
        if args.get("conditions"):
            conditions += " where " + " and ".join(args["conditions"])
        if args.get("order_by"):
            order_by = " order by " + args["order_by"]

        out[dt]["data"] = [list(t) for t in frappe.db.sql("""select %s from %s %s %s""" \
            % (",".join(args["columns"]), table, conditions, order_by))]

        # Record the newest modification timestamp of the (first) table so
        # the client can ask for an incremental dump next time.
        modified_table = table
        if "," in table:
            modified_table = " ".join(table.split(",")[0].split(" ")[:-1])
        tmp = frappe.db.sql("""select `modified`
            from %s order by modified desc limit 1""" % modified_table)
        out[dt]["last_modified"] = tmp and tmp[0][0] or ""

        # Strip table aliases ("col as name" -> "name") for the headers.
        out[dt]["columns"] = map(lambda c: c.split(" as ")[-1], args["columns"])

        if args.get("links"):
            out[dt]["links"] = args["links"]

    for d in out:
        unused_links = []
        # only compress full dumps (not partial)
        if out[d].get("links") and (d not in last_modified):
            for link_key in out[d]["links"]:
                link = out[d]["links"][link_key]
                if link[0] in out and (link[0] not in last_modified):
                    # Build a map of linked-id -> row index in the linked
                    # doctype's data, then replace link values by that index.
                    link_map = {}
                    doctype_data = out[link[0]]
                    col_idx = doctype_data["columns"].index(link[1])

                    for row_idx in xrange(len(doctype_data["data"])):
                        row = doctype_data["data"][row_idx]
                        link_map[row[col_idx]] = row_idx

                    for row in out[d]["data"]:
                        col_idx = out[d]["columns"].index(link_key)
                        # replace by id
                        if row[col_idx]:
                            row[col_idx] = link_map.get(row[col_idx])
                else:
                    unused_links.append(link_key)

        for link_key in unused_links:
            del out[d]["links"][link_key]

    return out
| mit |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/numpy/doc/constants.py | 55 | 8954 | """
=========
Constants
=========
NumPy includes several constants:
%(constant_list)s
"""
#
# Note: the docstring is autogenerated.
#
from __future__ import division, absolute_import, print_function
import textwrap, re
# Maintain same format as in numpy.add_newdocs
# Registry of (name, docstring) pairs collected by add_newdoc() and used to
# fill in the %(constant_list)s placeholder of the module docstring.
constants = []

def add_newdoc(module, name, doc):
    """Record the docstring for constant *name*.

    The *module* argument is unused here; it is kept for signature parity
    with ``numpy.add_newdocs.add_newdoc``.
    """
    constants.append((name, doc))
add_newdoc('numpy', 'Inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Infinity',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'NAN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NAN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NINF',
"""
IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
A floating point representation of negative infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Examples
--------
>>> np.NINF
-inf
>>> np.log(0)
-inf
""")
add_newdoc('numpy', 'NZERO',
"""
IEEE 754 floating point representation of negative zero.
Returns
-------
y : float
A floating point representation of negative zero.
See Also
--------
PZERO : Defines positive zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Negative zero is considered to be a finite number.
Examples
--------
>>> np.NZERO
-0.0
>>> np.PZERO
0.0
>>> np.isfinite([np.NZERO])
array([ True], dtype=bool)
>>> np.isnan([np.NZERO])
array([False], dtype=bool)
>>> np.isinf([np.NZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'NaN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NaN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
Returns
-------
y : float
A floating point representation of positive zero.
See Also
--------
NZERO : Defines negative zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Positive zero is considered to be a finite number.
Examples
--------
>>> np.PZERO
0.0
>>> np.NZERO
-0.0
>>> np.isfinite([np.PZERO])
array([ True], dtype=bool)
>>> np.isnan([np.PZERO])
array([False], dtype=bool)
>>> np.isinf([np.PZERO])
array([False], dtype=bool)
""")
add_newdoc('numpy', 'e',
"""
Euler's constant, base of natural logarithms, Napier's constant.
``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
exp : Exponential function
log : Natural logarithm
References
----------
.. [1] http://en.wikipedia.org/wiki/Napier_constant
""")
add_newdoc('numpy', 'inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
A floating point representation of positive infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
`Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
Examples
--------
>>> np.inf
inf
>>> np.array([1]) / 0.
array([ Inf])
""")
add_newdoc('numpy', 'infty',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'nan',
"""
IEEE 754 floating point representation of Not a Number (NaN).
Returns
-------
y : A floating point representation of Not a Number.
See Also
--------
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite (not one of
Not a Number, positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
`NaN` and `NAN` are aliases of `nan`.
Examples
--------
>>> np.nan
nan
>>> np.log(-1)
nan
>>> np.log([-1, 1, 2])
array([ NaN, 0. , 0.69314718])
""")
add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
See Also
--------
`numpy.doc.indexing`
Examples
--------
>>> newaxis is None
True
>>> x = np.arange(3)
>>> x
array([0, 1, 2])
>>> x[:, newaxis]
array([[0],
[1],
[2]])
>>> x[:, newaxis, newaxis]
array([[[0]],
[[1]],
[[2]]])
>>> x[:, newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Outer product, same as ``outer(x, y)``:
>>> y = np.arange(3, 6)
>>> x[:, newaxis] * y
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
>>> x[newaxis, :].shape
(1, 3)
>>> x[newaxis].shape
(1, 3)
>>> x[None].shape
(1, 3)
>>> x[:, newaxis].shape
(3, 1)
""")
if __doc__:
    # Render every registered constant as a ``.. const::`` directive and
    # substitute the result into the module docstring. Skipped entirely when
    # docstrings are stripped (python -OO makes __doc__ None).
    constants_str = []
    constants.sort()
    for name, doc in constants:
        # Re-indent the constant's docstring so it nests under the directive.
        s = textwrap.dedent(doc).replace("\n", "\n    ")

        # Replace sections by rubrics: numpydoc section underlines ("----",
        # "====") do not render inside a const directive, so turn the title
        # line above each underline into a rubric instead.
        lines = s.split("\n")
        new_lines = []
        for line in lines:
            m = re.match(r'^(\s+)[-=]+\s*$', line)
            if m and new_lines:
                prev = textwrap.dedent(new_lines.pop())  # the section title
                new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
                new_lines.append('')
            else:
                new_lines.append(line)
        s = "\n".join(new_lines)

        # Done.
        constants_str.append(""".. const:: %s\n    %s""" % (name, s))
    constants_str = "\n".join(constants_str)

    __doc__ = __doc__ % dict(constant_list=constants_str)

    # Clean the module namespace: none of these helpers are public API.
    del constants_str, name, doc
    del line, lines, new_lines, m, s, prev

del constants, add_newdoc
| apache-2.0 |
TouK/vumi | vumi/application/tests/helpers.py | 4 | 3399 | # -*- test-case-name: vumi.application.tests.test_test_helpers -*-
import os
from twisted.internet.defer import inlineCallbacks
from twisted.trial.unittest import SkipTest
from zope.interface import implements
from vumi.tests.helpers import (
MessageHelper, WorkerHelper, MessageDispatchHelper, PersistenceHelper,
generate_proxies, IHelper,
)
class ApplicationHelper(object):
    """
    Test helper for application workers.

    Builds the lower-level message/worker/dispatch/persistence helpers and
    proxies their methods onto itself, so an app worker test only needs this
    one object.

    :param application_class:
        The worker class for the application being tested.
    :param bool use_riak:
        Set to ``True`` if the test requires Riak. This is passed to the
        underlying :class:`~vumi.tests.helpers.PersistenceHelper`.
    :param \**msg_helper_args:
        All other keyword params are passed to the underlying
        :class:`~vumi.tests.helpers.MessageHelper`.
    """
    implements(IHelper)

    def __init__(self, application_class, use_riak=False, **msg_helper_args):
        self.application_class = application_class
        self.persistence_helper = PersistenceHelper(use_riak=use_riak)
        self.msg_helper = MessageHelper(**msg_helper_args)
        self.transport_name = self.msg_helper.transport_name
        self.worker_helper = WorkerHelper(self.msg_helper.transport_name)
        self.dispatch_helper = MessageDispatchHelper(
            self.msg_helper, self.worker_helper)

        # Proxy methods from our helpers. The order matters: helpers proxied
        # later may shadow methods of those proxied earlier.
        for helper in (self.msg_helper, self.worker_helper,
                       self.dispatch_helper, self.persistence_helper):
            generate_proxies(self, helper)

    def setup(self):
        self.persistence_helper.setup()
        self.worker_helper.setup()

    @inlineCallbacks
    def cleanup(self):
        yield self.worker_helper.cleanup()
        yield self.persistence_helper.cleanup()

    def get_application(self, config, cls=None, start=True):
        """
        Construct (and by default start) an application worker.

        :param config: Config dict. ``transport_name`` is filled in from
            :attr:`transport_name` when missing.
        :param cls: The Application class to instantiate.
            Defaults to :attr:`application_class`.
        :param start: True to start the application (default), False otherwise.
        """
        worker_class = self.application_class if cls is None else cls
        worker_config = self.mk_config(config)
        worker_config.setdefault(
            'transport_name', self.msg_helper.transport_name)
        return self.get_worker(worker_class, worker_config, start)
def find_nodejs_or_skip_test(worker_class):
    """
    Find the node.js executable by checking the ``VUMI_TEST_NODE_PATH`` envvar
    and falling back to the provided worker's own detection method. If no
    executable is found, :class:`SkipTest` is raised.
    """
    env_path = os.environ.get('VUMI_TEST_NODE_PATH')
    if env_path is not None:
        # An explicitly configured path must exist; a dangling setting is a
        # hard error rather than a skip.
        if not os.path.isfile(env_path):
            raise RuntimeError(
                "VUMI_TEST_NODE_PATH specified, but does not exist: %s" % (
                    env_path,))
        return env_path

    detected = worker_class.find_nodejs()
    if detected is None:
        raise SkipTest("No node.js executable found.")
    return detected
rosmo/ansible | test/units/modules/cloud/xenserver/FakeXenAPI.py | 94 | 1996 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
FAKE_API_VERSION = "1.1"
class Failure(Exception):
    """Stand-in for ``XenAPI.Failure``.

    Stores the raw error ``details`` and renders them as the exception
    message.
    """

    def __init__(self, details):
        self.details = details

    def __str__(self):
        return str(self.details)
class Session(object):
    """Minimal fake of ``XenAPI.Session`` for unit tests.

    Tracks the last login call and exposes a fake opaque session handle.
    Attribute access is intercepted so that ``session.login_*(...)``,
    ``session.logout()``, ``session.handle`` and ``session.xenapi`` behave
    like the real class without any network traffic.
    """

    def __init__(self, uri, transport=None, encoding=None, verbose=0,
                 allow_none=1, ignore_ssl=False):
        self.transport = transport
        self._session = None
        self.last_login_method = None
        self.last_login_params = None
        self.API_version = FAKE_API_VERSION

    def _get_api_version(self):
        return FAKE_API_VERSION

    def _login(self, method, params):
        # Successful "login": remember how we logged in and fake a handle.
        self._session = "OpaqueRef:fake-xenapi-session-ref"
        self.last_login_method = method
        self.last_login_params = params
        self.API_version = self._get_api_version()

    def _logout(self):
        # Drop all session state.
        self._session = None
        self.last_login_method = None
        self.last_login_params = None
        self.API_version = FAKE_API_VERSION

    def xenapi_request(self, methodname, params):
        if methodname.startswith('login'):
            self._login(methodname, params)
        elif methodname in ('logout', 'session.logout'):
            self._logout()
        # Anything else should be patched with mocker.patch().
        return None

    def __getattr__(self, name):
        if name == 'handle':
            return self._session
        if name == 'xenapi':
            # Should be patched with mocker.patch().
            return None
        if name.startswith(('login', 'slave_local')):
            return lambda *params: self._login(name, params)
        if name == 'logout':
            return self._logout
        # Unknown attributes fall through to None, just like the original.
def xapi_local():
    """Return a fake local (Unix socket) session, like ``XenAPI.xapi_local``."""
    return Session("http://_var_lib_xcp_xapi/")
| gpl-3.0 |
laxdog/poloniexlendingbot | modules/MaxToLend.py | 2 | 2884 | # coding=utf-8
from decimal import Decimal
# Module state, populated by init() from the bot configuration.
coin_cfg = []            # per-currency overrides (maxtolendrate/maxtolend/maxpercenttolend)
max_to_lend_rate = 0     # rate threshold below which lending is restricted
max_to_lend = 0          # absolute amount of the balance allowed to be on loan
max_percent_to_lend = 0  # fraction (0-1) of the balance allowed to be on loan
min_loan_size = 0.001    # minimum loan size accepted by the exchange
log = None               # Logger instance, set by init()
def init(config, log1):
    """Load lending limits from *config* into the module globals.

    :param config: configuration wrapper exposing ``get_coin_cfg()`` and
        ``get(section, option, ...)`` (returns the configured or default value).
    :param log1: logger used by :func:`amount_to_lend` for status output.
    """
    global coin_cfg, max_to_lend_rate, max_to_lend, max_percent_to_lend, min_loan_size, log
    # Fixed: was a duplicated assignment ("coin_cfg = coin_cfg = ...").
    coin_cfg = config.get_coin_cfg()
    max_to_lend = Decimal(config.get('BOT', 'maxtolend', False, 0))
    max_percent_to_lend = Decimal(config.get('BOT', 'maxpercenttolend', False, 0, 100)) / 100
    max_to_lend_rate = Decimal(config.get('BOT', 'maxtolendrate', False, 0.003, 5)) / 100
    min_loan_size = Decimal(config.get("BOT", 'minloansize', None, 0.001))
    log = log1
def amount_to_lend(active_cur_test_balance, active_cur, lending_balance, low_rate):
    """Decide how much of ``lending_balance`` may be offered for lending.

    When the market's lowest offered rate falls at or below the configured
    "max to lend" rate, lending is restricted so that only part of the total
    balance (an absolute amount, or a percentage of it) stays on offer.

    :param active_cur_test_balance: total balance of the currency (lent + free)
    :param active_cur: currency ticker, e.g. 'BTC'
    :param lending_balance: amount currently available to lend
    :param low_rate: lowest loan offer rate currently seen in the market
    :return: Decimal amount that should be offered for lending
    """
    restrict_lend = False
    active_bal = Decimal(0)
    log_data = str("")
    # Start from the global limits, then apply per-coin overrides if present.
    cur_max_to_lend_rate = max_to_lend_rate
    cur_max_to_lend = max_to_lend
    cur_max_percent_to_lend = max_percent_to_lend
    if active_cur in coin_cfg:
        cur_max_to_lend_rate = coin_cfg[active_cur]['maxtolendrate']
        cur_max_to_lend = coin_cfg[active_cur]['maxtolend']
        cur_max_percent_to_lend = coin_cfg[active_cur]['maxpercenttolend']
    # Restrict either when no rate threshold is configured (0 == "always
    # restrict" while any positive rate exists) or the market rate has
    # dropped to or below the threshold.
    if cur_max_to_lend_rate == 0 and low_rate > 0 or cur_max_to_lend_rate >= low_rate > 0:
        log_data = ("The Lower Rate found on " + active_cur + " is " + str(
            "%.4f" % (Decimal(low_rate) * 100)) + "% vs conditional rate " + str(
            "%.4f" % (Decimal(cur_max_to_lend_rate) * 100)) + "%. ")
        restrict_lend = True
    # The absolute cap takes precedence over the percentage cap.
    if cur_max_to_lend != 0 and restrict_lend:
        log.updateStatusValue(active_cur, "maxToLend", cur_max_to_lend)
        if lending_balance > (active_cur_test_balance - cur_max_to_lend):
            active_bal = (lending_balance - (active_cur_test_balance - cur_max_to_lend))
    if cur_max_to_lend == 0 and cur_max_percent_to_lend != 0 and restrict_lend:
        log.updateStatusValue(active_cur, "maxToLend", (cur_max_percent_to_lend * active_cur_test_balance))
        if lending_balance > (active_cur_test_balance - (cur_max_percent_to_lend * active_cur_test_balance)):
            active_bal = (lending_balance - (active_cur_test_balance - (
                cur_max_percent_to_lend * active_cur_test_balance)))
    if cur_max_to_lend == 0 and cur_max_percent_to_lend == 0:
        # No caps configured at all: lend everything available.
        log.updateStatusValue(active_cur, "maxToLend", active_cur_test_balance)
        active_bal = lending_balance
    if not restrict_lend:
        log.updateStatusValue(active_cur, "maxToLend", active_cur_test_balance)
        active_bal = lending_balance
    if (lending_balance - active_bal) < min_loan_size:
        # The amount held back would be below the exchange's minimum loan
        # size anyway, so just lend the whole balance.
        active_bal = lending_balance
    if active_bal < lending_balance:
        log.log(log_data + " Lending " + str("%.8f" % Decimal(active_bal)) + " of " + str(
            "%.8f" % Decimal(lending_balance)) + " Available")
    return active_bal
| mit |
sujithshankar/anaconda | pyanaconda/ui/tui/spokes/__init__.py | 1 | 12990 | # The base classes for Anaconda TUI Spokes
#
# Copyright (C) (2012) Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from pyanaconda.ui.tui import simpleline as tui
from pyanaconda.ui.tui.tuiobject import TUIObject, YesNoDialog
from pyanaconda.ui.common import Spoke, StandaloneSpoke, NormalSpoke
from pyanaconda.users import validatePassword, cryptPassword
import re
from collections import namedtuple
from pyanaconda.iutil import setdeepattr, getdeepattr
from pyanaconda.i18n import N_, _
from pyanaconda.constants import PASSWORD_CONFIRM_ERROR_TUI, PW_ASCII_CHARS
from pyanaconda.constants import PASSWORD_WEAK, PASSWORD_WEAK_WITH_ERROR
__all__ = ["TUISpoke", "EditTUISpoke", "EditTUIDialog", "EditTUISpokeEntry",
"StandaloneSpoke", "NormalTUISpoke"]
# Inherit abstract methods from Spoke
# pylint: disable=abstract-method
class TUISpoke(TUIObject, tui.Widget, Spoke):
    """Base TUI Spoke class implementing the pyanaconda.ui.common.Spoke API.

    It also acts as a Widget so we can easily add it to Hub, where it shows
    as a summary box with title, description and completed checkbox.

    :param title: title of this spoke
    :type title: str

    :param category: category this spoke belongs to
    :type category: string

    .. inheritance-diagram:: TUISpoke
       :parts: 3
    """
    title = N_("Default spoke title")

    def __init__(self, app, data, storage, payload, instclass):
        # Abstract base class: must be subclassed, never instantiated directly.
        if self.__class__ is TUISpoke:
            raise TypeError("TUISpoke is an abstract class")

        TUIObject.__init__(self, app, data)
        tui.Widget.__init__(self)
        Spoke.__init__(self, storage, payload, instclass)

    @property
    def status(self):
        # Placeholder summary line; subclasses override with real state.
        return _("testing status...")

    @property
    def completed(self):
        return True

    def refresh(self, args=None):
        """Redraw the spoke; returns True so input processing continues."""
        TUIObject.refresh(self, args)
        return True

    def input(self, args, key):
        """Handle the input, the base class just forwards it to the App level."""
        return key

    def render(self, width):
        """Render the summary representation for Hub to internal buffer."""
        tui.Widget.render(self, width)

        # Checkbox key: '!' = mandatory but unfinished, 'x' = done, ' ' = optional.
        if self.mandatory and not self.completed:
            key = "!"
        elif self.completed:
            key = "x"
        else:
            key = " "

        # always set completed = True here; otherwise key value won't be
        # displayed if completed (spoke value from above) is False
        c = tui.CheckboxWidget(key=key, completed=True,
                               title=_(self.title), text=self.status)
        c.render(width)
        self.draw(c)
class NormalTUISpoke(TUISpoke, NormalSpoke):
    """TUI counterpart of pyanaconda.ui.common.NormalSpoke: combines the
    TUISpoke rendering/input behavior with the NormalSpoke API.

    .. inheritance-diagram:: NormalTUISpoke
       :parts: 3
    """
    pass
# Declarative description of one editable field shown by EditTUISpoke:
# (title, attribute, aux, visible) -- see the EditTUISpoke.edit_fields
# comment below for the meaning of each member.
EditTUISpokeEntry = namedtuple("EditTUISpokeEntry", ["title", "attribute", "aux", "visible"])
# Inherit abstract methods from NormalTUISpoke
# pylint: disable=abstract-method
class EditTUIDialog(NormalTUISpoke):
    """Spoke/dialog used to read new value of textual or password data
    .. inheritance-diagram:: EditTUIDialog
       :parts: 3
    To override the wrong input message set the wrong_input_message attribute
    to a translated string.
    """
    title = N_("New value")
    # Sentinel regexp stored in EditTUISpokeEntry.aux to mark password fields.
    PASSWORD = re.compile(".*")
    def __init__(self, app, data, storage, payload, instclass, policy_name=""):
        # Abstract base -- must be subclassed, never instantiated directly.
        if self.__class__ is EditTUIDialog:
            raise TypeError("EditTUIDialog is an abstract class")
        NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)
        # Last value read from the user; None until a valid value is entered.
        self.value = None
        self.policy = None
        self.wrong_input_message = None
        # Configure the password policy, if available. Otherwise use defaults.
        self.policy = self.data.anaconda.pwpolicy.get_policy(policy_name)
        if not self.policy:
            self.policy = self.data.anaconda.PwPolicyData()
    def refresh(self, args=None):
        # Clear the window buffer and any previously read value.
        self._window = []
        self.value = None
        return True
    def prompt(self, entry=None):
        """Prompt for a new value of *entry*.

        For password entries the value is read (and validated) right here and
        None is returned; for text entries a prompt string is returned and the
        actual value is read later via input().
        """
        if not entry:
            return None
        if entry.aux == self.PASSWORD:
            # Read the password twice without echoing it.
            pw = self._app.raw_input(_("%s: ") % entry.title, hidden=True)
            confirm = self._app.raw_input(_("%s (confirm): ") % entry.title, hidden=True)
            if (pw and not confirm) or (confirm and not pw):
                print(_("You must enter your root password and confirm it by typing"
                        " it a second time to continue."))
                return None
            if (pw != confirm):
                print(_(PASSWORD_CONFIRM_ERROR_TUI))
                return None
            # If an empty password was provided, unset the value
            if not pw:
                self.value = ""
                return None
            valid, strength, message = validatePassword(pw, user=None, minlen=self.policy.minlen)
            if not valid:
                print(message)
                return None
            # Password is weaker than the policy requires: refuse it outright
            # under a strict policy, otherwise ask whether to use it anyway.
            if strength < self.policy.minquality:
                if self.policy.strict:
                    done_msg = ""
                else:
                    done_msg = _("\nWould you like to use it anyway?")
                if message:
                    error = _(PASSWORD_WEAK_WITH_ERROR) % message + " " + done_msg
                else:
                    error = _(PASSWORD_WEAK) % done_msg
                if not self.policy.strict:
                    question_window = YesNoDialog(self._app, error)
                    self._app.switch_screen_modal(question_window)
                    if not question_window.answer:
                        return None
                else:
                    print(error)
                    return None
            if any(char not in PW_ASCII_CHARS for char in pw):
                print(_("You have provided a password containing non-ASCII characters.\n"
                        "You may not be able to switch between keyboard layouts to login.\n"))
            # Only the crypted (hashed) form of the password is kept.
            self.value = cryptPassword(pw)
            return None
        else:
            return _("Enter a new value for '%s' and press [Enter]\n") % entry.title
    def input(self, entry, key):
        # Validate the typed value against the entry's regexp; on success
        # store it and close the dialog.
        if entry.aux.match(key):
            self.value = key
            self.close()
            return True
        else:
            if self.wrong_input_message:
                print(self.wrong_input_message)
            else:
                print(_("You have provided an invalid value\n"))
            return NormalTUISpoke.input(self, entry, key)
class OneShotEditTUIDialog(EditTUIDialog):
    """Variant of EditTUIDialog that closes itself as soon as the value
    has been read (or when there is nothing to read).
    """
    def prompt(self, entry=None):
        # No entry means nothing to ask for -- close immediately.
        if not entry:
            self.close()
            return None
        prompt_text = EditTUIDialog.prompt(self, entry)
        # A None result means the value was already consumed (password
        # branch) or input failed, so the dialog is done.
        if prompt_text is None:
            self.close()
        return prompt_text
# Inherit abstract methods from NormalTUISpoke
# pylint: disable=abstract-method
class EditTUISpoke(NormalTUISpoke):
    """Spoke with declarative semantics, it contains
    a list of titles, attribute names and regexps
    that specify the fields of an object the user
    allowed to edit.
    .. inheritance-diagram:: EditTUISpoke
       :parts: 3
    """
    # self.data's subattribute name
    # empty string means __init__ will provide
    # something else
    edit_data = ""
    # constants to be used in the aux field
    # and mark the entry as a password or checkbox field
    PASSWORD = EditTUIDialog.PASSWORD
    CHECK = "check"
    # list of fields in the format of named tuples like:
    # EditTUISpokeEntry(title, attribute, aux, visible)
    #  title     - Nontranslated title of the entry
    #  attribute - The edited object's attribute name
    #  aux       - Compiled regular expression or one of the
    #              two constants from above.
    #              It will be used to check the value typed
    #              by user and to show the proper entry
    #              for password, text or checkbox.
    #  visible   - True, False or a function that accepts
    #              two arguments - self and the edited object
    #              It is evaluated and used to display or
    #              hide this attribute's entry
    edit_fields = [
    ]
    def __init__(self, app, data, storage, payload, instclass, policy_name=""):
        """Set up the spoke and the helper dialog used to read new values.

        :param policy_name: password policy name forwarded to the
                            OneShotEditTUIDialog
        """
        if self.__class__ is EditTUISpoke:
            raise TypeError("EditTUISpoke is an abstract class")
        NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)
        self.dialog = OneShotEditTUIDialog(app, data, storage, payload, instclass, policy_name=policy_name)
        # self.args should hold the object this Spoke is supposed
        # to edit
        self.args = None
    @property
    def visible_fields(self):
        """Get the list of currently visible entries"""
        # it would be nice to have this a static list, but visibility of the
        # entries often depends on the current state of the spoke and thus
        # changes dynamically
        ret = []
        for entry in self.edit_fields:
            if callable(entry.visible) and entry.visible(self, self.args):
                ret.append(entry)
            elif not callable(entry.visible) and entry.visible:
                ret.append(entry)
        return ret
    def refresh(self, args=None):
        """Resolve the edited object and rebuild the entry widgets."""
        NormalTUISpoke.refresh(self, args)
        # The edited object is either passed in explicitly or looked up on
        # self.data via the dotted attribute path in self.edit_data.
        if args:
            self.args = args
        elif self.edit_data:
            self.args = self.data
            for key in self.edit_data.split("."):
                self.args = getattr(self.args, key)
        def _prep_text(i, entry):
            # Numbered entry showing the attribute's current text value.
            number = tui.TextWidget("%2d)" % i)
            title = tui.TextWidget(_(entry.title))
            value = getdeepattr(self.args, entry.attribute)
            value = tui.TextWidget(value)
            return tui.ColumnWidget([(3, [number]), (None, [title, value])], 1)
        def _prep_check(i, entry):
            # Numbered checkbox reflecting the attribute's boolean value.
            number = tui.TextWidget("%2d)" % i)
            value = getdeepattr(self.args, entry.attribute)
            ch = tui.CheckboxWidget(title=_(entry.title), completed=bool(value))
            return tui.ColumnWidget([(3, [number]), (None, [ch])], 1)
        def _prep_password(i, entry):
            # Numbered entry that never displays the password itself.
            number = tui.TextWidget("%2d)" % i)
            title = tui.TextWidget(_(entry.title))
            value = ""
            if len(getdeepattr(self.args, entry.attribute)) > 0:
                value = _("Password set.")
            value = tui.TextWidget(value)
            return tui.ColumnWidget([(3, [number]), (None, [title, value])], 1)
        for idx, entry in enumerate(self.visible_fields):
            entry_type = entry.aux
            if entry_type == self.PASSWORD:
                w = _prep_password(idx+1, entry)
            elif entry_type == self.CHECK:
                w = _prep_check(idx+1, entry)
            else:
                w = _prep_text(idx+1, entry)
            self._window.append(w)
        return True
    def input(self, args, key):
        """Handle user input: a valid entry number toggles or edits it."""
        try:
            idx = int(key) - 1
            if idx >= 0 and idx < len(self.visible_fields):
                if self.visible_fields[idx].aux == self.CHECK:
                    # Checkbox entries are toggled in place.
                    # (Use .attribute rather than index [1] -- same namedtuple
                    # field, kept consistent with the rest of this method.)
                    setdeepattr(self.args, self.visible_fields[idx].attribute,
                                not getdeepattr(self.args, self.visible_fields[idx].attribute))
                    self.app.redraw()
                    self.apply()
                else:
                    # Text and password entries are edited via the modal dialog.
                    self.app.switch_screen_modal(self.dialog, self.visible_fields[idx])
                    if self.dialog.value is not None:
                        setdeepattr(self.args, self.visible_fields[idx].attribute,
                                    self.dialog.value)
                        self.apply()
                return True
        except ValueError:
            # Key was not a number -- fall through to the generic handling.
            pass
        return NormalTUISpoke.input(self, args, key)
class StandaloneTUISpoke(TUISpoke, StandaloneSpoke):
    """TUI counterpart of pyanaconda.ui.common.StandaloneSpoke: combines the
    TUISpoke rendering/input behavior with the StandaloneSpoke API.

    .. inheritance-diagram:: StandaloneTUISpoke
       :parts: 3
    """
    pass
| gpl-2.0 |
savoirfairelinux/django | django/db/backends/sqlite3/base.py | 5 | 18697 | """
SQLite3 backend for the sqlite3 module in the standard library.
"""
import decimal
import math
import re
import warnings
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
def decoder(conv_func):
    """
    Convert bytestrings from Python's sqlite3 interface to a regular string.
    """
    def convert(value):
        return conv_func(value.decode())
    return convert
# Teach sqlite3 how to turn declared column types back into Python values.
# These fire because connections are opened with
# detect_types=PARSE_DECLTYPES | PARSE_COLNAMES (see get_connection_params).
Database.register_converter("bool", decoder(lambda s: s == '1'))
Database.register_converter("time", decoder(parse_time))
Database.register_converter("date", decoder(parse_date))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))
Database.register_converter("decimal", decoder(backend_utils.typecast_decimal))
# Adapt Decimal values on the way into the database; see
# backend_utils.rev_typecast_decimal for the storage representation.
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
class DatabaseWrapper(BaseDatabaseWrapper):
    """SQLite implementation of Django's database backend wrapper: connection
    management, field-type mapping and operator/pattern definitions."""
    vendor = 'sqlite'
    display_name = 'SQLite'
    # SQLite doesn't actually support most of these types, but it "does the right
    # thing" given more verbose field definitions, so leave them as is so that
    # schema inspection is more useful.
    data_types = {
        'AutoField': 'integer',
        'BigAutoField': 'integer',
        'BinaryField': 'BLOB',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'decimal',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'real',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer unsigned',
        'PositiveSmallIntegerField': 'smallint unsigned',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }
    # Extra SQL appended to the column type for these fields.
    data_types_suffix = {
        'AutoField': 'AUTOINCREMENT',
        'BigAutoField': 'AUTOINCREMENT',
    }
    # SQLite requires LIKE statements to include an ESCAPE clause if the value
    # being escaped has a percent or underscore in it.
    # See http://www.sqlite.org/lang_expr.html for an explanation.
    operators = {
        'exact': '= %s',
        'iexact': "LIKE %s ESCAPE '\\'",
        'contains': "LIKE %s ESCAPE '\\'",
        'icontains': "LIKE %s ESCAPE '\\'",
        'regex': 'REGEXP %s',
        'iregex': "REGEXP '(?i)' || %s",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\'",
        'endswith': "LIKE %s ESCAPE '\\'",
        'istartswith': "LIKE %s ESCAPE '\\'",
        'iendswith': "LIKE %s ESCAPE '\\'",
    }
    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
        'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
        'startswith': r"LIKE {} || '%%' ESCAPE '\'",
        'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
        'endswith': r"LIKE '%%' || {} ESCAPE '\'",
        'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
    }
    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    # Classes instantiated in __init__().
    client_class = DatabaseClient
    creation_class = DatabaseCreation
    features_class = DatabaseFeatures
    introspection_class = DatabaseIntrospection
    ops_class = DatabaseOperations
    def get_connection_params(self):
        """Build the keyword arguments passed to sqlite3.connect()."""
        settings_dict = self.settings_dict
        if not settings_dict['NAME']:
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME value.")
        kwargs = {
            'database': settings_dict['NAME'],
            'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
        }
        kwargs.update(settings_dict['OPTIONS'])
        # Always allow the underlying SQLite connection to be shareable
        # between multiple threads. The safe-guarding will be handled at a
        # higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
        # property. This is necessary as the shareability is disabled by
        # default in pysqlite and it cannot be changed once a connection is
        # opened.
        if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
            warnings.warn(
                'The `check_same_thread` option was provided and set to '
                'True. It will be overridden with False. Use the '
                '`DatabaseWrapper.allow_thread_sharing` property instead '
                'for controlling thread shareability.',
                RuntimeWarning
            )
        kwargs.update({'check_same_thread': False})
        if self.features.can_share_in_memory_db:
            kwargs.update({'uri': True})
        return kwargs
    def get_new_connection(self, conn_params):
        """Open a connection and register the SQL helper functions the
        backend's SQL generation relies on (date/time, REGEXP, POWER...)."""
        conn = Database.connect(**conn_params)
        conn.create_function("django_date_extract", 2, _sqlite_date_extract)
        conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
        conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
        conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
        conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
        conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
        conn.create_function("django_time_extract", 2, _sqlite_time_extract)
        conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
        conn.create_function("django_time_diff", 2, _sqlite_time_diff)
        conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
        conn.create_function("regexp", 2, _sqlite_regexp)
        conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
        conn.create_function("django_power", 2, _sqlite_power)
        # Foreign key enforcement is off by default in SQLite; turn it on.
        conn.execute('PRAGMA foreign_keys = ON')
        return conn
    def init_connection_state(self):
        """No per-connection state to initialize for SQLite."""
        pass
    def create_cursor(self, name=None):
        """Return a cursor that translates %s placeholders to qmark style."""
        return self.connection.cursor(factory=SQLiteCursorWrapper)
    def close(self):
        self.validate_thread_sharing()
        # If database is in memory, closing the connection destroys the
        # database. To prevent accidental data loss, ignore close requests on
        # an in-memory db.
        if not self.is_in_memory_db():
            BaseDatabaseWrapper.close(self)
    def _savepoint_allowed(self):
        # Two conditions are required here:
        # - A sufficiently recent version of SQLite to support savepoints,
        # - Being in a transaction, which can only happen inside 'atomic'.
        # When 'isolation_level' is not None, sqlite3 commits before each
        # savepoint; it's a bug. When it is None, savepoints don't make sense
        # because autocommit is enabled. The only exception is inside 'atomic'
        # blocks. To work around that bug, on SQLite, 'atomic' starts a
        # transaction explicitly rather than simply disable autocommit.
        return self.features.uses_savepoints and self.in_atomic_block
    def _set_autocommit(self, autocommit):
        """Map Django's autocommit flag onto sqlite3's isolation_level."""
        if autocommit:
            level = None
        else:
            # sqlite3's internal default is ''. It's different from None.
            # See Modules/_sqlite/connection.c.
            level = ''
        # 'isolation_level' is a misleading API.
        # SQLite always runs at the SERIALIZABLE isolation level.
        with self.wrap_database_errors:
            self.connection.isolation_level = level
    def disable_constraint_checking(self):
        """Turn off FK enforcement; returns False when inside a transaction
        (SQLite cannot change the pragma there)."""
        if self.in_atomic_block:
            # sqlite3 cannot disable constraint checking inside a transaction.
            return False
        self.cursor().execute('PRAGMA foreign_keys = OFF')
        return True
    def enable_constraint_checking(self):
        """Re-enable foreign key enforcement."""
        self.cursor().execute('PRAGMA foreign_keys = ON')
    def check_constraints(self, table_names=None):
        """
        Check each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.
        Raise an IntegrityError on the first invalid foreign key reference
        encountered (if any) and provide detailed information about the
        invalid reference in the error message.
        Backends can override this method if they can more directly apply
        constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # Find rows whose FK column is set but has no matching row in
                # the referenced table.
                cursor.execute(
                    """
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
                    """
                    % (
                        primary_key_column_name, column_name, table_name,
                        referenced_table_name, column_name, referenced_column_name,
                        column_name, referenced_column_name,
                    )
                )
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError(
                        "The row in table '%s' with primary key '%s' has an "
                        "invalid foreign key: %s.%s contains a value '%s' that "
                        "does not have a corresponding value in %s.%s." % (
                            table_name, bad_row[0], table_name, column_name,
                            bad_row[1], referenced_table_name, referenced_column_name,
                        )
                    )
    def is_usable(self):
        """Always True: the connection is checked nowhere for SQLite."""
        return True
    def _start_transaction_under_autocommit(self):
        """
        Start a transaction explicitly in autocommit mode.
        Staying in autocommit mode works around a bug of sqlite3 that breaks
        savepoints when autocommit is disabled.
        """
        self.cursor().execute("BEGIN")
    def is_in_memory_db(self):
        """Delegate the in-memory check to the creation backend."""
        return self.creation.is_in_memory_db(self.settings_dict['NAME'])
# Matches a %s placeholder not escaped by a preceding percent sign.
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
    """
    Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
    This fixes it -- but note that if you want to use a literal "%s" in a query,
    you'll need to use "%%s".
    """
    def execute(self, query, params=None):
        # Without params the query is passed through untranslated so a
        # literal % keeps working.
        if params is None:
            return Database.Cursor.execute(self, query)
        return Database.Cursor.execute(self, self.convert_query(query), params)
    def executemany(self, query, param_list):
        return Database.Cursor.executemany(self, self.convert_query(query), param_list)
    def convert_query(self, query):
        # Swap each unescaped %s for a qmark, then collapse %% escapes.
        converted = FORMAT_QMARK_REGEX.sub('?', query)
        return converted.replace('%%', '%')
def _sqlite_date_extract(lookup_type, dt):
    """Implement the django_date_extract SQL function for date strings."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'week_day':
        # Shift ISO weekday (Mon=1..Sun=7) so that Sunday becomes 1.
        return (parsed.isoweekday() % 7) + 1
    if lookup_type == 'week':
        return parsed.isocalendar()[1]
    if lookup_type == 'quarter':
        return math.ceil(parsed.month / 3)
    # Plain component lookups (year, month, day, ...) map to attributes.
    return getattr(parsed, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
    """Implement django_date_trunc: cut a date string to the given unit."""
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'year':
        return '%i-01-01' % parsed.year
    if lookup_type == 'quarter':
        first_month = parsed.month - (parsed.month - 1) % 3
        return '%i-%02i-01' % (parsed.year, first_month)
    if lookup_type == 'month':
        return '%i-%02i-01' % (parsed.year, parsed.month)
    if lookup_type == 'day':
        return '%i-%02i-%02i' % (parsed.year, parsed.month, parsed.day)
def _sqlite_time_trunc(lookup_type, dt):
    """Implement django_time_trunc: cut a time string to the given unit."""
    try:
        parsed = backend_utils.typecast_time(dt)
    except (ValueError, TypeError):
        return None
    hour, minute, second = parsed.hour, parsed.minute, parsed.second
    if lookup_type == 'hour':
        minute = second = 0
    elif lookup_type == 'minute':
        second = 0
    elif lookup_type != 'second':
        # Unknown unit: mirror the implicit None of the original chain.
        return None
    return '%02i:%02i:%02i' % (hour, minute, second)
def _sqlite_datetime_parse(dt, tzname):
    """Parse a datetime string, optionally converting it to tzname's zone.

    Returns None for NULL or unparseable input.
    """
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if tzname is None:
        return parsed
    return timezone.localtime(parsed, pytz.timezone(tzname))
def _sqlite_datetime_cast_date(dt, tzname):
    """Return the ISO date part of a datetime string, or None."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
    """Return the ISO time part of a datetime string, or None."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
    """Implement django_datetime_extract for datetime strings."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    if parsed is None:
        return None
    if lookup_type == 'week_day':
        # Shift ISO weekday (Mon=1..Sun=7) so that Sunday becomes 1.
        return (parsed.isoweekday() % 7) + 1
    if lookup_type == 'week':
        return parsed.isocalendar()[1]
    if lookup_type == 'quarter':
        return math.ceil(parsed.month / 3)
    # Plain component lookups (year, hour, minute, ...) map to attributes.
    return getattr(parsed, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
    """Implement django_datetime_trunc: zero out everything below the unit."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    if parsed is None:
        return None
    units = ('year', 'quarter', 'month', 'day', 'hour', 'minute', 'second')
    if lookup_type not in units:
        # Unknown unit: mirror the implicit None of the original chain.
        return None
    year, month, day = parsed.year, parsed.month, parsed.day
    hour, minute, second = parsed.hour, parsed.minute, parsed.second
    # Truncation cascade: each coarser unit resets all finer components.
    if lookup_type == 'year':
        month = 1
    elif lookup_type == 'quarter':
        month = month - (month - 1) % 3
    if lookup_type in ('year', 'quarter', 'month'):
        day = 1
    if lookup_type in ('year', 'quarter', 'month', 'day'):
        hour = 0
    if lookup_type in ('year', 'quarter', 'month', 'day', 'hour'):
        minute = 0
    if lookup_type != 'second':
        second = 0
    return '%i-%02i-%02i %02i:%02i:%02i' % (year, month, day, hour, minute, second)
def _sqlite_time_extract(lookup_type, dt):
    """Implement django_time_extract: pull one component off a time string."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_time(dt)
    except (ValueError, TypeError):
        return None
    return getattr(parsed, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
    """
    LHS and RHS can be either:
    - An integer number of microseconds
    - A string representing a timedelta object
    - A string representing a datetime

    ``conn`` carries the operator spliced in by the SQL generation ("+" or
    anything else, treated as "-"), not a database connection.
    """
    try:
        # Integers are microsecond counts; render them as fractional seconds
        # so parse_duration() can understand them.
        if isinstance(lhs, int):
            lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
        real_lhs = parse_duration(lhs)
        if real_lhs is None:
            # Not a duration string -- fall back to datetime parsing.
            real_lhs = backend_utils.typecast_timestamp(lhs)
        if isinstance(rhs, int):
            rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
        real_rhs = parse_duration(rhs)
        if real_rhs is None:
            real_rhs = backend_utils.typecast_timestamp(rhs)
        if conn.strip() == '+':
            out = real_lhs + real_rhs
        else:
            out = real_lhs - real_rhs
    except (ValueError, TypeError):
        return None
    # typecast_timestamp returns a date or a datetime without timezone.
    # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
    return str(out)
def _sqlite_time_diff(lhs, rhs):
    """Return lhs - rhs for two time strings, in microseconds."""
    def to_microseconds(value):
        t = backend_utils.typecast_time(value)
        return ((t.hour * 60 + t.minute) * 60 + t.second) * 1000000 + t.microsecond
    return to_microseconds(lhs) - to_microseconds(rhs)
def _sqlite_timestamp_diff(lhs, rhs):
    """Return lhs - rhs for two timestamp strings, in microseconds."""
    delta = backend_utils.typecast_timestamp(lhs) - backend_utils.typecast_timestamp(rhs)
    return delta.total_seconds() * 1000000
def _sqlite_regexp(re_pattern, re_string):
return bool(re.search(re_pattern, str(re_string))) if re_string is not None else False
def _sqlite_power(x, y):
return x ** y
| bsd-3-clause |
Antiun/c2c-rd-addons | chricar_partner_id_number/__init__.py | 4 | 1395 | # -*- coding: utf-8 -*-
# Python source code encoding : https://www.python.org/dev/peps/pep-0263/
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL
# http://tiny.be
# Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH
# http://www.camptocamp.at
# Copyright (C) 2015 Antiun Ingenieria, SL (Madrid, Spain)
# http://www.antiun.com
# Antonio Espinosa <antonioea@antiun.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
| agpl-3.0 |
cgstudiomap/cgstudiomap | main/parts/odoo/addons/account_analytic_plans/report/__init__.py | 445 | 1084 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ng110/PyZDDE | Examples/Scripts/arrayTraceSimple.py | 2 | 3848 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: arrayTraceSimple.py
# Purpose: A simple script to demonstrate array ray tracing. We need to use
# the two functions from the module pyzdde.arraytrace --
# The first function getRayDataArray() helps us to create the
# ray data structure array, and the function zArrayTrace() sends
# the ray data to Zemax (through c) for tracing.
#
# Author: Indranil Sinharoy
#
# Created: Tue Feb 17 15:58:13 2015
# Copyright: (c) Indranil Sinharoy, 2012 - 2017
# Licence: MIT License
#-------------------------------------------------------------------------------
from __future__ import print_function, division
import pyzdde.arraytrace as at # Module for array ray tracing
import pyzdde.zdde as pyz
import os as os
import sys as sys
from math import sqrt as sqrt
# Python 3 dropped xrange; alias it to range so the loops below run on both.
if sys.version_info[0] > 2:
    xrange = range
# Directory containing this script; the output file is written next to it.
cd = os.path.dirname(os.path.realpath(__file__))
def trace_rays():
    """Trace a 101x101 grid of pupil rays through a sample Zemax lens.

    Connects to a running Zemax instance over DDE, loads the Cooke triplet
    sample file, performs an array ray trace and writes the per-ray results
    to arrayTraceOutput.txt next to this script.
    """
    ln = pyz.createLink()
    filename = os.path.join(ln.zGetPath()[1], 'Sequential', 'Objectives',
                            'Cooke 40 degree field.zmx')
    ln.zLoadFile(filename)
    print("Loaded zemax file:", ln.zGetFile())
    ln.zGetUpdate() # In general this should be done ...
    if not ln.zPushLensPermission():
        print("\nERROR: Extensions not allowed to push lenses. Please enable in Zemax.")
        ln.close()
        sys.exit(0)
    ln.zPushLens(1) # FOR SOME REASON, THE ARRAY RAY TRACING SEEMS TO
    # BE WORKING ON THE LENS THAT IS IN THE MAIN ZEMAX APPLICATION WINDOW!!!!
    ln.zNewLens() # THIS IS JUST TO PROVE THE ABOVE POINT!!! RAY TRACING STILL ON THE LENS
    # IN THE MAIN ZEMAX APPLICATION, EVENTHOUGH THE LENS IN THE DDE SERVER IS A "NEW LENS"
    numRays = 101**2 # 10201
    # rd[0] is left untouched by the loop below -- apparently reserved by
    # getRayDataArray() for the trace header/configuration element.
    rd = at.getRayDataArray(numRays, tType=0, mode=0, endSurf=-1)
    radius = int(sqrt(numRays)/2)
    # Fill the rest of the ray data array
    # NOTE(review): normalized pupil coordinates go into the .z and .l
    # fields (per the px/py comments) -- confirm against the pyzdde
    # mode-0 ray structure documentation.
    k = 0
    for i in xrange(-radius, radius + 1, 1):
        for j in xrange(-radius, radius + 1, 1):
            k += 1
            rd[k].z = i/(2*radius) # px
            rd[k].l = j/(2*radius) # py
            rd[k].intensity = 1.0
            rd[k].wave = 1
    # Trace the rays
    ret = at.zArrayTrace(rd, timeout=5000)
    # Dump the ray trace data into a file
    outputfile = os.path.join(cd, "arrayTraceOutput.txt")
    if ret==0:
        k = 0
        with open(outputfile, 'w') as f:
            f.write("Listing of Array trace data\n")
            f.write("   px      py error            xout            yout"
                    "         l         m         n     opd     Exr     Exi"
                    "     Eyr     Eyi     Ezr     Ezi   trans\n")
            # Same grid traversal order as above so rows line up with rays.
            for i in xrange(-radius, radius + 1, 1):
                for j in xrange(-radius, radius + 1, 1):
                    k += 1
                    line = ("{:7.3f} {:7.3f} {:5d} {:15.6E} {:15.6E} {:9.5f} "
                            "{:9.5f} {:9.5f} {:7.3f} {:7.3f} {:7.3f} {:7.3f} "
                            "{:7.3f} {:7.3f} {:7.3f} {:7.4f}\n"
                            .format(i/(2*radius), j/(2*radius), rd[k].error,
                                    rd[k].x, rd[k].y, rd[k].l, rd[k].m, rd[k].n,
                                    rd[k].opd, rd[k].Exr, rd[k].Exi, rd[k].Eyr,
                                    rd[k].Eyi, rd[k].Ezr, rd[k].Ezi, rd[k].intensity))
                    f.write(line)
        print("Success")
        print("Ray trace data outputted to the file {}".format(outputfile))
    else:
        print("There was some problem in ray tracing")
    # Leave the main application with a fresh lens before disconnecting.
    ln.zNewLens()
    ln.zPushLens()
    ln.close()
if __name__ == '__main__':
    trace_rays()
EmanueleCannizzaro/scons | test/MSVS/vs-8.0-clean.py | 1 | 3762 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/MSVS/vs-8.0-clean.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify the -c option's ability to clean generated Visual Studio 8.0
project (.vcproj) and solution (.sln) files.
"""
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
host_arch = test.get_vs_host_arch()
# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['8.0']
expected_slnfile = TestSConsMSVS.expected_slnfile_8_0
expected_vcprojfile = TestSConsMSVS.expected_vcprojfile_8_0
test.write('SConstruct', """\
env=Environment(platform='win32', tools=['msvs'], MSVS_VERSION='8.0',
CPPDEFINES=['DEF1', 'DEF2',('DEF3','1234')],
CPPPATH=['inc1', 'inc2'],
HOST_ARCH='%(HOST_ARCH)s')
testsrc = ['test1.cpp', 'test2.cpp']
testincs = ['sdk.h']
testlocalincs = ['test.h']
testresources = ['test.rc']
testmisc = ['readme.txt']
p = env.MSVSProject(target = 'Test.vcproj',
srcs = testsrc,
incs = testincs,
localincs = testlocalincs,
resources = testresources,
misc = testmisc,
buildtarget = 'Test.exe',
variant = 'Release',
auto_build_solution = 0)
env.MSVSSolution(target = 'Test.sln',
slnguid = '{SLNGUID}',
projects = [p],
variant = 'Release')
"""%{'HOST_ARCH': host_arch})
test.run(arguments=".")
test.must_exist(test.workpath('Test.vcproj'))
vcproj = test.read('Test.vcproj', 'r')
expect = test.msvs_substitute(expected_vcprojfile, '8.0', None, 'SConstruct')
# don't compare the pickled data
assert vcproj[:len(expect)] == expect, test.diff_substr(expect, vcproj)
test.must_exist(test.workpath('Test.sln'))
sln = test.read('Test.sln', 'r')
expect = test.msvs_substitute(expected_slnfile, '8.0', None, 'SConstruct')
# don't compare the pickled data
assert sln[:len(expect)] == expect, test.diff_substr(expect, sln)
test.run(arguments='-c .')
test.must_not_exist(test.workpath('Test.vcproj'))
test.must_not_exist(test.workpath('Test.sln'))
test.run(arguments='.')
test.must_exist(test.workpath('Test.vcproj'))
test.must_exist(test.workpath('Test.sln'))
test.run(arguments='-c Test.sln')
test.must_exist(test.workpath('Test.vcproj'))
test.must_not_exist(test.workpath('Test.sln'))
test.run(arguments='-c Test.vcproj')
test.must_not_exist(test.workpath('Test.vcproj'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
s142857/servo | tests/wpt/web-platform-tests/tools/manifest/manifest.py | 144 | 12349 | import json
import os
from collections import defaultdict
from item import item_types, ManualTest, WebdriverSpecTest, Stub, RefTest, TestharnessTest
from log import get_logger
from sourcefile import SourceFile
# Serialization format version; bump whenever to_json()/from_json() change
# incompatibly.  from_json() refuses to load any other version.
CURRENT_VERSION = 2
class ManifestError(Exception):
    """Raised when manifest data is malformed or inconsistent."""
    pass
class ManifestVersionMismatch(ManifestError):
    """Raised when a serialized manifest's version is not CURRENT_VERSION."""
    pass
class Manifest(object):
    """Index of test items for a web-platform-tests checkout.

    Items are stored per item type as ``{path: set(manifest_items)}`` in
    ``self._data``, reflecting the state at git revision ``self.rev``.
    Uncommitted modifications are tracked separately in
    ``self.local_changes`` and merged in by ``_included_items``.
    Reftests are additionally kept as a node graph
    (``reftest_nodes`` / ``reftest_nodes_by_url``) so that only roots --
    nodes no other reftest references -- are exposed as "reftest" items.
    """

    def __init__(self, git_rev=None, url_base="/"):
        # Dict of item_type: {path: set(manifest_items)}
        self._data = dict((item_type, defaultdict(set))
                          for item_type in item_types)
        self.rev = git_rev
        self.url_base = url_base
        self.local_changes = LocalChanges(self)
        # reftest nodes arranged as {path: set(manifest_items)}
        self.reftest_nodes = defaultdict(set)
        self.reftest_nodes_by_url = {}

    def _included_items(self, include_types=None):
        """Yield (item_type, {path: items}) pairs, overlaying local
        changes on the committed data and dropping locally deleted paths."""
        if include_types is None:
            include_types = item_types
        for item_type in include_types:
            paths = self._data[item_type].copy()
            for local_types, local_paths in self.local_changes.itertypes(item_type):
                for path, items in local_paths.iteritems():
                    paths[path] = items
            for path in self.local_changes.iterdeleted():
                if path in paths:
                    del paths[path]
            yield item_type, paths

    def contains_path(self, path):
        """Return True if any item type has an entry for ``path``."""
        return any(path in paths for _, paths in self._included_items())

    def add(self, item):
        """Add a manifest item.

        RefTest items are always recorded in the reftest node graph; they
        only become regular items when they are not pure references.
        """
        if item is None:
            return
        is_reference = False
        if isinstance(item, RefTest):
            self.reftest_nodes[item.path].add(item)
            self.reftest_nodes_by_url[item.url] = item
            is_reference = item.is_reference
        if not is_reference:
            self._add(item)
        item.manifest = self

    def _add(self, item):
        # Unconditional insert into the committed data, bypassing the
        # reftest special-casing in add().
        self._data[item.item_type][item.path].add(item)

    def extend(self, items):
        for item in items:
            self.add(item)

    def remove_path(self, path):
        """Drop every committed item stored under ``path``."""
        for item_type in item_types:
            if path in self._data[item_type]:
                del self._data[item_type][path]

    def itertypes(self, *types):
        """Yield (path, items) pairs sorted by path for the given item
        types (all types when none are supplied)."""
        if not types:
            types = None
        for item_type, items in self._included_items(types):
            for item in sorted(items.items()):
                yield item

    def __iter__(self):
        for item in self.itertypes():
            yield item

    def __getitem__(self, path):
        for _, paths in self._included_items():
            if path in paths:
                return paths[path]
        # Note: raised without the missing path as an argument.
        raise KeyError

    def get_reference(self, url):
        """Return the reftest node for ``url``, preferring local changes,
        or None when unknown."""
        if url in self.local_changes.reftest_nodes_by_url:
            return self.local_changes.reftest_nodes_by_url[url]
        if url in self.reftest_nodes_by_url:
            return self.reftest_nodes_by_url[url]
        return None

    def _committed_with_path(self, rel_path):
        """Return all committed items (including reftest nodes) recorded
        at ``rel_path``."""
        rv = set()
        for paths_items in self._data.itervalues():
            rv |= paths_items.get(rel_path, set())
        if rel_path in self.reftest_nodes:
            rv |= self.reftest_nodes[rel_path]
        return rv

    def _committed_paths(self):
        """Return the set of all paths with committed items."""
        rv = set()
        for paths_items in self._data.itervalues():
            rv |= set(paths_items.keys())
        return rv

    def update(self,
               tests_root,
               url_base,
               new_rev,
               committed_changes=None,
               local_changes=None,
               remove_missing_local=False):
        """Bring the manifest up to date.

        :param committed_changes: iterable of (rel_path, status) pairs for
            files changed between ``self.rev`` and ``new_rev``; "modified"
            entries are re-parsed, everything else is just removed.
        :param local_changes: dict of rel_path -> status for uncommitted
            modifications; non-"modified" statuses are treated as deletions.
        :param remove_missing_local: when True, committed paths absent
            from ``local_changes`` are marked locally deleted.
        """
        if local_changes is None:
            local_changes = {}
        if committed_changes is not None:
            for rel_path, status in committed_changes:
                self.remove_path(rel_path)
                if status == "modified":
                    # Parse the committed blob when the file also has local
                    # edits, so committed and local state stay distinct.
                    use_committed = rel_path in local_changes
                    source_file = SourceFile(tests_root,
                                             rel_path,
                                             url_base,
                                             use_committed=use_committed)
                    self.extend(source_file.manifest_items())
        self.local_changes = LocalChanges(self)
        local_paths = set()
        for rel_path, status in local_changes.iteritems():
            local_paths.add(rel_path)
            if status == "modified":
                existing_items = self._committed_with_path(rel_path)
                source_file = SourceFile(tests_root,
                                         rel_path,
                                         url_base,
                                         use_committed=False)
                local_items = set(source_file.manifest_items())
                # Only record items that differ from the committed ones.
                updated_items = local_items - existing_items
                self.local_changes.extend(updated_items)
            else:
                self.local_changes.add_deleted(rel_path)
        if remove_missing_local:
            for path in self._committed_paths() - local_paths:
                self.local_changes.add_deleted(path)
        self.update_reftests()
        if new_rev is not None:
            self.rev = new_rev
        self.url_base = url_base

    def update_reftests(self):
        """Recompute which reftest nodes are test roots.

        A node with no inbound reference is added as a "reftest" item --
        into local changes when any local reftest nodes exist, otherwise
        into the committed data.
        """
        reftest_nodes = self.reftest_nodes.copy()
        for path, items in self.local_changes.reftest_nodes.iteritems():
            reftest_nodes[path] |= items
        #TODO: remove locally deleted files
        # Note: ``tests`` is computed but not currently used below.
        tests = set()
        for items in reftest_nodes.values():
            tests |= set(item for item in items if not item.is_reference)
        # URLs that some other reftest node points at; such nodes are
        # references, not test roots.
        has_inbound = set()
        for path, items in reftest_nodes.iteritems():
            for item in items:
                for ref_url, ref_type in item.references:
                    has_inbound.add(ref_url)
        if self.local_changes.reftest_nodes:
            target = self.local_changes
        else:
            target = self
        #TODO: Warn if there exist unreachable reftest nodes
        for path, items in reftest_nodes.iteritems():
            for item in items:
                if item.url in has_inbound:
                    continue
                target._data["reftest"][path].add(item)

    def to_json(self):
        """Serialize committed items, reftest nodes and local changes to a
        JSON-compatible dict (format CURRENT_VERSION)."""
        out_items = {
            item_type: sorted(
                test.to_json()
                for _, tests in items.iteritems()
                for test in tests
            )
            for item_type, items in self._data.iteritems()
        }
        reftest_nodes = {key:[v.to_json() for v in value]
                         for key, value in self.reftest_nodes.iteritems()}
        rv = {"url_base": self.url_base,
              "rev": self.rev,
              "local_changes": self.local_changes.to_json(),
              "items": out_items,
              "reftest_nodes": reftest_nodes,
              "version": CURRENT_VERSION}
        return rv

    @classmethod
    def from_json(cls, tests_root, obj):
        """Reconstruct a Manifest from to_json() output.

        :raises ManifestVersionMismatch: wrong/missing format version.
        :raises ManifestError: ``obj`` is not a mapping or contains an
            unknown item type.
        """
        version = obj.get("version")
        if version != CURRENT_VERSION:
            raise ManifestVersionMismatch
        self = cls(git_rev=obj["rev"],
                   url_base=obj.get("url_base", "/"))
        if not hasattr(obj, "iteritems"):
            raise ManifestError
        item_classes = {"testharness": TestharnessTest,
                        "reftest": RefTest,
                        "manual": ManualTest,
                        "stub": Stub,
                        "wdspec": WebdriverSpecTest}
        # Shared cache so items for the same path reuse one SourceFile.
        source_files = {}
        for k, values in obj["items"].iteritems():
            if k not in item_types:
                raise ManifestError
            for v in values:
                manifest_item = item_classes[k].from_json(self, tests_root, v,
                                                          source_files=source_files)
                self._add(manifest_item)
        for path, values in obj["reftest_nodes"].iteritems():
            for v in values:
                item = RefTest.from_json(self, tests_root, v,
                                         source_files=source_files)
                self.reftest_nodes[path].add(item)
                self.reftest_nodes_by_url[v["url"]] = item
        self.local_changes = LocalChanges.from_json(self,
                                                    tests_root,
                                                    obj["local_changes"],
                                                    source_files=source_files)
        return self
class LocalChanges(object):
    """Uncommitted modifications overlaid on a Manifest.

    Mirrors Manifest's per-type storage plus a set of locally deleted
    paths; Manifest._included_items() consults both when iterating.
    """

    def __init__(self, manifest):
        self.manifest = manifest
        # Dict of item_type: {path: set(manifest_items)}
        self._data = dict((item_type, defaultdict(set)) for item_type in item_types)
        # Paths deleted (or otherwise not "modified") in the working copy.
        self._deleted = set()
        # reftest nodes arranged as {path: set(manifest_items)}
        self.reftest_nodes = defaultdict(set)
        self.reftest_nodes_by_url = {}

    def add(self, item):
        """Add an item; same reftest special-casing as Manifest.add()."""
        if item is None:
            return
        is_reference = False
        if isinstance(item, RefTest):
            self.reftest_nodes[item.path].add(item)
            self.reftest_nodes_by_url[item.url] = item
            is_reference = item.is_reference
        if not is_reference:
            self._add(item)
        # Items always point back at the owning Manifest, not at this
        # LocalChanges instance.
        item.manifest = self.manifest

    def _add(self, item):
        self._data[item.item_type][item.path].add(item)

    def extend(self, items):
        for item in items:
            self.add(item)

    def add_deleted(self, path):
        self._deleted.add(path)

    def is_deleted(self, path):
        return path in self._deleted

    def itertypes(self, *types):
        """Yield (item_type, {path: items}) for each requested type."""
        for item_type in types:
            yield item_type, self._data[item_type]

    def iterdeleted(self):
        for item in self._deleted:
            yield item

    def __getitem__(self, item_type):
        return self._data[item_type]

    def to_json(self):
        """Serialize local items, reftest nodes and deletions to a
        JSON-compatible dict."""
        reftest_nodes = {key:[v.to_json() for v in value]
                         for key, value in self.reftest_nodes.iteritems()}
        rv = {"items": defaultdict(dict),
              "reftest_nodes": reftest_nodes,
              "deleted": []}
        rv["deleted"].extend(self._deleted)
        for test_type, paths in self._data.iteritems():
            for path, tests in paths.iteritems():
                rv["items"][test_type][path] = [test.to_json() for test in tests]
        return rv

    @classmethod
    def from_json(cls, manifest, tests_root, obj, source_files=None):
        """Reconstruct a LocalChanges from to_json() output.

        :raises ManifestError: ``obj`` is not a mapping.
        """
        self = cls(manifest)
        if not hasattr(obj, "iteritems"):
            raise ManifestError
        item_classes = {"testharness": TestharnessTest,
                        "reftest": RefTest,
                        "manual": ManualTest,
                        "stub": Stub,
                        "wdspec": WebdriverSpecTest}
        for test_type, paths in obj["items"].iteritems():
            for path, tests in paths.iteritems():
                for test in tests:
                    manifest_item = item_classes[test_type].from_json(manifest,
                                                                      tests_root,
                                                                      test,
                                                                      source_files=source_files)
                    self.add(manifest_item)
        for path, values in obj["reftest_nodes"].iteritems():
            for v in values:
                item = RefTest.from_json(self.manifest, tests_root, v,
                                         source_files=source_files)
                self.reftest_nodes[path].add(item)
                self.reftest_nodes_by_url[item.url] = item
        for item in obj["deleted"]:
            self.add_deleted(item)
        return self
def load(tests_root, manifest_path):
    """Load a Manifest serialized at ``manifest_path``.

    A file that cannot be opened or read (IOError) yields an empty
    Manifest; malformed JSON or a version mismatch propagates.
    """
    logger = get_logger()
    if os.path.exists(manifest_path):
        message = "Opening manifest at %s"
    else:
        message = "Creating new manifest at %s"
    logger.debug(message % manifest_path)
    try:
        with open(manifest_path) as f:
            return Manifest.from_json(tests_root, json.load(f))
    except IOError:
        return Manifest(None)
def write(manifest, manifest_path):
    """Serialize ``manifest`` as sorted, pretty-printed JSON at
    ``manifest_path``."""
    serialized = json.dumps(manifest.to_json(), sort_keys=True,
                            indent=2, separators=(',', ': '))
    with open(manifest_path, "w") as f:
        f.write(serialized)
| mpl-2.0 |
piotr1212/carbon | lib/carbon/client.py | 2 | 24444 | from collections import deque, defaultdict
from time import time
from six import with_metaclass
from twisted.application.service import Service
from twisted.internet import reactor
from twisted.internet.defer import Deferred, DeferredList
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.protocols.basic import LineOnlyReceiver, Int32StringReceiver
from carbon.conf import settings
from carbon.util import pickle
from carbon.util import PluginRegistrar, TaggedSeries
from carbon.util import enableTcpKeepAlive
from carbon import instrumentation, log, pipeline, state
try:
from OpenSSL import SSL
except ImportError:
SSL = None
try:
from twisted.internet import ssl
except ImportError:
ssl = None
try:
import signal
except ImportError:
log.debug("Couldn't import signal module")
try:
from carbon.resolver import setUpRandomResolver
except ImportError:
setUpRandomResolver = None
# Queue level (in datapoints) below which a previously-full send queue is
# considered drained enough to fire the queueHasSpace callback and resume
# accepting metrics.
SEND_QUEUE_LOW_WATERMARK = settings.MAX_QUEUE_SIZE * settings.QUEUE_LOW_WATERMARK_PCT
class CarbonClientProtocol(object):
  """Send side of a connection to one destination daemon.

  Concrete subclasses combine this mixin with a Twisted protocol class
  and implement _sendDatapointsNow() for their wire format.  The
  instance registers itself as a streaming (push) producer so the
  transport can pause it on TCP backpressure; the datapoint queue
  itself lives on the factory and survives reconnects.
  """

  def connectionMade(self):
    log.clients("%s::connectionMade" % self)
    self.paused = False
    self.connected = True
    self.transport.registerProducer(self, streaming=True)
    # Define internal metric names
    self.lastResetTime = time()
    self.destination = self.factory.destination
    self.destinationName = self.factory.destinationName
    self.queuedUntilReady = 'destinations.%s.queuedUntilReady' % self.destinationName
    self.sent = 'destinations.%s.sent' % self.destinationName
    self.batchesSent = 'destinations.%s.batchesSent' % self.destinationName
    self.slowConnectionReset = 'destinations.%s.slowConnectionReset' % self.destinationName
    enableTcpKeepAlive(self.transport, settings.TCP_KEEPALIVE, settings)
    d = self.factory.connectionMade
    # Setup a new deferred before calling the callback to allow callbacks
    # to re-register themselves.
    self.factory.connectionMade = Deferred()
    d.callback(self)
    # Flush anything queued while we were disconnected.
    self.sendQueued()

  def connectionLost(self, reason):
    log.clients("%s::connectionLost %s" % (self, reason.getErrorMessage()))
    self.connected = False

  def pauseProducing(self):
    # Called by the transport on backpressure; sendQueued() becomes a no-op.
    self.paused = True

  def resumeProducing(self):
    self.paused = False
    self.sendQueued()

  def stopProducing(self):
    self.disconnect()

  def disconnect(self):
    """Tear down the transport exactly once."""
    if self.connected:
      self.transport.unregisterProducer()
      self.transport.loseConnection()
      self.connected = False

  def sendDatapoint(self, metric, datapoint):
    # Queue on the factory; actual sending is batched via scheduleSend().
    self.factory.enqueue(metric, datapoint)
    self.factory.scheduleSend()

  def _sendDatapointsNow(self, datapoints):
    """Implement this function to actually send datapoints."""
    raise NotImplementedError()

  def sendDatapointsNow(self, datapoints):
    """Send one batch and account for it in instrumentation."""
    self._sendDatapointsNow(datapoints)
    instrumentation.increment(self.sent, len(datapoints))
    instrumentation.increment(self.batchesSent)
    self.factory.checkQueue()

  def sendQueued(self):
    """This should be the only method that will be used to send stats.
    In order to not hold the event loop and prevent stats from flowing
    in while we send them out, this will process
    settings.MAX_DATAPOINTS_PER_MESSAGE stats, send them, and if there
    are still items in the queue, this will invoke reactor.callLater
    to schedule another run of sendQueued after a reasonable enough time
    for the destination to process what it has just received.

    Given a queue size of one million stats, and using a
    chained_invocation_delay of 0.0001 seconds, you'd get 1,000
    sendQueued() invocations/second max.  With a
    settings.MAX_DATAPOINTS_PER_MESSAGE of 100, the rate of stats being
    sent could theoretically be as high as 100,000 stats/sec, or
    6,000,000 stats/minute.  This is probably too high for a typical
    receiver to handle.

    In practice this theoretical max shouldn't be reached because
    network delays should add an extra delay - probably on the order
    of 10ms per send, so the queue should drain with an order of
    minutes, which seems more realistic.
    """
    queueSize = self.factory.queueSize
    if self.paused:
      instrumentation.max(self.queuedUntilReady, queueSize)
      return
    if not self.factory.hasQueuedDatapoints():
      return
    if not self.connectionQualityMonitor():
      self.resetConnectionForQualityReasons("Sent: {0}, Received: {1}".format(
        instrumentation.prior_stats.get(self.sent, 0),
        instrumentation.prior_stats.get('metricsReceived', 0)))
    self.sendDatapointsNow(self.factory.takeSomeFromQueue())
    # Signal that a previously-full queue has drained below the watermark.
    if (self.factory.queueFull.called and queueSize < SEND_QUEUE_LOW_WATERMARK):
      if not self.factory.queueHasSpace.called:
        self.factory.queueHasSpace.callback(queueSize)
    if self.factory.hasQueuedDatapoints():
      self.factory.scheduleSend()

  def connectionQualityMonitor(self):
    """Checks to see if the connection for this factory appears to
    be delivering stats at a speed close to what we're receiving
    them at.

    This is open to other measures of connection quality.

    Returns a Bool

    True means that quality is good, OR
    True means that the total received is less than settings.MIN_RESET_STAT_FLOW

    False means that quality is bad
    """
    if not settings.USE_RATIO_RESET:
      return True
    if settings.DESTINATION_POOL_REPLICAS:
      # With pooled replicas, compare against what this factory was asked
      # to relay rather than the global received count.
      received = self.factory.attemptedRelays
    else:
      received = 'metricsReceived'
    destination_sent = float(instrumentation.prior_stats.get(self.sent, 0))
    total_received = float(instrumentation.prior_stats.get(received, 0))
    # Ensure the counter exists even when no reset happens this interval.
    instrumentation.increment(self.slowConnectionReset, 0)
    if total_received < settings.MIN_RESET_STAT_FLOW:
      return True
    if (destination_sent / total_received) < settings.MIN_RESET_RATIO:
      return False
    else:
      return True

  def resetConnectionForQualityReasons(self, reason):
    """Only re-sets the connection if it's been
    settings.MIN_RESET_INTERVAL seconds since the last re-set.

    Reason should be a string containing the quality info that led to
    a re-set.
    """
    if (time() - self.lastResetTime) < float(settings.MIN_RESET_INTERVAL):
      return
    else:
      self.factory.connectedProtocol.disconnect()
      self.lastResetTime = time()
      instrumentation.increment(self.slowConnectionReset)
      log.clients("%s:: resetConnectionForQualityReasons: %s" % (self, reason))

  def __str__(self):
    return 'CarbonClientProtocol(%s:%d:%s)' % (self.factory.destination)

  __repr__ = __str__
class CAReplaceClientContextFactory:
  """A context factory for SSL clients needing a different CA chain."""
  isClient = 1
  # SSLv23_METHOD allows SSLv2, SSLv3, and TLSv1. We disable SSLv2 below,
  # though.
  # Guarded so the class is importable when pyOpenSSL is absent.
  method = SSL.SSLv23_METHOD if SSL else None
  _cafile = None

  def __init__(self, file=None):
    # file: path to a PEM certificate chain file, or None for the default.
    self._cafile = file

  def getContext(self):
    """Build an SSL.Context, loading the replacement CA chain if given."""
    ctx = SSL.Context(self.method)
    ctx.set_options(SSL.OP_NO_SSLv2)
    if self._cafile is not None:
      ctx.use_certificate_chain_file(self._cafile)
    return ctx
class CarbonClientFactory(with_metaclass(PluginRegistrar, ReconnectingClientFactory, object)):
  """Reconnecting factory owning the datapoint queue for one destination.

  The queue lives here (not on the protocol) so datapoints survive
  reconnects.  Subclasses register themselves in ``plugins`` via
  PluginRegistrar under their ``plugin_name`` and provide
  clientProtocol() for the wire format.  Queue state transitions are
  signalled through single-shot Deferreds (queueEmpty/queueFull/
  queueHasSpace) that are re-armed after each firing.
  """
  plugins = {}
  # Cap for ReconnectingClientFactory's exponential backoff (seconds).
  maxDelay = 5

  def __init__(self, destination, router):
    self.destination = destination
    self.router = router
    # Dots are replaced so the name can be embedded in metric paths.
    self.destinationName = ('%s:%d:%s' % destination).replace('.', '_')
    self.host, self.port, self.carbon_instance = destination
    self.addr = (self.host, self.port)
    self.started = False
    # This factory maintains protocol state across reconnects
    self.queue = deque()  # Change to make this the sole source of metrics to be sent.
    self.connectedProtocol = None
    self.queueEmpty = Deferred()
    self.queueFull = Deferred()
    self.queueFull.addCallbacks(self.queueFullCallback, log.err)
    self.queueHasSpace = Deferred()
    self.queueHasSpace.addCallbacks(self.queueSpaceCallback, log.err)
    # Args: {'connector': connector, 'reason': reason}
    self.connectFailed = Deferred()
    # Args: {'connector': connector, 'reason': reason}
    self.connectionLost = Deferred()
    # Args: protocol instance
    self.connectionMade = Deferred()
    self.connectionMade.addCallbacks(self.clientConnectionMade, log.err)
    self.deferSendPending = None
    # Define internal metric names
    self.attemptedRelays = 'destinations.%s.attemptedRelays' % self.destinationName
    self.fullQueueDrops = 'destinations.%s.fullQueueDrops' % self.destinationName
    self.queuedUntilConnected = 'destinations.%s.queuedUntilConnected' % self.destinationName
    self.relayMaxQueueLength = 'destinations.%s.relayMaxQueueLength' % self.destinationName

  def clientProtocol(self):
    """Return a new protocol instance; implemented by subclasses."""
    raise NotImplementedError()

  def scheduleSend(self):
    """Arm a delayed sendQueued() call, coalescing repeated requests."""
    if self.deferSendPending and self.deferSendPending.active():
      return
    self.deferSendPending = reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.sendQueued)

  def sendQueued(self):
    if self.connectedProtocol:
      self.connectedProtocol.sendQueued()

  def queueFullCallback(self, result):
    state.events.cacheFull()
    log.clients('%s send queue is full (%d datapoints)' % (self, result))

  def queueSpaceCallback(self, result):
    if self.queueFull.called:
      log.clients('%s send queue has space available' % self.connectedProtocol)
      # Re-arm the queueFull deferred for the next overflow.
      self.queueFull = Deferred()
      self.queueFull.addCallbacks(self.queueFullCallback, log.err)
      state.events.cacheSpaceAvailable()
    # Always re-arm queueHasSpace for the next drain event.
    self.queueHasSpace = Deferred()
    self.queueHasSpace.addCallbacks(self.queueSpaceCallback, log.err)

  def buildProtocol(self, addr):
    self.connectedProtocol = self.clientProtocol()
    self.connectedProtocol.factory = self
    return self.connectedProtocol

  def startConnecting(self):  # calling this startFactory yields recursion problems
    self.started = True
    if settings['DESTINATION_TRANSPORT'] == "ssl":
      if not SSL or not ssl:
        print("SSL destination transport request, but no Python OpenSSL available.")
        raise SystemExit(1)
      authority = None
      if settings['DESTINATION_SSL_CA']:
        try:
          with open(settings['DESTINATION_SSL_CA']) as f:
            authority = ssl.Certificate.loadPEM(f.read())
        except IOError:
          print("Failed to read CA chain: %s" % settings['DESTINATION_SSL_CA'])
          raise SystemExit(1)
      # Twisted 14 introduced this function, it might not be around on older installs.
      if hasattr(ssl, "optionsForClientTLS"):
        from six import u
        client = ssl.optionsForClientTLS(u(self.host), authority)
      else:
        client = CAReplaceClientContextFactory(settings['DESTINATION_SSL_CA'])
      self.connector = reactor.connectSSL(self.host, self.port, self, client)
    else:
      self.connector = reactor.connectTCP(self.host, self.port, self)

  def stopConnecting(self):
    self.started = False
    self.stopTrying()
    if self.connectedProtocol and self.connectedProtocol.connected:
      return self.connectedProtocol.disconnect()

  @property
  def queueSize(self):
    return len(self.queue)

  def hasQueuedDatapoints(self):
    return bool(self.queue)

  def takeSomeFromQueue(self):
    """Use self.queue, which is a collections.deque, to pop up to
    settings.MAX_DATAPOINTS_PER_MESSAGE items from the left of the
    queue.
    """
    def yield_max_datapoints():
      for _ in range(settings.MAX_DATAPOINTS_PER_MESSAGE):
        try:
          yield self.queue.popleft()
        except IndexError:
          return
    return list(yield_max_datapoints())

  def checkQueue(self):
    """Check if the queue is empty. If the queue isn't empty or
    doesn't exist yet, then this will invoke the callback chain on the
    self.queryEmpty Deferred chain with the argument 0, and will
    re-set the queueEmpty callback chain with a new Deferred
    object.
    """
    if not self.queue:
      self.queueEmpty.callback(0)
      self.queueEmpty = Deferred()

  def enqueue(self, metric, datapoint):
    self.queue.append((metric, datapoint))

  def enqueue_from_left(self, metric, datapoint):
    # Used for high-priority (carbon self-instrumentation) datapoints.
    self.queue.appendleft((metric, datapoint))

  def sendDatapoint(self, metric, datapoint):
    """Queue a datapoint for delivery, dropping it when the queue is full."""
    instrumentation.increment(self.attemptedRelays)
    instrumentation.max(self.relayMaxQueueLength, self.queueSize)
    if self.queueSize >= settings.MAX_QUEUE_SIZE:
      if not self.queueFull.called:
        self.queueFull.callback(self.queueSize)
      instrumentation.increment(self.fullQueueDrops)
    else:
      self.enqueue(metric, datapoint)
    if self.connectedProtocol:
      self.scheduleSend()
    else:
      instrumentation.increment(self.queuedUntilConnected)

  def sendHighPriorityDatapoint(self, metric, datapoint):
    """The high priority datapoint is one relating to the carbon
    daemon itself.  It puts the datapoint on the left of the deque,
    ahead of other stats, so that when the carbon-relay, specifically,
    is overwhelmed its stats are more likely to make it through and
    expose the issue at hand.

    In addition, these stats go on the deque even when the max stats
    capacity has been reached.  This relies on not creating the deque
    with a fixed max size.
    """
    instrumentation.increment(self.attemptedRelays)
    self.enqueue_from_left(metric, datapoint)
    if self.connectedProtocol:
      self.scheduleSend()
    else:
      instrumentation.increment(self.queuedUntilConnected)

  def startedConnecting(self, connector):
    log.clients("%s::startedConnecting (%s:%d)" % (
      self, connector.host, connector.port))

  def clientConnectionMade(self, client):
    log.clients("%s::connectionMade (%s)" % (self, client))
    # Reset the reconnect backoff after a successful connection.
    self.resetDelay()
    self.destinationUp(client.destination)
    # Re-arm so the next (re)connection fires this callback again.
    self.connectionMade.addCallbacks(self.clientConnectionMade, log.err)
    return client

  def clientConnectionLost(self, connector, reason):
    ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
    log.clients("%s::clientConnectionLost (%s:%d) %s" % (
      self, connector.host, connector.port, reason.getErrorMessage()))
    self.connectedProtocol = None
    self.destinationDown(self.destination)
    args = dict(connector=connector, reason=reason)
    d = self.connectionLost
    # Re-arm before firing so callbacks may re-register themselves.
    self.connectionLost = Deferred()
    d.callback(args)

  def clientConnectionFailed(self, connector, reason):
    ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
    log.clients("%s::clientConnectionFailed (%s:%d) %s" % (
      self, connector.host, connector.port, reason.getErrorMessage()))
    self.destinationDown(self.destination)
    args = dict(connector=connector, reason=reason)
    d = self.connectFailed
    self.connectFailed = Deferred()
    d.callback(args)

  def destinationUp(self, destination):
    log.clients("Destination is up: %s:%d:%s" % destination)
    if not self.router.hasDestination(destination):
      log.clients("Adding client %s:%d:%s to router" % destination)
      self.router.addDestination(destination)
      state.events.resumeReceivingMetrics()

  def destinationDown(self, destination):
    """Handle a lost/failed connection; after enough retries, remove the
    destination from a dynamic router and re-inject queued metrics."""
    # Only blacklist the destination if we tried a lot.
    log.clients("Destination is down: %s:%d:%s (%d/%d)" % (
      destination[0], destination[1], destination[2], self.retries,
      settings.DYNAMIC_ROUTER_MAX_RETRIES))
    # Retries comes from the ReconnectingClientFactory.
    if self.retries < settings.DYNAMIC_ROUTER_MAX_RETRIES:
      return
    if settings.DYNAMIC_ROUTER and self.router.hasDestination(destination):
      log.clients("Removing client %s:%d:%s to router" % destination)
      self.router.removeDestination(destination)
      # Do not receive more metrics if we don't have any usable destinations.
      if not self.router.countDestinations():
        state.events.pauseReceivingMetrics()
      # Re-inject queued metrics.
      metrics = list(self.queue)
      log.clients("Re-injecting %d metrics from %s" % (len(metrics), self))
      for metric, datapoint in metrics:
        state.events.metricGenerated(metric, datapoint)
      self.queue.clear()

  def disconnect(self):
    """Drain the queue, then stop connecting; returns a Deferred that
    fires once the connection is lost or the connect attempt failed."""
    self.queueEmpty.addCallbacks(lambda result: self.stopConnecting(), log.err)
    readyToStop = DeferredList(
      [self.connectionLost, self.connectFailed],
      fireOnOneCallback=True,
      fireOnOneErrback=True)
    self.checkQueue()
    # This can happen if the client is stopped before a connection is ever made
    if (not readyToStop.called) and (not self.started):
      readyToStop.callback(None)
    return readyToStop

  def __str__(self):
    return 'CarbonClientFactory(%s:%d:%s)' % self.destination

  __repr__ = __str__
# Basic clients and associated factories.
class CarbonPickleClientProtocol(CarbonClientProtocol, Int32StringReceiver):
  """Client protocol shipping datapoint batches as length-prefixed pickles."""

  def _sendDatapointsNow(self, datapoints):
    # Protocol 2 keeps the payload readable by Python 2 receivers.
    payload = pickle.dumps(datapoints, protocol=2)
    self.sendString(payload)
class CarbonPickleClientFactory(CarbonClientFactory):
  # Selected via DESTINATION_PROTOCOL = "pickle" (see createFactory()).
  plugin_name = "pickle"

  def clientProtocol(self):
    return CarbonPickleClientProtocol()
class CarbonLineClientProtocol(CarbonClientProtocol, LineOnlyReceiver):
  """Client protocol emitting datapoints in the plaintext line format."""

  def _sendDatapointsNow(self, datapoints):
    for metric, datapoint in datapoints:
      timestamp, raw_value = datapoint[0], datapoint[1]
      if isinstance(raw_value, float):
        # Fixed precision, then strip trailing zeros and a dangling dot.
        value = ("%.10f" % raw_value).rstrip('0').rstrip('.')
      else:
        value = "%d" % raw_value
      self.sendLine("%s %s %d" % (metric, value, timestamp))
class CarbonLineClientFactory(CarbonClientFactory):
  # Selected via DESTINATION_PROTOCOL = "line" (see createFactory()).
  plugin_name = "line"

  def clientProtocol(self):
    return CarbonLineClientProtocol()
class FakeClientFactory(object):
  """Stand-in factory that only buffers datapoints.

  Used while every real destination is down, between detecting the
  outage and pausing metric reception, so that no points are lost.
  """

  def __init__(self):
    # The buffer is not explicitly bounded, but it is implicitly small:
    # it only receives metrics while no destination is available, and the
    # producer is paused as soon as that condition is detected, so it
    # holds at most a few seconds of data.
    self.queue = deque()
    self.started = False

  def startConnecting(self):
    # Nothing to connect to; present for interface compatibility.
    pass

  def sendDatapoint(self, metric, datapoint):
    self.queue.append((metric, datapoint))

  def sendHighPriorityDatapoint(self, metric, datapoint):
    # Priority is irrelevant while buffering; append like any other point.
    self.queue.append((metric, datapoint))

  def reinjectDatapoints(self):
    """Replay everything buffered back into the pipeline, then clear."""
    buffered = list(self.queue)
    log.clients("Re-injecting %d metrics from %s" % (len(buffered), self))
    for metric, datapoint in buffered:
      state.events.metricGenerated(metric, datapoint)
    self.queue.clear()
class CarbonClientManager(Service):
  """Twisted Service owning one CarbonClientFactory per destination.

  Routes datapoints to factories via ``self.router``; when no
  destination is available, points are buffered in a FakeClientFactory
  registered under the key None.
  """

  def __init__(self, router):
    if settings.DESTINATION_POOL_REPLICAS:
      # If we decide to open multiple TCP connection to a replica, we probably
      # want to try to also load-balance accross hosts. In this case we need
      # to make sure rfc3484 doesn't get in the way.
      if setUpRandomResolver:
        setUpRandomResolver(reactor)
      else:
        print("Import error, Twisted >= 17.1.0 needed for using DESTINATION_POOL_REPLICAS.")
        raise SystemExit(1)
    self.router = router
    self.client_factories = {}  # { destination : CarbonClientFactory() }
    # { destination[0:2]: set(CarbonClientFactory()) }
    self.pooled_factories = defaultdict(set)
    # This fake factory will be used as a buffer when we did not manage
    # to connect to any destination.
    fake_factory = FakeClientFactory()
    self.client_factories[None] = fake_factory
    state.events.resumeReceivingMetrics.addHandler(fake_factory.reinjectDatapoints)

  def createFactory(self, destination):
    """Instantiate the factory class registered for DESTINATION_PROTOCOL."""
    factory_name = settings["DESTINATION_PROTOCOL"]
    factory_class = CarbonClientFactory.plugins.get(factory_name)
    if not factory_class:
      print("In carbon.conf, DESTINATION_PROTOCOL must be one of %s. "
            "Invalid value: '%s'" % (', '.join(CarbonClientFactory.plugins), factory_name))
      raise SystemExit(1)
    return factory_class(destination, self.router)

  def startService(self):
    if 'signal' in globals().keys():
      log.debug("Installing SIG_IGN for SIGHUP")
      signal.signal(signal.SIGHUP, signal.SIG_IGN)
    Service.startService(self)
    for factory in self.client_factories.values():
      if not factory.started:
        factory.startConnecting()

  def stopService(self):
    Service.stopService(self)
    return self.stopAllClients()

  def startClient(self, destination):
    """Create and start a factory for ``destination``; returns a Deferred
    firing on the first connect attempt's outcome."""
    if destination in self.client_factories:
      return
    log.clients("connecting to carbon daemon at %s:%d:%s" % destination)
    if not settings.DYNAMIC_ROUTER:
      # If not using a dynamic router we add the destination before
      # it's known to be working.
      self.router.addDestination(destination)
    factory = self.createFactory(destination)
    self.client_factories[destination] = factory
    self.pooled_factories[destination[0:2]].add(factory)
    connectAttempted = DeferredList(
      [factory.connectionMade, factory.connectFailed],
      fireOnOneCallback=True,
      fireOnOneErrback=True)
    if self.running:
      factory.startConnecting()  # this can trigger & replace connectFailed
    return connectAttempted

  def stopClient(self, destination):
    """Disconnect the factory for ``destination``; returns a Deferred or
    None when there is nothing to stop."""
    factory = self.client_factories.get(destination)
    if factory is None or destination is None:
      return None
    self.router.removeDestination(destination)
    stopCompleted = factory.disconnect()
    stopCompleted.addCallbacks(
      lambda result: self.disconnectClient(destination), log.err
    )
    return stopCompleted

  def disconnectClient(self, destination):
    factory = self.client_factories.pop(destination)
    self.pooled_factories[destination[0:2]].remove(factory)
    c = factory.connector
    if c and c.state == 'connecting' and not factory.hasQueuedDatapoints():
      c.stopConnecting()

  def stopAllClients(self):
    deferreds = []
    for destination in list(self.client_factories):
      deferred = self.stopClient(destination)
      if deferred:
        deferreds.append(deferred)
    return DeferredList(deferreds)

  def getDestinations(self, metric):
    destinations = list(self.router.getDestinations(metric))
    # If we can't find any destination we just buffer the
    # points. We will also pause the socket on the receiving side.
    if not destinations:
      return [None]
    return destinations

  def getFactories(self, metric):
    """Return the set of factories that should receive ``metric``."""
    destinations = self.getDestinations(metric)
    factories = set()
    if not settings.DESTINATION_POOL_REPLICAS:
      # Simple case, with only one replica per destination.
      for d in destinations:
        # If we can't find it, we add to the 'fake' factory / buffer.
        factories.add(self.client_factories.get(d))
    else:
      # Here we might have multiple replicas per destination.
      for d in destinations:
        if d is None:
          # d == None means there are no destinations currently available, so
          # we just put the data into our fake factory / buffer.
          factories.add(self.client_factories[None])
        else:
          # Else we take the replica with the smallest queue size.
          key = d[0:2]  # Take only host:port, not instance.
          factories.add(min(self.pooled_factories[key], key=lambda f: f.queueSize))
    return factories

  def sendDatapoint(self, metric, datapoint):
    for factory in self.getFactories(metric):
      factory.sendDatapoint(metric, datapoint)

  def sendHighPriorityDatapoint(self, metric, datapoint):
    for factory in self.getFactories(metric):
      factory.sendHighPriorityDatapoint(metric, datapoint)

  def __str__(self):
    return "<%s[%x]>" % (self.__class__.__name__, id(self))
class RelayProcessor(pipeline.Processor):
  """Pipeline stage that hands each datapoint to the client manager."""
  plugin_name = 'relay'

  def process(self, metric, datapoint):
    if settings.TAG_RELAY_NORMALIZED:
      # normalize metric name
      try:
        metric = TaggedSeries.parse(metric).path
      except Exception as err:
        log.msg('Error parsing metric %s: %s' % (metric, err))
        # continue anyway with processing the unnormalized metric for robustness
    state.client_manager.sendDatapoint(metric, datapoint)
    # Relaying is terminal: nothing is passed to later pipeline stages.
    return pipeline.Processor.NO_OUTPUT
| apache-2.0 |
mrquim/repository.mrquim | repo/script.module.schism.common/lib/jsbeautifier/unpackers/__init__.py | 173 | 2316 | #
# General code for JSBeautifier unpackers infrastructure. See README.specs
# written by Stefano Sanfilippo <a.little.coder@gmail.com>
#
"""General code for JSBeautifier unpackers infrastructure."""
import pkgutil
import re
from jsbeautifier.unpackers import evalbased
# Module names getunpackers() must never auto-load.
# NOTE: AT THE MOMENT, IT IS DEACTIVATED FOR YOUR SECURITY: it runs js!
BLACKLIST = ['jsbeautifier.unpackers.evalbased']
class UnpackingError(Exception):
    """Badly packed source or general unpacking error.

    The exception argument is a meaningful description of the problem.
    """
    pass
def getunpackers():
    """Scan the unpackers package and return its modules sorted by PRIORITY.

    A module is loaded only when it is importable, is not a test helper
    (name containing 'tests') and is not blacklisted (i.e. listed in
    BLACKLIST).
    """
    prefix = __name__ + '.'
    required_api = ['unpack', 'detect', 'PRIORITY']
    loaded = []
    for _importer, modname, _ispkg in pkgutil.iter_modules(__path__, prefix):
        if 'tests' in modname or modname in BLACKLIST:
            continue
        try:
            loaded.append(__import__(modname, fromlist=required_api))
        except ImportError:
            raise UnpackingError('Bad unpacker: %s' % modname)
    return sorted(loaded, key=lambda mod: mod.PRIORITY)
# Unpacker modules discovered at import time, lowest PRIORITY first.
UNPACKERS = getunpackers()
def run(source, evalcode=False):
    """Run every applicable unpacker over *source*; return the result.

    Detection is evaluated against the original input for all unpackers
    before any unpacking is applied (preserving the historical semantics).
    The eval-based unpacker only runs when *evalcode* is explicitly true.
    """
    applicable = [module for module in UNPACKERS if module.detect(source)]
    for unpacker in applicable:
        source = unpacker.unpack(source)
    if evalcode and evalbased.detect(source):
        source = evalbased.unpack(source)
    return source
def filtercomments(source):
    """NOT USED: strip leading comments off *source* and collect them on top.

    Repeatedly peels ``/* ... */`` blocks and ``//`` markers off the start of
    *source*, then returns the collected comments joined with newlines,
    followed by the remaining source.
    """
    trailing_comments = []
    comment = True
    while comment:
        if re.search(r'^\s*\/\*', source):
            # BUG FIX: the original wrote source[0, index] which indexes the
            # string with a tuple and raises TypeError; a slice was intended.
            comment = source[:source.index('*/') + 2]
        elif re.search(r'^\s*\/\/', source):
            # NOTE(review): this captures only the leading "//" marker, not
            # the whole comment line -- preserved as-is pending confirmation.
            comment = re.search(r'^\s*\/\/', source).group(0)
        else:
            comment = None
        if comment:
            source = re.sub(r'^\s+', '', source[len(comment):])
            trailing_comments.append(comment)
    return '\n'.join(trailing_comments) + source
| gpl-2.0 |
graetzer/arangodb | 3rdParty/boost/1.62.0/libs/mpl/preprocessed/pp.py | 45 | 9005 |
# Copyright Aleksey Gurtovoy 2001-2004
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
# See http://www.boost.org/libs/mpl for documentation.
# $Id$
# $Date$
# $Revision$
import fileinput
import os
import re
import string
import sys
# Poor man's conditional expression (predates "b if a else c").
if_else = lambda a,b,c:(a and [b] or [c])[0]
# Target maximum output line length and the indentation unit (in spaces).
max_len = 79
ident = 4
def nearest_ident_pos(text, unit=None):
    """Return the largest multiple of the indent unit not exceeding len(text).

    ``unit`` defaults to the module-wide ``ident`` setting; pass an int to
    override. Floor division is used explicitly so the result stays an int
    under Python 3 as well (plain ``/`` would produce a float there).
    """
    if unit is None:
        unit = ident
    return (len(text) // unit) * unit
def block_format(limits, text, first_sep=' ', sep=',', need_last_ident=1 ):
    """Re-wrap *text* into lines no longer than limits[1], each indented by
    limits[0] spaces, splitting on *sep* and re-joining with *sep* at the
    start of continuation lines. Python 2 only (string module, reduce)."""
    # Template argument lists (containing '<') get a space after each comma.
    if sep == ',' and string.find( text, '<' ) != -1:
        sep = '%s ' % sep
    # Collapse all internal whitespace, then split into separator-bounded words.
    words = string.split(
        string.join( string.split( text ), ' ' )
        , sep
        )
    s = ' ' * limits[0]
    max_len = limits[1]
    # The reduce accumulator is (formatted_text, current_line_len, next_sep);
    # each word either continues the current line or starts a new indented one.
    return '%s\n%s' \
        % (
            reduce(
                lambda t,w,max_len=max_len,s=s,sep=sep:
                    if_else(t[1] + len(w) < max_len
                        , ('%s%s%s'% (t[0],t[2],w), t[1]+len(w)+len(t[2]), sep)
                        , ('%s\n%s%s%s'% (t[0],s,sep,w), len(s)+len(w)+len(sep), sep)
                        )
                , words
                , (s,len(s)+len(first_sep),first_sep)
                )[0]
            , if_else(need_last_ident,s,'')
            )
def handle_args( match ):
    """re.sub callback for re_templ_args: reflow a template argument list
    with block_format; typedef/struct/static lines pass through unchanged."""
    if re.compile('^\s*(typedef|struct|static)\s+.*?$').match(match.group(0)):
        return match.group(0)
    # group(1): leading indent, group(2): first separator, group(3): the args.
    return '%s'\
        % block_format(
            (nearest_ident_pos(match.group(1)),max_len)
            , match.group(3)
            , match.group(2)
            , ','
            , 0
            )
def handle_inline_args(match):
    """re.sub callback for re_inline_templ_args: break an over-long inline
    template-id onto multiple lines; short matches are left untouched."""
    if len(match.group(0)) < max_len:
        return match.group(0)
    # group(9) is the optional trailing "struct ..."/"class ..." part; when
    # absent only the head and argument block are emitted.
    if match.group(9) == None:
        return '%s%s<\n%s>\n'\
            % (
                match.group(1)
                , match.group(3)
                , block_format(
                    (nearest_ident_pos(match.group(1))+ident,max_len)
                    , match.group(4)
                    )
                )
    return '%s%s<\n%s>\n%s%s'\
        % (
            match.group(1)
            , match.group(3)
            , block_format(
                (nearest_ident_pos(match.group(1))+ident,max_len-len(match.group(9)))
                , match.group(4)
                )
            , string.replace(match.group(1),',',' ')
            , match.group(9)
            )
def handle_simple_list(match):
    """re.sub callback for re_simple_list: squeeze all whitespace out of a
    one-level template argument list and re-wrap it -- 'T<arg>' for a single
    argument, 'T< args >' otherwise. 'template<...>' heads pass through."""
    head = match.group(1)
    if head == 'template':
        return match.group(0)
    squeezed_args = ''.join(match.group(2).split())
    single_arg = re.compile('^\s*(\w|\d)+\s*$').match(match.group(2))
    template = '%s<%s>' if single_arg else '%s< %s >'
    return template % (head, squeezed_args)
def handle_static(match):
    """re.sub callback for re_static_const: reflow an over-long
    BOOST_STATIC_CONSTANT/enum value definition; short matches untouched."""
    if len(match.group(0)) < max_len:
        return match.group(0)
    # Lines containing '+' are continued with '+' on wrapped lines.
    (first_sep,sep) = if_else(string.find(match.group(0),'+') == -1, (' ',' '),(' ','+'))
    return '%s%s\n%s%s' %\
        (
            match.group(1)
            , string.join(string.split(match.group(2)), ' ')
            , block_format(
                (nearest_ident_pos(match.group(1))+ident,max_len)
                , match.group(4)
                , first_sep
                , sep
                )
            , match.group(5)
            )
def handle_typedefs(match):
    """re.sub callback for re_typedefs: split multiple typedefs that share a
    line into one typedef per line, preserving the original indentation."""
    if string.count(match.group(2), ';') == 1:
        return match.group(0)
    join_sep = ';\n%s' % match.group(1)
    return '%s%s\n' \
        % (
            match.group(1)
            , string.join(map(string.strip, string.split(match.group(2), ';')), join_sep)
            )
def fix_angle_brackets( match ):
    """re.sub callback: rewrite a run of closing angle brackets so that
    consecutive '>' characters are separated by exactly one space, keeping
    the trailing ',' or newline capture (group 3) intact."""
    brackets = match.group(1).replace( ' ', '' )
    return ' '.join( brackets ) + match.group(3)
class pretty:
    """Line-oriented filter that rewrites a preprocessed Boost.MPL header
    into its canonical checked-in form: strips junk lines, restores blank
    lines, and reflows template machinery via the handle_* regex callbacks."""

    def __init__(self, name):
        """Open output file *name* for writing and pre-compile all regexes."""
        self.output = open(name, "w")
        self.prev_line = ''
        self.re_copyright_start = re.compile( r'^// Copyright .*$' )
        self.re_copyright_end = re.compile( r'^// See .* for documentation.$' )
        self.reading_copyright = 0
        self.copyright = None
        # Matches the "$Id: <name>.hpp ...$" header comment for this file.
        self.re_header_name_comment = re.compile(
            r'^\s*//\s+\$[I]d:\s+(.*?%s\.hpp)\s+[^$]+[$]$'
                % os.path.splitext( name )[0]
            )
        self.header_was_written = 0
        self.re_junk = re.compile(r'^\s*(#|//[^/]|////).*$')
        self.re_c_comment_start = re.compile(r'^\s*/\*.*')
        self.re_c_comment_end = re.compile(r'^.*\*/\s*$')
        self.inside_c_comment = 0
        self.re_empty_line = re.compile(r'^\s*$')
        self.re_comma = re.compile(r'(\S+)\s*,\s*')
        self.re_assign = re.compile(r'(\S+[^<|^!|^>])\s*(=+)\s*(\S+)')
        self.re_marked_empty_comment = re.compile(r'^\s*//\s*$')
        self.re_typedef = re.compile(r'^\s+typedef\s+.*?;$')
        self.re_nsl = re.compile(r'^(\s+typedef\s+.*?;|\s*(private|public):\s*|\s*{\s*|\s*(\w|\d|,)+\s*)$')
        self.re_templ_decl = re.compile(r'^(\s*template\s*<\s*.*?|\s*(private|public):\s*)$')
        self.re_type_const = re.compile(r'(const)\s+((unsigned|signed)?(bool|char|short|int|long))')
        #self.re_templ_args = re.compile(r'^(\s*)(, | {2})((.*::.*?,?)+)\s*$')
        self.re_templ_args = re.compile(r'^(\s*)(, | {2})((\s*(\w+)(\s+|::)\w+\s*.*?,?)+)\s*$')
        self.re_inline_templ_args = re.compile(
            r'^(\s+(,|:\s+)?|struct\s+)(\w+)\s*<((\s*(typename\s+)?\w+\s*(=\s*.*|<(\s*\w+\s*,?)+>\s*)?,?)+)\s*>\s+((struct|class).*?)?$'
            )
        self.re_simple_list = re.compile(r'(\w+)\s*<((\w|,| |-)+)>')
        self.re_static_const = re.compile(r'(\s*)((BOOST_STATIC_CONSTANT\(\s*\w+,\s*|enum\s*\w*\s*{\s*)value\s*=)(.*?)([}|\)];)$')
        self.re_typedefs = re.compile(r'(\s*)((\s*typedef\s*.*?;)+)\s*$')
        self.re_fix_angle_brackets = re.compile( r'(>(\s*>)+)(,|\n$)' )
        self.re_closing_curly_brace = re.compile(r'^(}|struct\s+\w+);\s*$')
        self.re_namespace_scope_templ = re.compile(r'^template\s*<\s*$')
        self.re_namespace = re.compile(r'^\n?namespace\s+\w+\s*{\s*\n?$')

    def process(self, line):
        """Transform one input *line* and append the result to the output.

        Stateful: tracks the copyright banner, whether the file header has
        been emitted, C-comment nesting, and the previously emitted line."""
        # Accumulate the copyright banner until its closing line is seen.
        if self.reading_copyright:
            if not self.re_copyright_end.match( line ):
                self.copyright += line
                return
            self.reading_copyright = 0
        if not self.header_was_written and self.re_copyright_start.match( line ):
            self.copyright = line
            self.reading_copyright = 1
            return
        # searching for header line
        if not self.header_was_written:
            if self.re_header_name_comment.match( line ):
                self.header_was_written = 1
                match = self.re_header_name_comment.match( line )
                self.output.write( \
                    '\n%s\n' \
                    '// *Preprocessed* version of the main "%s" header\n' \
                    '// -- DO NOT modify by hand!\n\n' \
                    % ( self.copyright, match.group(1) )
                    )
            return
        # skipping preprocessor directives, comments, etc.
        if self.re_junk.match(line):
            return
        if self.inside_c_comment or self.re_c_comment_start.match(line):
            self.inside_c_comment = not self.re_c_comment_end.match(line)
            return
        # restoring some empty lines
        if self.re_templ_decl.match(line) and self.re_typedef.match(self.prev_line) \
            or not self.re_empty_line.match(line) and self.re_closing_curly_brace.match(self.prev_line) \
            or not self.re_empty_line.match(self.prev_line) \
                and ( self.re_namespace_scope_templ.match(line) \
                    or self.re_namespace.match(line) and not self.re_namespace.match(self.prev_line) \
                    ):
            line = '\n%s' % line
        # removing excessive empty lines
        if self.re_empty_line.match(line):
            if self.re_empty_line.match(self.prev_line) or not self.header_was_written:
                return
            # skip empty line after typedef
            if self.re_nsl.match(self.prev_line):
                return
        # formatting: order matters -- simple spacing fixes first, then the
        # template reflow callbacks, then bracket normalization last.
        line = self.re_comma.sub( r'\1, ', line )
        line = self.re_assign.sub( r'\1 \2 \3', line )
        line = self.re_marked_empty_comment.sub( r'\n', line )
        line = self.re_type_const.sub( r'\2 \1', line )
        line = self.re_templ_args.sub( handle_args, line )
        line = self.re_inline_templ_args.sub( handle_inline_args, line )
        line = self.re_simple_list.sub( handle_simple_list, line)
        line = self.re_static_const.sub( handle_static, line )
        line = self.re_typedefs.sub( handle_typedefs, line )
        line = self.re_fix_angle_brackets.sub( fix_angle_brackets, line )
        # write the output
        self.output.write(line)
        self.prev_line = line
def main( src, dest ):
    """Run every line of *src* through the prettifier; output is written to
    a file named after *dest*'s basename in the current directory."""
    printer = pretty( os.path.basename( dest ) )
    for raw_line in fileinput.input( src ):
        printer.process( raw_line )

if __name__ == '__main__':
    # usage: pp.py <input-header> <output-header>
    main( sys.argv[1], sys.argv[2] )
| apache-2.0 |
JohnnyKing94/pootle | pootle/apps/pootle_store/migrations/0027_unit_created_by_squashed_0055_fill_unit_source_data.py | 5 | 16954 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-09 15:35
from __future__ import unicode_literals
import logging
import os
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import pootle.core.user
from pootle.core.batch import Batch
from pootle.core.url_helpers import split_pootle_path
from pootle.core.user import get_system_user_id
from pootle_statistics.models import SubmissionTypes
# Module-level logger for the data-migration helpers below.
logger = logging.getLogger(__name__)

# Submission.type value that marks a unit-creation submission.
UNIT_CREATE_TYPE = 10

# Raw MySQL fast path: copy source metadata for every unit into the new
# pootle_store_unit_source table (%s placeholders: created_with, created_by).
UNIT_SOURCE_SQL = (
    "INSERT INTO `pootle_store_unit_source` "
    " (`unit_id`, `created_with`, `created_by_id`, `source_hash`, `source_length`, `source_wordcount`) "
    "(SELECT `id` as `unit_id`, "
    " %s as `created_with`, "
    " %s as `created_by_id`, "
    " `source_hash` as `source_hash`,"
    " `source_length` as `source_length`,"
    " `source_wordcount` as `source_wordcount`"
    " from `pootle_store_unit`)")

# Raw MySQL fast path: create unit-change rows for units that carry any
# comment/review/submit metadata (%s placeholder: changed_with).
UNIT_CHANGE_SQL = (
    "INSERT INTO `pootle_store_unit_change` "
    " (`unit_id`, `changed_with`, `commented_on`, `commented_by_id`, `reviewed_on`, `reviewed_by_id`, `submitted_on`, `submitted_by_id`) "
    " (SELECT `id` as `unit_id`, "
    " %s as `changed_with`, "
    " `commented_on` as `commented_on`,"
    " `commented_by_id` as `commented_by_id`,"
    " `reviewed_on` as `reviewed_on`,"
    " `reviewed_by_id` as `reviewed_by_id`,"
    " `submitted_on` as `submitted_on`,"
    " `submitted_by_id` as `submitted_by_id`"
    " from `pootle_store_unit` "
    " WHERE "
    " ((`pootle_store_unit`.`translator_comment` is not NULL "
    " AND (`commented_on` is not NULL "
    " OR `commented_by_id` is not NULL)) "
    " OR (`reviewed_on` is not NULL "
    " OR `reviewed_by_id` is not NULL) "
    " OR (`submitted_on` is not NULL "
    " OR `submitted_by_id` is not NULL)))")

# Template for dropping all migrated columns in one ALTER TABLE statement.
UNIT_DELETE_COLS_SQL = (
    "ALTER TABLE `pootle_store_unit` %s")

# Unit columns whose data moved to UnitSource/UnitChange and which are
# dropped at the end of this squashed migration.
REMOVED_UNIT_COLS = [
    "source_hash",
    "source_length",
    "source_wordcount",
    "commented_on",
    "commented_by_id",
    "reviewed_by_id",
    "reviewed_on",
    "submitted_by_id",
    "submitted_on"]
class OLDSuggestionStates(object):
    # Legacy CharField values used by Suggestion.state before the
    # SuggestionState model introduced by this squashed migration.
    PENDING = 'pending'
    ACCEPTED = 'accepted'
    REJECTED = 'rejected'
def _missing_changes(apps):
    """Return a queryset of Units that carry comment/review/submit metadata
    but have no related UnitChange row yet (rows still to be backfilled)."""
    units = apps.get_model("pootle_store.Unit").objects.all()
    changeless = units.filter(change__isnull=True)
    # Commented units only count when they actually have a comment body.
    missing_comments = changeless.filter(translator_comment__gt="")
    missing_comments = (
        missing_comments.filter(commented_by_id__isnull=False)
        | missing_comments.filter(commented_on__isnull=False))
    missing_reviews = (
        changeless.filter(reviewed_by_id__isnull=False)
        | changeless.filter(reviewed_on__isnull=False))
    missing_submits = (
        changeless.filter(submitted_by_id__isnull=False)
        | changeless.filter(submitted_on__isnull=False))
    return (missing_comments | missing_reviews | missing_submits)
def create_sources_with_orm(apps, creators):
    """Backfill UnitSource rows for every unit via batched ORM inserts.

    ``creators`` maps unit id -> creating user id; units without an entry
    fall back to the system user.
    """
    sysuser = get_system_user_id()
    UnitSource = apps.get_model("pootle_store.UnitSource")
    units = apps.get_model("pootle_store.Unit").objects.all()
    _units = list(
        units.values_list(
            "id",
            "source_hash",
            "source_length",
            "source_wordcount").iterator())

    def _unit_source_create(pk, source_hash, source_length, source_wordcount):
        # Build the kwargs for one UnitSource row from a values_list tuple.
        # NOTE(review): this path tags rows with SubmissionTypes.WEB while
        # the SQL path uses SubmissionTypes.SYSTEM -- confirm intentional.
        return dict(
            unit_id=pk,
            source_hash=source_hash,
            source_wordcount=source_wordcount,
            source_length=source_length,
            created_with=SubmissionTypes.WEB,
            created_by_id=creators.get(pk, sysuser))

    Batch(UnitSource.objects, batch_size=500).create(
        _units,
        _unit_source_create,
        reduces=False)
def update_sources_with_orm(apps, creators):
    """Set created_by on already-inserted UnitSource rows for the units
    listed in ``creators`` (unit id -> creating user id), in batches."""
    UnitSource = apps.get_model("pootle_store.UnitSource")

    def _set_created_by(unit_source):
        unit_source.created_by_id = creators.get(unit_source.unit_id)
        return unit_source

    Batch(UnitSource.objects, batch_size=500).update(
        list(UnitSource.objects.filter(unit_id__in=creators.keys())),
        update_method=_set_created_by,
        update_fields=["created_by"],
        reduces=False)
def create_changes_with_orm(apps):
    """Backfill UnitChange rows (via batched ORM inserts) for every unit
    that has change metadata but no UnitChange row yet."""
    UnitChange = apps.get_model("pootle_store.UnitChange")
    missing = list(
        _missing_changes(apps).iterator())

    def _unit_change_create(unit):
        # Copy the legacy per-unit change columns into the new row.
        return dict(
            unit_id=unit.pk,
            changed_with=SubmissionTypes.WEB,
            commented_by_id=unit.commented_by_id,
            commented_on=unit.commented_on,
            reviewed_by_id=unit.reviewed_by_id,
            reviewed_on=unit.reviewed_on,
            submitted_by_id=unit.submitted_by_id,
            submitted_on=unit.submitted_on)

    Batch(UnitChange.objects, batch_size=500).create(
        missing,
        _unit_change_create,
        reduces=False)
def create_sources_with_sql(schema_editor):
    """MySQL fast path: backfill all UnitSource rows with one raw INSERT,
    attributing creation to the system user."""
    sysuser = get_system_user_id()
    cursor = schema_editor.connection.cursor()
    cursor.execute(UNIT_SOURCE_SQL % (SubmissionTypes.SYSTEM, sysuser))
def create_changes_with_sql(schema_editor):
    """MySQL fast path: backfill all UnitChange rows with one raw INSERT."""
    cursor = schema_editor.connection.cursor()
    cursor.execute(UNIT_CHANGE_SQL % SubmissionTypes.WEB)
def add_unit_sources(apps, schema_editor):
    """Create UnitSource rows for all units, attributing creation to the
    submitter of each unit's creation submission where one exists."""
    subs = apps.get_model("pootle_statistics.Submission").objects.all()
    sysuser = get_system_user_id()
    # unit id -> non-system user who created the unit.
    creators = dict(
        subs.filter(type=UNIT_CREATE_TYPE)
        .exclude(submitter_id=sysuser)
        .values_list("unit_id", "submitter"))
    if schema_editor.connection.vendor == "mysql" and settings.POOTLE_SQL_MIGRATIONS:
        # Raw-SQL bulk insert, then fix up non-system creators via the ORM.
        create_sources_with_sql(schema_editor)
        update_sources_with_orm(apps, creators)
    else:
        create_sources_with_orm(apps, creators)
def add_unit_changes(apps, schema_editor):
    """Create UnitChange rows, using the raw-SQL fast path on MySQL when
    POOTLE_SQL_MIGRATIONS is enabled, the ORM path otherwise."""
    if schema_editor.connection.vendor == "mysql" and settings.POOTLE_SQL_MIGRATIONS:
        create_changes_with_sql(schema_editor)
    else:
        create_changes_with_orm(apps)
def convert_unit_source_change(apps, schema_editor):
    """RunPython entry point: backfill both unit-source and unit-change
    tables from the legacy Unit columns."""
    add_unit_sources(apps, schema_editor)
    add_unit_changes(apps, schema_editor)
def add_default_suggestion_states(apps, schema_editor):
    """Data migration: seed the three canonical SuggestionState rows."""
    state_manager = apps.get_model("pootle_store.SuggestionState").objects
    for state_name in ("pending", "accepted", "rejected"):
        state_manager.create(name=state_name)
def set_suggestion_states(apps, schema_editor):
    """Map each suggestion's legacy string state (tmp_state) onto the new
    SuggestionState foreign key."""
    # TODO: add sql path for this update
    suggestions = apps.get_model("pootle_store.Suggestion").objects.all()
    states = apps.get_model("pootle_store.SuggestionState").objects.all()
    pending = states.get(name="pending")
    accepted = states.get(name="accepted")
    rejected = states.get(name="rejected")
    suggestions.filter(tmp_state=OLDSuggestionStates.PENDING).update(state_id=pending.id)
    suggestions.filter(tmp_state=OLDSuggestionStates.ACCEPTED).update(state_id=accepted.id)
    suggestions.filter(tmp_state=OLDSuggestionStates.REJECTED).update(state_id=rejected.id)
def clean_abs_file_paths(apps, schema_editor):
    """Replace wrong absolute store file paths by proper relative paths
    built based on store.pootle_path values.
    """
    store_model = apps.get_model("pootle_store.Store")
    # Only non-GNU-layout projects are affected; broken rows start with "/".
    stores = store_model.objects.filter(
        translation_project__project__treestyle="nongnu")
    stores = stores.filter(file__startswith="/").only("file", "pootle_path")
    to_update = []
    for store in stores.iterator():
        lang, prj, d, fn = split_pootle_path(store.pootle_path)
        # nongnu layout stores files as <project>/<language>/<dir>/<file>.
        store.file = os.path.join(prj, lang, d, fn)
        to_update.append(store)
    if to_update:
        result = Batch(store_model, batch_size=500).update(
            to_update,
            update_fields=["file"],
            reduces=False)
        logger.debug("Cleaned %s store paths" % result)
def remove_fields_with_sql(apps, schema_editor):
    """MySQL fast path: drop all migrated Unit columns in one ALTER TABLE,
    first removing any foreign-key constraints attached to them."""
    Unit = apps.get_model("pootle_store.Unit")
    cursor = schema_editor.connection.cursor()
    for col in REMOVED_UNIT_COLS:
        # NOTE(review): REMOVED_UNIT_COLS holds db column names (e.g.
        # "commented_by_id") -- confirm _meta.get_field resolves these.
        field = Unit._meta.get_field(col)
        if field.remote_field:
            fk_names = schema_editor._constraint_names(
                Unit, [field.column], foreign_key=True)
            for fk_name in fk_names:
                cursor.execute(
                    schema_editor._delete_constraint_sql(
                        schema_editor.sql_delete_fk, Unit, fk_name))
    cursor.execute(
        UNIT_DELETE_COLS_SQL
        % (", ".join(
            [("DROP COLUMN `%s`" % col)
             for col in REMOVED_UNIT_COLS])))
def remove_fields(apps, schema_editor):
    # Only the MySQL fast path drops columns here; on other backends the
    # RemoveFieldIfExists operations in the Migration perform the change.
    if schema_editor.connection.vendor == "mysql" and settings.POOTLE_SQL_MIGRATIONS:
        remove_fields_with_sql(apps, schema_editor)
class RemoveFieldIfExists(migrations.RemoveField):
    """RemoveField variant that becomes a no-op on MySQL when the raw-SQL
    fast path (remove_fields_with_sql) has already dropped the column."""

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        if schema_editor.connection.vendor == "mysql" and settings.POOTLE_SQL_MIGRATIONS:
            return
        super(RemoveFieldIfExists, self).database_forwards(
            app_label, schema_editor, from_state, to_state)
class Migration(migrations.Migration):
    """Squashed migration standing in for the 0027..0055 chain: renames the
    pootle_store tables, introduces UnitSource/UnitChange/SuggestionState,
    backfills their data, and drops the superseded Unit columns."""

    replaces = [
        (b'pootle_store', '0027_unit_created_by'),
        (b'pootle_store', '0028_set_created_by'),
        (b'pootle_store', '0029_unit_tablename'),
        (b'pootle_store', '0030_store_tablename'),
        (b'pootle_store', '0031_suggestion_tablename'),
        (b'pootle_store', '0032_qc_tablename'),
        (b'pootle_store', '0033_conditionally_remove_unit_created_by'),
        (b'pootle_store', '0034_unitsource'),
        (b'pootle_store', '0035_set_created_by_again'),
        (b'pootle_store', '0036_unitchange'),
        (b'pootle_store', '0037_unitsource_fields'),
        (b'pootle_store', '0038_suggestion_tmp_state'),
        (b'pootle_store', '0039_set_suggestion_tmp_state'),
        (b'pootle_store', '0040_remove_suggestion_state'),
        (b'pootle_store', '0041_suggestionstate'),
        (b'pootle_store', '0042_add_default_suggestion_states'),
        (b'pootle_store', '0043_suggestion_state'),
        (b'pootle_store', '0044_set_new_suggestion_states'),
        (b'pootle_store', '0045_remove_suggestion_tmp_state'),
        (b'pootle_store', '0046_unit_source_one_to_one'),
        (b'pootle_store', '0047_remove_old_unit_fields'),
        (b'pootle_store', '0048_set_change_commented'),
        (b'pootle_store', '0049_remove_unit_commented'),
        (b'pootle_store', '0050_set_change_reviewed'),
        (b'pootle_store', '0051_remove_unit_reviewed'),
        (b'pootle_store', '0052_set_change_submitted'),
        (b'pootle_store', '0053_remove_unit_submitted'),
        (b'pootle_store', '0054_clean_abs_file_paths'),
        (b'pootle_store', '0055_fill_unit_source_data')]

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pootle_store', '0026_suggestion_on_delete_user'),
        ('pootle_statistics', '0001_initial'),
    ]

    # Operation order matters: table renames first, then the new models,
    # then the data backfills, and column removals only at the very end.
    operations = [
        migrations.AlterModelTable(
            name='unit',
            table='pootle_store_unit',
        ),
        migrations.AlterModelTable(
            name='store',
            table='pootle_store_store',
        ),
        migrations.AlterModelTable(
            name='suggestion',
            table='pootle_store_suggestion',
        ),
        migrations.AlterModelTable(
            name='qualitycheck',
            table='pootle_store_qualitycheck',
        ),
        migrations.CreateModel(
            name='UnitSource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_with', models.IntegerField(db_index=True, default=5)),
                ('created_by', models.ForeignKey(default=pootle.core.user.get_system_user_id, on_delete=models.SET(pootle.core.user.get_system_user), related_name='created_units', to=settings.AUTH_USER_MODEL)),
                ('unit', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='unit_source', to='pootle_store.Unit')),
                ('source_hash', models.CharField(editable=False, max_length=32, null=True)),
                ('source_length', models.SmallIntegerField(default=0, editable=False)),
                ('source_wordcount', models.SmallIntegerField(default=0, editable=False)),
            ],
            options={
                'abstract': False,
                'db_table': 'pootle_store_unit_source',
            },
        ),
        migrations.CreateModel(
            name='UnitChange',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('changed_with', models.IntegerField(db_index=True)),
                ('submitted_on', models.DateTimeField(db_index=True, null=True)),
                ('commented_on', models.DateTimeField(db_index=True, null=True)),
                ('reviewed_on', models.DateTimeField(db_index=True, null=True)),
                ('commented_by', models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='units_commented', to=settings.AUTH_USER_MODEL)),
                ('reviewed_by', models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='units_reviewed', to=settings.AUTH_USER_MODEL)),
                ('submitted_by', models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='units_submitted', to=settings.AUTH_USER_MODEL)),
                ('unit', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='change', to='pootle_store.Unit')),
            ],
            options={
                'abstract': False,
                'db_table': 'pootle_store_unit_change',
            },
        ),
        # Backfill the two new tables from the legacy Unit columns.
        migrations.RunPython(convert_unit_source_change),
        migrations.RunPython(
            remove_fields,
        ),
        # Per-column removals; no-ops on MySQL when the SQL path already ran.
        RemoveFieldIfExists(
            model_name='unit',
            name='source_hash',
        ),
        RemoveFieldIfExists(
            model_name='unit',
            name='source_length',
        ),
        RemoveFieldIfExists(
            model_name='unit',
            name='source_wordcount',
        ),
        RemoveFieldIfExists(
            model_name='unit',
            name='commented_by',
        ),
        RemoveFieldIfExists(
            model_name='unit',
            name='commented_on',
        ),
        RemoveFieldIfExists(
            model_name='unit',
            name='reviewed_by',
        ),
        RemoveFieldIfExists(
            model_name='unit',
            name='reviewed_on',
        ),
        RemoveFieldIfExists(
            model_name='unit',
            name='submitted_by',
        ),
        RemoveFieldIfExists(
            model_name='unit',
            name='submitted_on',
        ),
        migrations.AlterField(
            model_name='unitchange',
            name='commented_by',
            field=models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='commented', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='unitchange',
            name='reviewed_by',
            field=models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='reviewed', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='unitchange',
            name='submitted_by',
            field=models.ForeignKey(null=True, on_delete=models.SET(pootle.core.user.get_system_user), related_name='submitted', to=settings.AUTH_USER_MODEL),
        ),
        migrations.CreateModel(
            name='SuggestionState',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=16)),
            ],
            options={
                'abstract': False,
                'db_table': 'pootle_store_suggestion_state',
            },
        ),
        migrations.RunPython(add_default_suggestion_states),
        # Swap the string state column for a FK: rename aside, add the FK,
        # migrate the data, then drop the legacy column.
        migrations.RenameField(
            model_name='suggestion',
            old_name="state",
            new_name="tmp_state"),
        migrations.AddField(
            model_name='suggestion',
            name='state',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='suggestions', to='pootle_store.SuggestionState'),
        ),
        migrations.RunPython(set_suggestion_states),
        migrations.RemoveField(
            model_name='suggestion',
            name='tmp_state',
        ),
        migrations.RunPython(clean_abs_file_paths)]
| gpl-3.0 |
sander76/home-assistant | homeassistant/components/xbox/media_player.py | 3 | 7802 | """Xbox Media Player Support."""
from __future__ import annotations
import re
from typing import List
from xbox.webapi.api.client import XboxLiveClient
from xbox.webapi.api.provider.catalog.models import Image
from xbox.webapi.api.provider.smartglass.models import (
PlaybackState,
PowerState,
SmartglassConsole,
SmartglassConsoleList,
VolumeDirection,
)
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_APP,
MEDIA_TYPE_GAME,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import STATE_OFF, STATE_ON, STATE_PAUSED, STATE_PLAYING
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import ConsoleData, XboxUpdateCoordinator
from .browse_media import build_item_response
from .const import DOMAIN
# Full media-player feature bitmask advertised by an Xbox console.
SUPPORT_XBOX = (
    SUPPORT_TURN_ON
    | SUPPORT_TURN_OFF
    | SUPPORT_PREVIOUS_TRACK
    | SUPPORT_NEXT_TRACK
    | SUPPORT_PLAY
    | SUPPORT_PAUSE
    | SUPPORT_VOLUME_STEP
    | SUPPORT_VOLUME_MUTE
    | SUPPORT_BROWSE_MEDIA
    | SUPPORT_PLAY_MEDIA
)

# Map smartglass playback/power states onto Home Assistant entity states.
XBOX_STATE_MAP = {
    PlaybackState.Playing: STATE_PLAYING,
    PlaybackState.Paused: STATE_PAUSED,
    PowerState.On: STATE_ON,
    PowerState.SystemUpdate: STATE_OFF,
    PowerState.ConnectedStandby: STATE_OFF,
    PowerState.Off: STATE_OFF,
    PowerState.Unknown: None,
}
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up Xbox media_player from a config entry."""
    # Shared objects stored by the integration's __init__ during setup.
    client: XboxLiveClient = hass.data[DOMAIN][entry.entry_id]["client"]
    consoles: SmartglassConsoleList = hass.data[DOMAIN][entry.entry_id]["consoles"]
    coordinator: XboxUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
        "coordinator"
    ]
    # One media-player entity per console registered to the account.
    async_add_entities(
        [XboxMediaPlayer(client, console, coordinator) for console in consoles.result]
    )
class XboxMediaPlayer(CoordinatorEntity, MediaPlayerEntity):
    """Representation of an Xbox Media Player."""

    def __init__(
        self,
        client: XboxLiveClient,
        console: SmartglassConsole,
        coordinator: XboxUpdateCoordinator,
    ) -> None:
        """Initialize the Xbox Media Player."""
        super().__init__(coordinator)
        self.client: XboxLiveClient = client
        self._console: SmartglassConsole = console

    @property
    def name(self):
        """Return the device name."""
        return self._console.name

    @property
    def unique_id(self):
        """Console device ID."""
        return self._console.id

    @property
    def data(self) -> ConsoleData:
        """Return coordinator data for this console."""
        return self.coordinator.data.consoles[self._console.id]

    @property
    def state(self):
        """State of the player."""
        status = self.data.status
        # A known playback state is more specific than the power state.
        if status.playback_state in XBOX_STATE_MAP:
            return XBOX_STATE_MAP[status.playback_state]
        return XBOX_STATE_MAP[status.power_state]

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        # Track skipping only makes sense while media is playing or paused.
        if self.state not in [STATE_PLAYING, STATE_PAUSED]:
            return SUPPORT_XBOX & ~SUPPORT_NEXT_TRACK & ~SUPPORT_PREVIOUS_TRACK
        return SUPPORT_XBOX

    @property
    def media_content_type(self):
        """Media content type."""
        app_details = self.data.app_details
        if app_details and app_details.product_family == "Games":
            return MEDIA_TYPE_GAME
        return MEDIA_TYPE_APP

    @property
    def media_title(self):
        """Title of current playing media."""
        app_details = self.data.app_details
        if not app_details:
            return None
        return (
            app_details.localized_properties[0].product_title
            or app_details.localized_properties[0].short_title
        )

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        app_details = self.data.app_details
        if not app_details:
            return None
        image = _find_media_image(app_details.localized_properties[0].images)
        if not image:
            return None
        url = image.uri
        # Catalog URIs may be protocol-relative ("//..."); prefix a scheme.
        if url[0] == "/":
            url = f"http:{url}"
        return url

    @property
    def media_image_remotely_accessible(self) -> bool:
        """If the image url is remotely accessible."""
        return True

    async def async_turn_on(self):
        """Turn the media player on."""
        await self.client.smartglass.wake_up(self._console.id)

    async def async_turn_off(self):
        """Turn the media player off."""
        await self.client.smartglass.turn_off(self._console.id)

    async def async_mute_volume(self, mute):
        """Mute the volume."""
        if mute:
            await self.client.smartglass.mute(self._console.id)
        else:
            await self.client.smartglass.unmute(self._console.id)

    async def async_volume_up(self):
        """Turn volume up for media player."""
        await self.client.smartglass.volume(self._console.id, VolumeDirection.Up)

    async def async_volume_down(self):
        """Turn volume down for media player."""
        await self.client.smartglass.volume(self._console.id, VolumeDirection.Down)

    async def async_media_play(self):
        """Send play command."""
        await self.client.smartglass.play(self._console.id)

    async def async_media_pause(self):
        """Send pause command."""
        await self.client.smartglass.pause(self._console.id)

    async def async_media_previous_track(self):
        """Send previous track command."""
        await self.client.smartglass.previous(self._console.id)

    async def async_media_next_track(self):
        """Send next track command."""
        await self.client.smartglass.next(self._console.id)

    async def async_browse_media(self, media_content_type=None, media_content_id=None):
        """Implement the websocket media browsing helper."""
        return await build_item_response(
            self.client,
            self._console.id,
            self.data.status.is_tv_configured,
            media_content_type,
            media_content_id,
        )

    async def async_play_media(self, media_type, media_id, **kwargs):
        """Launch an app on the Xbox."""
        # "Home" and "TV" are virtual ids from the media browser tree.
        if media_id == "Home":
            await self.client.smartglass.go_home(self._console.id)
        elif media_id == "TV":
            await self.client.smartglass.show_tv_guide(self._console.id)
        else:
            await self.client.smartglass.launch_app(self._console.id, media_id)

    @property
    def device_info(self):
        """Return a device description for device registry."""
        # Turns "XboxOneX" into "Xbox One X" for display
        matches = re.finditer(
            ".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)",
            self._console.console_type,
        )
        model = " ".join([m.group(0) for m in matches])
        return {
            "identifiers": {(DOMAIN, self._console.id)},
            "name": self._console.name,
            "manufacturer": "Microsoft",
            "model": model,
        }
def _find_media_image(images=List[Image]) -> Image | None:
purpose_order = ["FeaturePromotionalSquareArt", "Tile", "Logo", "BoxArt"]
for purpose in purpose_order:
for image in images:
if (
image.image_purpose == purpose
and image.width == image.height
and image.width >= 300
):
return image
return None
| apache-2.0 |
xuegang/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/aoco_compression/test_runsqls.py | 9 | 29719 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import platform
import shutil
import tinctest
from mpp.models import SQLTestCase
from mpp.lib import mppUtil
from mpp.lib.config import GPDBConfig
from mpp.lib.GPFDIST import GPFDIST
'''
Class to generate and run the sqls
'''
@tinctest.skipLoading('scenario')
class co_create_storage_directive_small(SQLTestCase):
    """
    @description: Create tables with storage_directive
    @gucs gp_create_table_random_default_distribution=off
    @product_version gpdb: [4.3-]
    """
    # The docstring above is tinc runtime metadata -- keep its format intact.
    sql_dir = 'co_create_storage_directive/small/'
    ans_dir = 'expected/co_create_storage_directive/'
    out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_storage_directive_large_1G_zlib(SQLTestCase):
    """
    @description: Create tables with storage_directive
    @gucs gp_create_table_random_default_distribution=off
    @product_version gpdb: [4.3-]
    """
    # 1G rowgroup / zlib variant; docstring above is tinc metadata.
    sql_dir = 'co_create_storage_directive/large_1G_zlib/'
    ans_dir = 'expected/co_create_storage_directive/'
    out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_storage_directive_large_1G_zlib_2(SQLTestCase):
    """
    @description: Create tables with storage_directive
    @gucs gp_create_table_random_default_distribution=off
    @product_version gpdb: [4.3-]
    """
    # Second 1G zlib workload set; docstring above is tinc metadata.
    sql_dir = 'co_create_storage_directive/large_1G_zlib_2/'
    ans_dir = 'expected/co_create_storage_directive/'
    out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_storage_directive_large_1G_quick_rle(SQLTestCase):
    """
    @description: Create tables with storage_directive
    @gucs gp_create_table_random_default_distribution=off
    @product_version gpdb: [4.3-]
    """
    # 1G quicklz/RLE variant; docstring above is tinc metadata.
    sql_dir = 'co_create_storage_directive/large_1G_quick_rle/'
    ans_dir = 'expected/co_create_storage_directive/'
    out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_storage_directive_large_2G_zlib(SQLTestCase):
    """
    @description: Create tables with storage_directive
    @gucs gp_create_table_random_default_distribution=off
    @product_version gpdb: [4.3-]
    """
    # 2G zlib variant; docstring above is tinc metadata.
    sql_dir = 'co_create_storage_directive/large_2G_zlib/'
    ans_dir = 'expected/co_create_storage_directive/'
    out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_storage_directive_large_2G_zlib_2(SQLTestCase):
    """
    @description: Create tables with storage_directive
    @gucs gp_create_table_random_default_distribution=off
    @product_version gpdb: [4.3-]
    """
    # Second 2G zlib workload set; docstring above is tinc metadata.
    sql_dir = 'co_create_storage_directive/large_2G_zlib_2/'
    ans_dir = 'expected/co_create_storage_directive/'
    out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_storage_directive_large_2G_quick_rle(SQLTestCase):
    """
    @description: Create tables with storage_directive
    @gucs gp_create_table_random_default_distribution=off
    @product_version gpdb: [4.3-]
    """
    # 2G quicklz/RLE variant; docstring above is tinc metadata.
    sql_dir = 'co_create_storage_directive/large_2G_quick_rle/'
    ans_dir = 'expected/co_create_storage_directive/'
    out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_default_small(SQLTestCase):
"""
@description: Create tables with default column_reference
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_default/small/'
ans_dir = 'expected/co_create_column_reference_default/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_default_large_1G_zlib(SQLTestCase):
"""
@description: Create tables with default column_reference
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_default/large_1G_zlib/'
ans_dir = 'expected/co_create_column_reference_default/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_default_large_1G_zlib_2(SQLTestCase):
"""
@description: Create tables with default column_reference
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_default/large_1G_zlib_2/'
ans_dir = 'expected/co_create_column_reference_default/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_default_large_1G_quick_rle(SQLTestCase):
"""
@description: Create tables with default column_reference
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_default/large_1G_quick_rle/'
ans_dir = 'expected/co_create_column_reference_default/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_default_large_2G_zlib(SQLTestCase):
"""
@description: Create tables with default column_reference
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_default/large_2G_zlib/'
ans_dir = 'expected/co_create_column_reference_default/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_default_large_2G_zlib_2(SQLTestCase):
"""
@description: Create tables with default column_reference
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_default/large_2G_zlib_2/'
ans_dir = 'expected/co_create_column_reference_default/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_default_large_2G_quick_rle(SQLTestCase):
"""
@description: Create tables with default column_reference
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_default/large_2G_quick_rle/'
ans_dir = 'expected/co_create_column_reference_default/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_small(SQLTestCase):
"""
@description: Create table wth column_reference for each column
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column/small/'
ans_dir = 'expected/co_create_column_reference_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_large_1G_zlib(SQLTestCase):
"""
@description: Create table wth column_reference for each column
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column/large_1G_zlib/'
ans_dir = 'expected/co_create_column_reference_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_large_1G_zlib_2(SQLTestCase):
"""
@description: Create table wth column_reference for each column
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column/large_1G_zlib_2/'
ans_dir = 'expected/co_create_column_reference_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_large_1G_quick_rle(SQLTestCase):
"""
@description: Create table wth column_reference for each column
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column/large_1G_quick_rle/'
ans_dir = 'expected/co_create_column_reference_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_large_2G_zlib(SQLTestCase):
"""
@description: Create table wth column_reference for each column
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column/large_2G_zlib/'
ans_dir = 'expected/co_create_column_reference_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_large_2G_zlib_2(SQLTestCase):
"""
@description: Create table wth column_reference for each column
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column/large_2G_zlib_2/'
ans_dir = 'expected/co_create_column_reference_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_large_2G_quick_rle(SQLTestCase):
"""
@description: Create table wth column_reference for each column
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column/large_2G_quick_rle/'
ans_dir = 'expected/co_create_column_reference_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_small(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row/small/'
ans_dir = 'expected/ao_create_with_row/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_large_1G_zlib(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row/large_1G_zlib/'
ans_dir = 'expected/ao_create_with_row/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_large_1G_zlib_2(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row/large_1G_zlib_2/'
ans_dir = 'expected/ao_create_with_row/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_large_1G_quick_rle(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row/large_1G_quick_rle/'
ans_dir = 'expected/ao_create_with_row/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_large_2G_zlib(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row/large_2G_zlib/'
ans_dir = 'expected/ao_create_with_row/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_large_2G_zlib_2(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row/large_2G_zlib_2/'
ans_dir = 'expected/ao_create_with_row/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_large_2G_quick_rle(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row/large_2G_quick_rle/'
ans_dir = 'expected/ao_create_with_row/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_small(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause -CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column/small/'
ans_dir = 'expected/co_create_with_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_large_1G_zlib(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause -CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column/large_1G_zlib/'
ans_dir = 'expected/co_create_with_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_large_1G_zlib_2(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause -CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column/large_1G_zlib_2/'
ans_dir = 'expected/co_create_with_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_large_1G_quick_rle(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause -CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column/large_1G_quick_rle/'
ans_dir = 'expected/co_create_with_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_large_2G_zlib(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause -CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column/large_2G_zlib/'
ans_dir = 'expected/co_create_with_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_large_2G_zlib_2(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause -CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column/large_2G_zlib_2/'
ans_dir = 'expected/co_create_with_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_large_2G_quick_rle(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause -CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column/large_2G_quick_rle/'
ans_dir = 'expected/co_create_with_column/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_part_small(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row_part/small/'
ans_dir = 'expected/ao_create_with_row_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_part_large_1G_zlib(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row_part/large_1G_zlib/'
ans_dir = 'expected/ao_create_with_row_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_part_large_1G_zlib_2(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row_part/large_1G_zlib_2/'
ans_dir = 'expected/ao_create_with_row_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_part_large_1G_quick_rle(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row_part/large_1G_quick_rle/'
ans_dir = 'expected/ao_create_with_row_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_part_large_2G_zlib(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row_part/large_2G_zlib/'
ans_dir = 'expected/ao_create_with_row_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_part_large_2G_zlib_2(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row_part/large_2G_zlib_2/'
ans_dir = 'expected/ao_create_with_row_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_part_large_2G_quick_rle(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row_part/large_2G_quick_rle/'
ans_dir = 'expected/ao_create_with_row_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class ao_create_with_row_sub_part_small(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at sub-partition level - AO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'ao_create_with_row_sub_part/small/'
ans_dir = 'expected/ao_create_with_row_sub_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_part_small(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column_part/small/'
ans_dir = 'expected/co_create_with_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_part_large_1G_zlib(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column_part/large_1G_zlib/'
ans_dir = 'expected/co_create_with_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_part_large_1G_zlib_2(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column_part/large_1G_zlib_2/'
ans_dir = 'expected/co_create_with_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_part_large_1G_quick_rle(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column_part/large_1G_quick_rle/'
ans_dir = 'expected/co_create_with_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_part_large_2G_zlib(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column_part/large_2G_zlib/'
ans_dir = 'expected/co_create_with_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_part_large_2G_zlib_2(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column_part/large_2G_zlib_2/'
ans_dir = 'expected/co_create_with_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_part_large_2G_quick_rle(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at partition level - CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column_part/large_2G_quick_rle/'
ans_dir = 'expected/co_create_with_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_with_column_sub_part_small(SQLTestCase):
"""
@description: Create tables with compression attributes in with clause at sub-partition level - CO tables
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_with_column_sub_part/small/'
ans_dir = 'expected/co_create_with_column_sub_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_part_small(SQLTestCase):
"""
@description: Create tables with column reference at partiiton level
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column_part/small/'
ans_dir = 'expected/co_create_column_reference_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_part_large_1G_zlib(SQLTestCase):
"""
@description: Create tables with column reference at partiiton level
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column_part/large_1G_zlib/'
ans_dir = 'expected/co_create_column_reference_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_part_large_1G_zlib_2(SQLTestCase):
"""
@description: Create tables with column reference at partiiton level
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column_part/large_1G_zlib_2/'
ans_dir = 'expected/co_create_column_reference_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_part_large_1G_quick_rle(SQLTestCase):
"""
@description: Create tables with column reference at partiiton level
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column_part/large_1G_quick_rle/'
ans_dir = 'expected/co_create_column_reference_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_part_large_2G_zlib(SQLTestCase):
"""
@description: Create tables with column reference at partiiton level
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column_part/large_2G_zlib/'
ans_dir = 'expected/co_create_column_reference_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_part_large_2G_zlib_2(SQLTestCase):
"""
@description: Create tables with column reference at partiiton level
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column_part/large_2G_zlib_2/'
ans_dir = 'expected/co_create_column_reference_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_part_large_2G_quick_rle(SQLTestCase):
"""
@description: Create tables with column reference at partiiton level
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column_part/large_2G_quick_rle/'
ans_dir = 'expected/co_create_column_reference_column_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_column_reference_column_sub_part_small(SQLTestCase):
"""
@description: Create table with column reference at sub-partition level
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_column_reference_column_sub_part/small/'
ans_dir = 'expected/co_create_column_reference_column_sub_part/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class create_col_with_diff_column_reference(SQLTestCase):
"""
@description: Create table with each column having different
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
compresstypes in column reference
"""
sql_dir = 'create_col_with_diff_column_reference/'
ans_dir = 'expected/create_col_with_diff_column_reference/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class create_col_with_diff_storage_directive(SQLTestCase):
"""
@description: Create table with each column having different
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
compresstypes in storage_directive
"""
sql_dir = 'create_col_with_diff_storage_directive/'
ans_dir = 'expected/create_col_with_diff_storage_directive/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class create_col_with_storage_directive_and_col_ref(SQLTestCase):
"""
@description: Create table with storage_directive and column_reference
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'create_col_with_storage_directive_and_col_ref/'
ans_dir = 'expected/create_col_with_storage_directive_and_col_ref/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_alter_table_add(SQLTestCase):
"""
@description: Alter table add new column with compression
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_alter_table_add/'
ans_dir = 'expected/co_alter_table_add/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_alter_type(SQLTestCase):
"""
@description: Alter a built in datatype to use compression
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_alter_type/'
ans_dir = 'expected/co_alter_type/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class co_create_type(SQLTestCase):
"""
@description: Create type with compresstypes
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'co_create_type/'
ans_dir = 'expected/co_create_type/'
out_dir = 'output/'
@tinctest.skipLoading('scenario')
class other_tests(SQLTestCase):
"""
@description: Other AO and CO related tests
@gucs gp_create_table_random_default_distribution=off
@product_version gpdb: [4.3-]
"""
sql_dir = 'other_tests/'
ans_dir = 'expected/other_tests/'
out_dir = 'output/'
@classmethod
def setUpClass(cls):
super(other_tests, cls).setUpClass()
source_dir = cls.get_source_dir()
config = GPDBConfig()
host, _ = config.get_hostandport_of_segment(0)
port = mppUtil.getOpenPort(8080)
tinctest.logger.info("gpfdist host = {0}, port = {1}".format(host, port))
data_dir = os.path.join(source_dir, 'data')
cls.gpfdist = GPFDIST(port, host, directory=data_dir)
cls.gpfdist.startGpfdist()
data_out_dir = os.path.join(data_dir, 'output')
shutil.rmtree(data_out_dir, ignore_errors=True)
os.mkdir(data_out_dir)
@classmethod
def tearDownClass(cls):
cls.gpfdist.killGpfdist()
super(other_tests, cls).tearDownClass()
| apache-2.0 |
crazy-canux/django | django/db/backends/postgresql/base.py | 143 | 10451 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.utils import DatabaseError as WrappedDatabaseError
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import psycopg2 as Database
import psycopg2.extensions
import psycopg2.extras
except ImportError as e:
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
def psycopg2_version():
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.') if v.isdigit())
PSYCOPG2_VERSION = psycopg2_version()
if PSYCOPG2_VERSION < (2, 4, 5):
raise ImproperlyConfigured("psycopg2_version 2.4.5 or newer is required; you have %s" % psycopg2.__version__)
# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
from .utils import utc_tzinfo_factory # isort:skip
from .version import get_version # isort:skip
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()
# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
(INETARRAY_OID,),
'INETARRAY',
psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'interval',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'uuid',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE '%%' || {} || '%%'",
'icontains': "LIKE '%%' || UPPER({}) || '%%'",
'startswith': "LIKE {} || '%%'",
'istartswith': "LIKE UPPER({}) || '%%'",
'endswith': "LIKE '%%' || {}",
'iendswith': "LIKE '%%' || UPPER({})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
}
conn_params.update(settings_dict['OPTIONS'])
conn_params.pop('isolation_level', None)
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict['OPTIONS']
try:
self.isolation_level = options['isolation_level']
except KeyError:
self.isolation_level = connection.isolation_level
else:
# Set the isolation level to the value from OPTIONS.
if self.isolation_level != connection.isolation_level:
connection.set_session(isolation_level=self.isolation_level)
return connection
def init_connection_state(self):
self.connection.set_client_encoding('UTF8')
conn_timezone_name = self.connection.get_parameter_status('TimeZone')
if conn_timezone_name != self.timezone_name:
cursor = self.connection.cursor()
try:
cursor.execute(self.ops.set_time_zone_sql(), [self.timezone_name])
finally:
cursor.close()
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
def create_cursor(self):
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
To check constraints, we set constraints to immediate. Then, when, we're done we must ensure they
are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
@property
def _nodb_connection(self):
    """
    Return a connection to the 'postgres' database for operations that must
    not touch the target database, falling back to the default database
    when 'postgres' is unreachable.
    """
    nodb_connection = super(DatabaseWrapper, self)._nodb_connection
    try:
        nodb_connection.ensure_connection()
    except (DatabaseError, WrappedDatabaseError):
        warnings.warn(
            "Normally Django will use a connection to the 'postgres' database "
            "to avoid running initialization queries against the production "
            "database when it's not needed (for example, when running tests). "
            "Django was unable to create a connection to the 'postgres' database "
            "and will use the default database instead.",
            RuntimeWarning
        )
        settings_dict = self.settings_dict.copy()
        settings_dict['NAME'] = settings.DATABASES[DEFAULT_DB_ALIAS]['NAME']
        # Bug fix: pass the modified copy (with the fallback NAME) to the new
        # wrapper. Previously an unmodified self.settings_dict.copy() was
        # passed, so the NAME override above had no effect.
        nodb_connection = self.__class__(
            settings_dict,
            alias=self.alias,
            allow_thread_sharing=False)
    return nodb_connection
@cached_property
def psycopg2_version(self):
    # Version of the installed psycopg2 driver, computed once per wrapper.
    return PSYCOPG2_VERSION
@cached_property
def pg_version(self):
    """Return the PostgreSQL server version, using a short-lived connection."""
    with self.temporary_connection():
        return get_version(self.connection)
| bsd-3-clause |
jamesrobertlloyd/gpss-research | experiments/2014-01-09-radio.py | 4 | 1466 | Experiment(description='Trying to recreate old results using latest code',
           # Experiment configuration for the radio dataset; field meanings
           # follow the gpss-research Experiment declaration.
           data_dir='../data/radio/',
           max_depth=4,
           random_order=False,
           k=1,
           debug=False,
           local_computation=False,
           n_rand=9,
           sd=2,
           jitter_sd=0.1,
           max_jobs=200,
           verbose=False,
           make_predictions=False,
           skip_complete=True,
           results_dir='../results/2014-01-09-radio/',
           iters=250,
           base_kernels='SE,Per,Lin,Const,Noise',
           random_seed=1,
           period_heuristic=3,
           period_heuristic_type='min',
           max_period_heuristic=1.5, # Encourage it to see periodicity
           subset=True,
           subset_size=250,
           full_iters=10,
           bundle_size=5,
           additive_form=True,
           mean='ff.MeanZero()', # Starting mean
           kernel='ff.SumKernel(operands=[ff.NoiseKernel(), ff.ConstKernel(), ff.SqExpKernel(dimension=0), ff.ProductKernel(operands=[ff.PeriodicKernel(dimension=0, lengthscale=0.334902, period=0.000316), ff.PeriodicKernel(dimension=0, lengthscale=1.108831, period=2.296433), ff.SqExpKernel(dimension=0)])])', # Starting kernel
           lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
           score='bic',
           search_operators=[('A', 'B', {'A': 'kernel', 'B': 'base'}),
                             ('A', ('None',), {'A': 'kernel'})])
anant-dev/django | tests/model_meta/tests.py | 84 | 11711 | from django.apps import apps
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.core.exceptions import FieldDoesNotExist
from django.db.models.fields import CharField, Field, related
from django.db.models.options import EMPTY_RELATION_TREE, IMMUTABLE_WARNING
from django.test import SimpleTestCase
from .models import (
AbstractPerson, BasePerson, Child, CommonAncestor, FirstParent, Person,
ProxyPerson, Relating, Relation, SecondParent,
)
from .results import TEST_RESULTS
class OptionsBaseTests(SimpleTestCase):
    """Shared helpers for the Options (Model._meta) test cases below."""

    def _map_related_query_names(self, res):
        # Pair each relation's query name with its associated model.
        return tuple((o.name, m) for o, m in res)

    def _map_names(self, res):
        # Pair each field's name with its associated model.
        return tuple((f.name, m) for f, m in res)

    def _model(self, current_model, field):
        # Return the concrete model defining `field`, or None when it is
        # `current_model` itself.
        model = field.model._meta.concrete_model
        return None if model == current_model else model

    def _details(self, current_model, relation):
        """Return (relation, defining model or None, is_direct, is_m2m)."""
        # A relation is "direct" when declared on the model itself (a concrete
        # field or a GenericForeignKey) rather than auto-created. Idiom fix:
        # a single isinstance() call with a tuple replaces the `or` chain.
        direct = isinstance(relation, (Field, GenericForeignKey))
        model = relation.model._meta.concrete_model
        if model == current_model:
            model = None
        field = relation if direct else relation.field
        m2m = isinstance(field, related.ManyToManyField)
        return relation, model, direct, m2m
class GetFieldsTests(OptionsBaseTests):

    def test_get_fields_is_immutable(self):
        # get_fields() must return an immutable sequence; in-place addition
        # has to fail for both the freshly computed and the cached result.
        msg = IMMUTABLE_WARNING % "get_fields()"
        for _ in range(2):
            # Running unit test twice to ensure both non-cached and cached result
            # are immutable.
            fields = Person._meta.get_fields()
            with self.assertRaisesMessage(AttributeError, msg):
                fields += ["errors"]
class LabelTests(OptionsBaseTests):
    """Checks for Options.label and Options.label_lower."""

    def test_label(self):
        for model_class, expected_label in TEST_RESULTS['labels'].items():
            self.assertEqual(model_class._meta.label, expected_label)

    def test_label_lower(self):
        for model_class, expected_label in TEST_RESULTS['lower_labels'].items():
            self.assertEqual(model_class._meta.label_lower, expected_label)
class DataTests(OptionsBaseTests):
    """Tests for Options.fields / local_fields / local_concrete_fields."""

    def test_fields(self):
        for model, expected_result in TEST_RESULTS['fields'].items():
            fields = model._meta.fields
            self.assertEqual([f.attname for f in fields], expected_result)

    def test_local_fields(self):
        # PEP 8 (E731): use a def instead of assigning a lambda to a name.
        def is_data_field(f):
            # Data fields are concrete fields other than many-to-many.
            return isinstance(f, Field) and not isinstance(f, related.ManyToManyField)

        for model, expected_result in TEST_RESULTS['local_fields'].items():
            fields = model._meta.local_fields
            self.assertEqual([f.attname for f in fields], expected_result)
            for f in fields:
                self.assertEqual(f.model, model)
                self.assertTrue(is_data_field(f))

    def test_local_concrete_fields(self):
        for model, expected_result in TEST_RESULTS['local_concrete_fields'].items():
            fields = model._meta.local_concrete_fields
            self.assertEqual([f.attname for f in fields], expected_result)
            for f in fields:
                # Concrete fields must map to a database column.
                self.assertIsNotNone(f.column)
class M2MTests(OptionsBaseTests):

    def test_many_to_many(self):
        # Options.many_to_many lists only m2m relation fields.
        for model, expected_result in TEST_RESULTS['many_to_many'].items():
            fields = model._meta.many_to_many
            self.assertEqual([f.attname for f in fields], expected_result)
            for f in fields:
                self.assertTrue(f.many_to_many and f.is_relation)

    def test_many_to_many_with_model(self):
        # Each m2m field maps to the model that defines it (None when local).
        for model, expected_result in TEST_RESULTS['many_to_many_with_model'].items():
            models = [self._model(model, field) for field in model._meta.many_to_many]
            self.assertEqual(models, expected_result)
class RelatedObjectsTests(OptionsBaseTests):
    """Reverse relations exposed through Options.get_fields()."""

    # PEP 8 (E731): a real method instead of `key_name = lambda self, r: r[0]`.
    def key_name(self, r):
        # Sort key for (name, model) pairs produced by _map_names().
        return r[0]

    def test_related_objects(self):
        result_key = 'get_all_related_objects_with_model'
        for model, expected in TEST_RESULTS[result_key].items():
            # Reverse relations are the auto-created, non-concrete fields.
            objects = [
                (field, self._model(model, field))
                for field in model._meta.get_fields()
                if field.auto_created and not field.concrete
            ]
            self.assertEqual(self._map_related_query_names(objects), expected)

    def test_related_objects_local(self):
        result_key = 'get_all_related_objects_with_model_local'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = [
                (field, self._model(model, field))
                for field in model._meta.get_fields(include_parents=False)
                if field.auto_created and not field.concrete
            ]
            self.assertEqual(self._map_related_query_names(objects), expected)

    def test_related_objects_include_hidden(self):
        result_key = 'get_all_related_objects_with_model_hidden'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = [
                (field, self._model(model, field))
                for field in model._meta.get_fields(include_hidden=True)
                if field.auto_created and not field.concrete
            ]
            # Compare order-insensitively: hidden relations' ordering is not
            # part of the contract being tested here.
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )

    def test_related_objects_include_hidden_local_only(self):
        result_key = 'get_all_related_objects_with_model_hidden_local'
        for model, expected in TEST_RESULTS[result_key].items():
            objects = [
                (field, self._model(model, field))
                for field in model._meta.get_fields(include_hidden=True, include_parents=False)
                if field.auto_created and not field.concrete
            ]
            self.assertEqual(
                sorted(self._map_names(objects), key=self.key_name),
                sorted(expected, key=self.key_name)
            )
class VirtualFieldsTests(OptionsBaseTests):

    def test_virtual_fields(self):
        # Virtual fields (e.g. GenericForeignKey) appear in
        # Options.virtual_fields; compare names order-insensitively.
        for model, expected_names in TEST_RESULTS['virtual_fields'].items():
            objects = model._meta.virtual_fields
            self.assertEqual(sorted([f.name for f in objects]), sorted(expected_names))
class GetFieldByNameTests(OptionsBaseTests):
    """Options.get_field() lookups for each category of field."""

    def test_get_data_field(self):
        field_info = self._details(Person, Person._meta.get_field('data_abstract'))
        self.assertEqual(field_info[1:], (BasePerson, True, False))
        self.assertIsInstance(field_info[0], CharField)

    def test_get_m2m_field(self):
        field_info = self._details(Person, Person._meta.get_field('m2m_base'))
        self.assertEqual(field_info[1:], (BasePerson, True, True))
        self.assertIsInstance(field_info[0], related.ManyToManyField)

    def test_get_related_object(self):
        field_info = self._details(Person, Person._meta.get_field('relating_baseperson'))
        self.assertEqual(field_info[1:], (BasePerson, False, False))
        self.assertIsInstance(field_info[0], related.ForeignObjectRel)

    def test_get_related_m2m(self):
        field_info = self._details(Person, Person._meta.get_field('relating_people'))
        self.assertEqual(field_info[1:], (None, False, True))
        self.assertIsInstance(field_info[0], related.ForeignObjectRel)

    def test_get_generic_relation(self):
        field_info = self._details(Person, Person._meta.get_field('generic_relation_base'))
        self.assertEqual(field_info[1:], (None, True, False))
        self.assertIsInstance(field_info[0], GenericRelation)

    def test_get_fields_only_searches_forward_on_apps_not_ready(self):
        opts = Person._meta
        # If apps registry is not ready, get_field() searches over only
        # forward fields.
        opts.apps.models_ready = False
        try:
            # 'data_abstract' is a forward field, and therefore will be found
            self.assertTrue(opts.get_field('data_abstract'))
            msg = (
                "Person has no field named 'relating_baseperson'. The app "
                "cache isn't ready yet, so if this is an auto-created related "
                "field, it won't be available yet."
            )
            # 'relating_baseperson' is a reverse field, and will raise an
            # exception. (The original comment wrongly named 'data_abstract'.)
            with self.assertRaisesMessage(FieldDoesNotExist, msg):
                opts.get_field('relating_baseperson')
        finally:
            # Restore the flag so subsequent tests see a ready app registry.
            opts.apps.models_ready = True
class RelationTreeTests(SimpleTestCase):
    # Models whose lazily built _relation_tree caches are exercised below.
    all_models = (Relation, AbstractPerson, BasePerson, Person, ProxyPerson, Relating)

    def setUp(self):
        apps.clear_cache()

    def test_clear_cache_clears_relation_tree(self):
        # The apps.clear_cache() in setUp() should have deleted all trees.
        # Exclude abstract models that are not included in the Apps registry
        # and have no cache.
        all_models_with_cache = (m for m in self.all_models if not m._meta.abstract)
        for m in all_models_with_cache:
            self.assertNotIn('_relation_tree', m._meta.__dict__)

    def test_first_relation_tree_access_populates_all(self):
        # On first access, relation tree should have populated cache.
        self.assertTrue(self.all_models[0]._meta._relation_tree)
        # AbstractPerson does not have any relations, so relation_tree
        # should just return an EMPTY_RELATION_TREE.
        self.assertEqual(AbstractPerson._meta._relation_tree, EMPTY_RELATION_TREE)
        # All the other models should already have their relation tree
        # in the internal __dict__ .
        all_models_but_abstractperson = (m for m in self.all_models if m is not AbstractPerson)
        for m in all_models_but_abstractperson:
            self.assertIn('_relation_tree', m._meta.__dict__)

    def test_relations_related_objects(self):
        # Testing non hidden related objects
        self.assertEqual(
            sorted([field.related_query_name() for field in Relation._meta._relation_tree
                    if not field.remote_field.field.remote_field.is_hidden()]),
            sorted([
                'fk_abstract_rel', 'fk_base_rel', 'fk_concrete_rel', 'fo_abstract_rel',
                'fo_base_rel', 'fo_concrete_rel', 'm2m_abstract_rel',
                'm2m_base_rel', 'm2m_concrete_rel'
            ])
        )
        # Testing hidden related objects
        self.assertEqual(
            sorted([field.related_query_name() for field in BasePerson._meta._relation_tree]),
            sorted([
                '+', '_relating_basepeople_hidden_+', 'BasePerson_following_abstract+',
                'BasePerson_following_abstract+', 'BasePerson_following_base+', 'BasePerson_following_base+',
                'BasePerson_friends_abstract+', 'BasePerson_friends_abstract+', 'BasePerson_friends_base+',
                'BasePerson_friends_base+', 'BasePerson_m2m_abstract+', 'BasePerson_m2m_base+', 'Relating_basepeople+',
                'Relating_basepeople_hidden+', 'followers_abstract', 'followers_base', 'friends_abstract_rel_+',
                'friends_base_rel_+', 'person', 'relating_basepeople', 'relating_baseperson',
            ])
        )
        # A model with no relations pointing at it has an empty tree.
        self.assertEqual([field.related_query_name() for field in AbstractPerson._meta._relation_tree], [])
class ParentListTests(SimpleTestCase):

    def test_get_parent_list(self):
        # Parents come before grandparents; multiple inheritance lists each
        # direct parent in declaration order, then shared ancestors once.
        self.assertEqual(CommonAncestor._meta.get_parent_list(), [])
        self.assertEqual(FirstParent._meta.get_parent_list(), [CommonAncestor])
        self.assertEqual(SecondParent._meta.get_parent_list(), [CommonAncestor])
        self.assertEqual(Child._meta.get_parent_list(), [FirstParent, SecondParent, CommonAncestor])
| bsd-3-clause |
espdev/readthedocs.org | readthedocs/projects/migrations/0001_initial.py | 36 | 13984 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='EmailHook',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('email', models.EmailField(max_length=254)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('pub_date', models.DateTimeField(auto_now_add=True, verbose_name='Publication date')),
('modified_date', models.DateTimeField(auto_now=True, verbose_name='Modified date')),
('name', models.CharField(max_length=255, verbose_name='Name')),
('slug', models.SlugField(unique=True, max_length=255, verbose_name='Slug')),
('description', models.TextField(help_text='The reStructuredText description of the project', verbose_name='Description', blank=True)),
('repo', models.CharField(help_text='Hosted documentation repository URL', max_length=255, verbose_name='Repository URL')),
('repo_type', models.CharField(default=b'git', max_length=10, verbose_name='Repository type', choices=[(b'git', 'Git'), (b'svn', 'Subversion'), (b'hg', 'Mercurial'), (b'bzr', 'Bazaar')])),
('project_url', models.URLField(help_text="The project's homepage", verbose_name='Project homepage', blank=True)),
('canonical_url', models.URLField(help_text='URL that documentation is expected to serve from', verbose_name='Canonical URL', blank=True)),
('version', models.CharField(help_text='Project version these docs apply to, i.e. 1.0a', max_length=100, verbose_name='Version', blank=True)),
('copyright', models.CharField(help_text='Project copyright information', max_length=255, verbose_name='Copyright', blank=True)),
('theme', models.CharField(default=b'default', help_text='<a href="http://sphinx.pocoo.org/theming.html#builtin-themes" target="_blank">Examples</a>', max_length=20, verbose_name='Theme', choices=[(b'default', 'Default'), (b'sphinxdoc', 'Sphinx Docs'), (b'traditional', 'Traditional'), (b'nature', 'Nature'), (b'haiku', 'Haiku')])),
('suffix', models.CharField(default=b'.rst', verbose_name='Suffix', max_length=10, editable=False)),
('single_version', models.BooleanField(default=False, help_text='A single version site has no translations and only your "latest" version, served at the root of the domain. Use this with caution, only turn it on if you will <b>never</b> have multiple versions of your docs.', verbose_name='Single version')),
('default_version', models.CharField(default=b'latest', help_text='The version of your project that / redirects to', max_length=255, verbose_name='Default version')),
('default_branch', models.CharField(default=None, max_length=255, blank=True, help_text='What branch "latest" points to. Leave empty to use the default value for your VCS (eg. <code>trunk</code> or <code>master</code>).', null=True, verbose_name='Default branch')),
('requirements_file', models.CharField(default=None, max_length=255, blank=True, help_text='Requires Virtualenv. A <a href="https://pip.pypa.io/en/latest/user_guide.html#requirements-files">pip requirements file</a> needed to build your documentation. Path from the root of your project.', null=True, verbose_name='Requirements file')),
('documentation_type', models.CharField(default=b'auto', help_text='Type of documentation you are building. <a href="http://sphinx-doc.org/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info</a>.', max_length=20, verbose_name='Documentation type', choices=[(b'auto', 'Automatically Choose'), (b'sphinx', 'Sphinx Html'), (b'mkdocs', 'Mkdocs (Markdown)'), (b'sphinx_htmldir', 'Sphinx HtmlDir'), (b'sphinx_singlehtml', 'Sphinx Single Page HTML')])),
('allow_comments', models.BooleanField(default=False, verbose_name='Allow Comments')),
('comment_moderation', models.BooleanField(default=False, verbose_name='Comment Moderation)')),
('analytics_code', models.CharField(help_text='Google Analytics Tracking ID (ex. <code>UA-22345342-1</code>). This may slow down your page loads.', max_length=50, null=True, verbose_name='Analytics code', blank=True)),
('enable_epub_build', models.BooleanField(default=True, help_text='Create a EPUB version of your documentation with each build.', verbose_name='Enable EPUB build')),
('enable_pdf_build', models.BooleanField(default=True, help_text='Create a PDF version of your documentation with each build.', verbose_name='Enable PDF build')),
('path', models.CharField(help_text='The directory where <code>conf.py</code> lives', verbose_name='Path', max_length=255, editable=False)),
('conf_py_file', models.CharField(default=b'', help_text='Path from project root to <code>conf.py</code> file (ex. <code>docs/conf.py</code>).Leave blank if you want us to find it for you.', max_length=255, verbose_name='Python configuration file', blank=True)),
('featured', models.BooleanField(default=False, verbose_name='Featured')),
('skip', models.BooleanField(default=False, verbose_name='Skip')),
('mirror', models.BooleanField(default=False, verbose_name='Mirror')),
('use_virtualenv', models.BooleanField(default=False, help_text='Install your project inside a virtualenv using <code>setup.py install</code>', verbose_name='Use virtualenv')),
('python_interpreter', models.CharField(default=b'python', help_text='(Beta) The Python interpreter used to create the virtual environment.', max_length=20, verbose_name='Python Interpreter', choices=[(b'python', 'CPython 2.x'), (b'python3', 'CPython 3.x')])),
('use_system_packages', models.BooleanField(default=False, help_text='Give the virtual environment access to the global site-packages dir.', verbose_name='Use system packages')),
('django_packages_url', models.CharField(max_length=255, verbose_name='Django Packages URL', blank=True)),
('privacy_level', models.CharField(default=b'public', help_text='(Beta) Level of privacy that you want on the repository. Protected means public but not in listings.', max_length=20, verbose_name='Privacy Level', choices=[(b'public', 'Public'), (b'protected', 'Protected'), (b'private', 'Private')])),
('version_privacy_level', models.CharField(default=b'public', help_text='(Beta) Default level of privacy you want on built versions of documentation.', max_length=20, verbose_name='Version Privacy Level', choices=[(b'public', 'Public'), (b'protected', 'Protected'), (b'private', 'Private')])),
('language', models.CharField(default=b'en', help_text="The language the project documentation is rendered in. Note: this affects your project's URL.", max_length=20, verbose_name='Language', choices=[(b'aa', b'Afar'), (b'ab', b'Abkhaz'), (b'af', b'Afrikaans'), (b'am', b'Amharic'), (b'ar', b'Arabic'), (b'as', b'Assamese'), (b'ay', b'Aymara'), (b'az', b'Azerbaijani'), (b'ba', b'Bashkir'), (b'be', b'Belarusian'), (b'bg', b'Bulgarian'), (b'bh', b'Bihari'), (b'bi', b'Bislama'), (b'bn', b'Bengali'), (b'bo', b'Tibetan'), (b'br', b'Breton'), (b'ca', b'Catalan'), (b'co', b'Corsican'), (b'cs', b'Czech'), (b'cy', b'Welsh'), (b'da', b'Danish'), (b'de', b'German'), (b'dz', b'Dzongkha'), (b'el', b'Greek'), (b'en', b'English'), (b'eo', b'Esperanto'), (b'es', b'Spanish'), (b'et', b'Estonian'), (b'eu', b'Basque'), (b'fa', b'Iranian'), (b'fi', b'Finnish'), (b'fj', b'Fijian'), (b'fo', b'Faroese'), (b'fr', b'French'), (b'fy', b'Western Frisian'), (b'ga', b'Irish'), (b'gd', b'Scottish Gaelic'), (b'gl', b'Galician'), (b'gn', b'Guarani'), (b'gu', b'Gujarati'), (b'ha', b'Hausa'), (b'hi', b'Hindi'), (b'he', b'Hebrew'), (b'hr', b'Croatian'), (b'hu', b'Hungarian'), (b'hy', b'Armenian'), (b'ia', b'Interlingua'), (b'id', b'Indonesian'), (b'ie', b'Interlingue'), (b'ik', b'Inupiaq'), (b'is', b'Icelandic'), (b'it', b'Italian'), (b'iu', b'Inuktitut'), (b'ja', b'Japanese'), (b'jv', b'Javanese'), (b'ka', b'Georgian'), (b'kk', b'Kazakh'), (b'kl', b'Kalaallisut'), (b'km', b'Khmer'), (b'kn', b'Kannada'), (b'ko', b'Korean'), (b'ks', b'Kashmiri'), (b'ku', b'Kurdish'), (b'ky', b'Kyrgyz'), (b'la', b'Latin'), (b'ln', b'Lingala'), (b'lo', b'Lao'), (b'lt', b'Lithuanian'), (b'lv', b'Latvian'), (b'mg', b'Malagasy'), (b'mi', b'Maori'), (b'mk', b'Macedonian'), (b'ml', b'Malayalam'), (b'mn', b'Mongolian'), (b'mr', b'Marathi'), (b'ms', b'Malay'), (b'mt', b'Maltese'), (b'my', b'Burmese'), (b'na', b'Nauru'), (b'ne', b'Nepali'), (b'nl', b'Dutch'), (b'no', b'Norwegian'), (b'oc', b'Occitan'), (b'om', b'Oromo'), (b'or', 
b'Oriya'), (b'pa', b'Panjabi'), (b'pl', b'Polish'), (b'ps', b'Pashto'), (b'pt', b'Portuguese'), (b'qu', b'Quechua'), (b'rm', b'Romansh'), (b'rn', b'Kirundi'), (b'ro', b'Romanian'), (b'ru', b'Russian'), (b'rw', b'Kinyarwanda'), (b'sa', b'Sanskrit'), (b'sd', b'Sindhi'), (b'sg', b'Sango'), (b'si', b'Sinhala'), (b'sk', b'Slovak'), (b'sl', b'Slovenian'), (b'sm', b'Samoan'), (b'sn', b'Shona'), (b'so', b'Somali'), (b'sq', b'Albanian'), (b'sr', b'Serbian'), (b'ss', b'Swati'), (b'st', b'Southern Sotho'), (b'su', b'Sudanese'), (b'sv', b'Swedish'), (b'sw', b'Swahili'), (b'ta', b'Tamil'), (b'te', b'Telugu'), (b'tg', b'Tajik'), (b'th', b'Thai'), (b'ti', b'Tigrinya'), (b'tk', b'Turkmen'), (b'tl', b'Tagalog'), (b'tn', b'Tswana'), (b'to', b'Tonga'), (b'tr', b'Turkish'), (b'ts', b'Tsonga'), (b'tt', b'Tatar'), (b'tw', b'Twi'), (b'ug', b'Uyghur'), (b'uk', b'Ukrainian'), (b'ur', b'Urdu'), (b'uz', b'Uzbek'), (b'vi', b'Vietnamese'), (b'vo', b'Volapuk'), (b'wo', b'Wolof'), (b'xh', b'Xhosa'), (b'yi', b'Yiddish'), (b'yo', b'Yoruba'), (b'za', b'Zhuang'), (b'zh', b'Chinese'), (b'zu', b'Zulu'), (b'nb_NO', b'Norwegian Bokmal'), (b'pt_BR', b'Brazilian Portuguese'), (b'uk_UA', b'Ukrainian'), (b'zh_CN', b'Simplified Chinese'), (b'zh_TW', b'Traditional Chinese')])),
('programming_language', models.CharField(default=b'words', choices=[(b'words', b'Only Words'), (b'py', b'Python'), (b'js', b'Javascript'), (b'php', b'PHP'), (b'ruby', b'Ruby'), (b'perl', b'Perl'), (b'java', b'Java'), (b'go', b'Go'), (b'julia', b'Julia'), (b'c', b'C'), (b'csharp', b'C#'), (b'cpp', b'C++'), (b'objc', b'Objective-C'), (b'other', b'Other')], max_length=20, blank=True, help_text='The primary programming language the project is written in.', verbose_name='Programming Language')),
('num_major', models.IntegerField(default=2, blank=True, help_text='2 means supporting 3.X.X and 2.X.X, but not 1.X.X', null=True, verbose_name='Number of Major versions')),
('num_minor', models.IntegerField(default=2, blank=True, help_text='2 means supporting 2.2.X and 2.1.X, but not 2.0.X', null=True, verbose_name='Number of Minor versions')),
('num_point', models.IntegerField(default=2, blank=True, help_text='2 means supporting 2.2.2 and 2.2.1, but not 2.2.0', null=True, verbose_name='Number of Point versions')),
('main_language_project', models.ForeignKey(related_name='translations', blank=True, to='projects.Project', null=True)),
],
options={
'ordering': ('slug',),
'permissions': (('view_project', 'View Project'),),
},
),
migrations.CreateModel(
name='ProjectRelationship',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('child', models.ForeignKey(related_name='superprojects', verbose_name='Child', to='projects.Project')),
('parent', models.ForeignKey(related_name='subprojects', verbose_name='Parent', to='projects.Project')),
],
),
migrations.CreateModel(
name='WebHook',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(help_text='URL to send the webhook to', blank=True)),
('project', models.ForeignKey(related_name='webhook_notifications', to='projects.Project')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='project',
name='related_projects',
field=models.ManyToManyField(to='projects.Project', verbose_name='Related projects', through='projects.ProjectRelationship', blank=True),
),
migrations.AddField(
model_name='project',
name='tags',
field=taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
),
migrations.AddField(
model_name='project',
name='users',
field=models.ManyToManyField(related_name='projects', verbose_name='User', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='emailhook',
name='project',
field=models.ForeignKey(related_name='emailhook_notifications', to='projects.Project'),
),
]
| mit |
thiagopnts/servo | tests/wpt/web-platform-tests/tools/manifest/utils.py | 115 | 1374 | import platform
import os
from six import BytesIO
def rel_path_to_url(rel_path, url_base="/"):
    """Join a relative filesystem path onto *url_base* as a URL path."""
    assert not os.path.isabs(rel_path)
    # Normalise the base so it both starts and ends with a single "/".
    prefix = url_base
    if prefix[0] != "/":
        prefix = "/" + prefix
    if prefix[-1] != "/":
        prefix = prefix + "/"
    return prefix + rel_path.replace(os.sep, "/")
def from_os_path(path):
    """Return *path* with the OS separator replaced by "/".

    Raises ValueError when a backslash survives the conversion (i.e. the
    input mixed separators on a platform whose separator is "/").
    """
    assert os.path.sep == "/" or platform.system() == "Windows"
    normalised = "/".join(path.split(os.path.sep))
    if "\\" in normalised:
        raise ValueError("path contains \\ when separator is %s" % os.path.sep)
    return normalised
def to_os_path(path):
    """Map a normalised ("/"-separated) path onto os.path.sep."""
    # Only meaningful where the separator is "/" or on Windows.
    assert os.path.sep == "/" or platform.system() == "Windows"
    if "\\" in path:
        raise ValueError("normalised path contains \\")
    return os.path.sep.join(path.split("/"))
class ContextManagerBytesIO(BytesIO):
    # BytesIO subclass usable in a `with` block; presumably kept because
    # six's BytesIO lacked context-manager support on Python 2 — confirm.
    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        # Close on exit regardless of whether an exception occurred.
        self.close()
class cached_property(object):
    """Non-data descriptor caching the wrapped method's result per instance.

    The value is stored under the method's name in the instance __dict__,
    and the name is recorded in the instance's `__cached_properties__` set.
    """

    def __init__(self, func):
        self.func = func
        self.__doc__ = getattr(func, "__doc__")
        self.name = func.__name__

    def __get__(self, obj, cls=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        cache = obj.__dict__
        if self.name not in cache:
            cache[self.name] = self.func(obj)
            cache.setdefault("__cached_properties__", set()).add(self.name)
        return cache[self.name]
| mpl-2.0 |
ArthurGarnier/SickRage | lib/github/GitTree.py | 10 | 3487 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.GitTreeElement
class GitTree(github.GithubObject.CompletableGithubObject):
    """
    This class represents GitTrees as returned for example by http://developer.github.com/v3/todo
    """

    def __repr__(self):
        # The SHA is always available, even for lazily completed objects.
        return self.get__repr__({"sha": self._sha.value})

    @property
    def sha(self):
        """
        SHA identifying this tree.

        :type: string
        """
        self._completeIfNotSet(self._sha)
        return self._sha.value

    @property
    def tree(self):
        """
        Entries contained in this tree.

        :type: list of :class:`github.GitTreeElement.GitTreeElement`
        """
        self._completeIfNotSet(self._tree)
        return self._tree.value

    @property
    def url(self):
        """
        API URL of this tree.

        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value

    @property
    def _identity(self):
        # Trees are identified by their SHA.
        return self.sha

    def _initAttributes(self):
        # Every attribute starts as NotSet; _useAttributes() fills them in
        # from the raw API payload.
        self._sha = github.GithubObject.NotSet
        self._tree = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        if "sha" in attributes: # pragma no branch
            self._sha = self._makeStringAttribute(attributes["sha"])
        if "tree" in attributes: # pragma no branch
            self._tree = self._makeListOfClassesAttribute(github.GitTreeElement.GitTreeElement, attributes["tree"])
        if "url" in attributes: # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
| gpl-3.0 |
franky88/emperioanimesta | env/Lib/site-packages/pip/_vendor/appdirs.py | 327 | 22368 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - macOS: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 0)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
# True when running under Python 3; used to alias `unicode` below.
PY3 = sys.version_info[0] == 3

if PY3:
    unicode = str

if sys.platform.startswith('java'):
    # Jython reports "java..." in sys.platform; recover the underlying OS
    # from platform.java_ver() instead.
    import platform
    os_name = platform.java_ver()[3][0]
    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
        system = 'win32'
    elif os_name.startswith('Mac'): # "macOS", etc.
        system = 'darwin'
    else: # "Linux", "SunOS", "FreeBSD", etc.
        # Setting this to "linux2" is not ideal, but only Windows or Mac
        # are actually checked for and the rest of the module expects
        # *sys.platform* style strings.
        system = 'linux2'
else:
    system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "appauthor" (only used on Windows) is the name of the
            appauthor or distributing body for this application. Typically
            it is the owning company name. This falls back to appname. You may
            pass False to disable it.
        "version" is an optional version path element to append to the
            path. You might want to use this if you want multiple versions
            of your app to be able to run independently. If used, this
            would typically be "<major>.<minor>".
            Only applied when appname is present.
        "roaming" (boolean, default False) can be set True to use the Windows
            roaming appdata directory. That means that for users on a Windows
            network setup for roaming profiles, this user data will be
            sync'd on login. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user data directories are:
        macOS:                  ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or in $XDG_DATA_HOME, if defined
        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
        Win XP (roaming):       C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>

    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
    That means, by default "~/.local/share/<AppName>".
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        # Conditional expression replaces the legacy `x and a or b` trick
        # (equivalent here because both alternatives are truthy strings).
        const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(const))
        if appname:
            if appauthor is not False:
                path = os.path.join(path, appauthor, appname)
            else:
                path = os.path.join(path, appname)
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Application Support/')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG Base Directory spec: $XDG_DATA_HOME, else ~/.local/share.
        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return the full path to the OS-wide ("site") data directory.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the owning company/author;
        defaults to appname, pass False to leave it out of the path.
    "version" is an optional version path segment, only applied when
        appname is given.
    "multipath" (*nix only): when True, return every entry of
        $XDG_DATA_DIRS joined by os.pathsep instead of just the first.

    Typical site data directories are:
        macOS:   /Library/Application Support/<AppName>
        Unix:    /usr/local/share/<AppName> or /usr/share/<AppName>
        Win XP:  C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
        Win 7:   C:\ProgramData\<AppAuthor>\<AppName>   # hidden, but writeable

    WARNING: Do not use this on Windows Vista+ -- "C:\ProgramData" is a
    hidden *system* directory there.
    """
    if system == "win32":
        # The author segment falls back to the app name when not given.
        author = appname if appauthor is None else appauthor
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            if author is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, author, appname)
    elif system == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: take the first (or all, if multipath) entry of
        # $XDG_DATA_DIRS, defaulting to /usr/local/share:/usr/share.
        raw = os.getenv('XDG_DATA_DIRS',
                        os.pathsep.join(['/usr/local/share', '/usr/share']))
        dirs = [os.path.expanduser(d.rstrip(os.sep))
                for d in raw.split(os.pathsep)]
        if appname:
            leaf = os.path.join(appname, version) if version else appname
            dirs = [os.sep.join([d, leaf]) for d in dirs]
        # Returned here on purpose: the version segment was already
        # folded into `leaf`, so the append below must not run again.
        return os.pathsep.join(dirs) if multipath else dirs[0]
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path to the user-specific config directory.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the owning company/author;
        defaults to appname, pass False to leave it out of the path.
    "version" is an optional version path segment, only applied when
        appname is given.
    "roaming" (Windows only, default False): use the roaming AppData
        directory so the data is synced across machines on a
        roaming-profile network. See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>.

    Typical user config directories are:
        macOS / Windows: same as user_data_dir
        Unix:            ~/.config/<AppName>  # or under $XDG_CONFIG_HOME

    For Unix, we follow the XDG spec, so by default "~/.config/<AppName>".
    """
    if system in ["win32", "darwin"]:
        # Windows and macOS keep configuration next to the user data.
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        # XDG spec: $XDG_CONFIG_HOME, defaulting to ~/.config.
        base = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        path = os.path.join(base, appname) if appname else base
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return the full path to the OS-wide ("site") config directory.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the owning company/author;
        defaults to appname, pass False to leave it out of the path.
    "version" is an optional version path segment, only applied when
        appname is given.
    "multipath" (*nix only): when True, return every entry of
        $XDG_CONFIG_DIRS joined by os.pathsep instead of just the first.

    Typical site config directories are:
        macOS / Windows: same as site_data_dir
        Unix:            /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName>

    WARNING: Do not use this on Windows Vista+ -- "C:\ProgramData" is a
    hidden *system* directory there.
    """
    if system in ["win32", "darwin"]:
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
    else:
        # XDG spec: $XDG_CONFIG_DIRS, defaulting to /etc/xdg.
        raw = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
        dirs = [os.path.expanduser(d.rstrip(os.sep))
                for d in raw.split(os.pathsep)]
        if appname:
            leaf = os.path.join(appname, version) if version else appname
            dirs = [os.sep.join([d, leaf]) for d in dirs]
        path = os.pathsep.join(dirs) if multipath else dirs[0]
    return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the full path to the user-specific cache directory.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the owning company/author;
        defaults to appname, pass False to leave it out of the path.
    "version" is an optional version path segment, only applied when
        appname is given.
    "opinion" (boolean) can be False to disable appending "Cache" to the
        base app data dir on Windows (see below).

    Typical user cache directories are:
        macOS:   ~/Library/Caches/<AppName>
        Unix:    ~/.cache/<AppName>   (XDG default)
        Win XP:  C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
        Vista:   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    OPINION: Windows has no dedicated cache folder; apps conventionally
    nest one *under* CSIDL_LOCAL_APPDATA (e.g. ...\Mozilla\Firefox\...\Cache),
    so this function appends "Cache" there unless opinion=False.
    """
    if system == "win32":
        author = appname if appauthor is None else appauthor
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            if author is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, author, appname)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: $XDG_CACHE_HOME, defaulting to ~/.cache.
        base = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        path = os.path.join(base, appname) if appname else base
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific log dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the owning company/author;
        defaults to appname, pass False to leave it out of the path.
    "version" is an optional version path segment, only applied when
        appname is given.
    "opinion" (boolean) can be False to disable the appending of
        "Logs" to the base app data dir for Windows, and "log" to the
        base cache dir for Unix. See discussion below.

    Typical user log directories are:
        macOS:   ~/Library/Logs/<AppName>
        Unix:    ~/.cache/<AppName>/log  # or under $XDG_CACHE_HOME if defined
        Win XP:  C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
        Vista:   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs

    OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
    value for Windows and appends "log" to the user cache dir for Unix.
    This can be disabled with the `opinion=False` option.
    """
    if system == "darwin":
        path = os.path.expanduser('~/Library/Logs')
        # Bug fix: only append appname when one was given. Previously this
        # branch always did os.path.join(path, appname), so appname=None
        # raised TypeError instead of returning the bare system directory
        # as documented (and as every other *_dir function does).
        if appname:
            path = os.path.join(path, appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        version = False  # version already applied by user_data_dir
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        version = False  # version already applied by user_cache_dir
        if opinion:
            path = os.path.join(path, "log")
    if appname and version:
        path = os.path.join(path, version)
    return path
class AppDirs(object):
    """Convenience wrapper for getting application dirs.

    Stores the appname/appauthor/version/roaming/multipath arguments once
    and exposes each of the module-level *_dir() functions as a read-only
    property.
    """
    def __init__(self, appname, appauthor=None, version=None, roaming=False,
                 multipath=False):
        self.appname = appname
        self.appauthor = appauthor    # Windows-only author/company segment
        self.version = version        # optional version path segment
        self.roaming = roaming        # Windows: use the roaming AppData dir
        self.multipath = multipath    # *nix: return all XDG dirs, not just one
    @property
    def user_data_dir(self):
        # Per-user data directory.
        return user_data_dir(self.appname, self.appauthor,
                             version=self.version, roaming=self.roaming)
    @property
    def site_data_dir(self):
        # System-wide data directory.
        return site_data_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)
    @property
    def user_config_dir(self):
        # Per-user config directory.
        return user_config_dir(self.appname, self.appauthor,
                               version=self.version, roaming=self.roaming)
    @property
    def site_config_dir(self):
        # System-wide config directory.
        return site_config_dir(self.appname, self.appauthor,
                               version=self.version, multipath=self.multipath)
    @property
    def user_cache_dir(self):
        # Per-user cache directory.
        return user_cache_dir(self.appname, self.appauthor,
                              version=self.version)
    @property
    def user_log_dir(self):
        # Per-user log directory.
        return user_log_dir(self.appname, self.appauthor,
                            version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
    """Look the shell folder up in the Windows registry.

    This is a fallback technique at best: using the registry is not
    guaranteed to give the correct answer for all CSIDL_* names.
    """
    import _winreg

    # Registry value names for the CSIDL constants this module uses.
    # Unknown names raise KeyError before any registry access happens.
    value_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]
    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    folder, _value_type = _winreg.QueryValueEx(key, value_name)
    return folder
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL shell folder via the pywin32 bindings."""
    from win32com.shell import shellcon, shell
    folder = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        folder = unicode(folder)
        # Downgrade to the 8.3 short path name if the folder contains
        # high-bit characters. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        if any(ord(ch) > 255 for ch in folder):
            try:
                import win32api
                folder = win32api.GetShortPathName(folder)
            except ImportError:
                # win32api missing: keep the unicode long path.
                pass
    except UnicodeError:
        # Conversion failed: fall back to the raw byte string.
        pass
    return folder
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL shell folder via ctypes + shell32 (no pywin32)."""
    import ctypes

    # Numeric CSIDL constants for the folders this module needs.
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]
    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
    # Downgrade to the 8.3 short path name if the folder contains
    # high-bit characters. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(ch) > 255 for ch in buf):
        short = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, short, 1024):
            buf = short
    return buf.value
def _get_win_folder_with_jna(csidl_name):
    """Resolve a CSIDL shell folder via JNA (for Jython on Windows)."""
    import array
    from com.sun import jna
    from com.sun.jna.platform import win32
    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in dir:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf = array.zeros('c', buf_size)
        kernel = win32.Kernel32.INSTANCE
        # Bug fix: this previously read "kernal" (a NameError), so the
        # high-bit-character fallback crashed instead of shortening.
        if kernel.GetShortPathName(dir, buf, buf_size):
            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
    return dir
# Pick the best available Windows folder-lookup implementation, in order
# of preference: pywin32 -> ctypes -> JNA (Jython) -> registry fallback.
if system == "win32":
    try:
        import win32com.shell
        _get_win_folder = _get_win_folder_with_pywin32
    except ImportError:
        try:
            from ctypes import windll
            _get_win_folder = _get_win_folder_with_ctypes
        except ImportError:
            try:
                import com.sun.jna
                _get_win_folder = _get_win_folder_with_jna
            except ImportError:
                # Last resort: read the shell folders from the registry.
                _get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
    # Manual smoke test: print every directory this module can compute,
    # under several AppDirs configurations.
    appname = "MyApp"
    appauthor = "MyCompany"
    props = ("user_data_dir", "site_data_dir",
             "user_config_dir", "site_config_dir",
             "user_cache_dir", "user_log_dir")
    configurations = (
        ("-- app dirs (with optional 'version')",
         AppDirs(appname, appauthor, version="1.0")),
        ("\n-- app dirs (without optional 'version')",
         AppDirs(appname, appauthor)),
        ("\n-- app dirs (without optional 'appauthor')",
         AppDirs(appname)),
        ("\n-- app dirs (with disabled 'appauthor')",
         AppDirs(appname, appauthor=False)),
    )
    for header, dirs in configurations:
        print(header)
        for prop in props:
            print("%s: %s" % (prop, getattr(dirs, prop)))
| gpl-3.0 |
DICENetworks/DICE-pjsip | pkgconfig.py | 75 | 4188 | import sys
import os
REMOVE_THESE = ["-I/usr/include", "-I/usr/include/", "-L/usr/lib", "-L/usr/lib/"]
class Pkg:
    """A single pkg-config .pc file: its variables and dependency info."""

    def __init__(self, pkg_name):
        self.name = pkg_name
        # Dependency depth; filled in later by calculate_pkg_priority().
        self.priority = 0
        # Lower-cased variable/keyword name -> fully expanded value.
        self.vars = {}

    def parse(self, pkg_config_path):
        """Locate and parse "<name>.pc" along the given search path.

        "pkg_config_path" is a ':'-separated list of directories.
        Returns True on success, False when the file cannot be found.
        """
        f = None
        for pkg_path in pkg_config_path.split(':'):
            if pkg_path[-1] != '/':
                pkg_path += '/'
            fname = pkg_path + self.name + '.pc'
            try:
                f = open(fname, "r")
                break
            except:
                continue
        if not f:
            return False
        # Both "var=value" definitions and "Keyword: value" lines are
        # stored in the same dict, keyed by the lower-cased name; the
        # earlier of '=' / ':' (when present past column 0) is the split.
        for line in f.readlines():
            line = line.strip()
            if not line:
                continue
            if line[0] == '#':
                continue
            pos1 = line.find('=')
            pos2 = line.find(':')
            if pos1 > 0 and (pos1 < pos2 or pos2 < 0):
                pos = pos1
            elif pos2 > 0 and (pos2 < pos1 or pos1 < 0):
                pos = pos2
            else:
                continue
            self.vars[line[:pos].lower()] = line[pos + 1:]
        f.close()
        # Expand ${var} references repeatedly until none remain.
        for name in self.vars.keys():
            value = self.vars[name]
            while True:
                pos1 = value.find("${")
                if pos1 < 0:
                    break
                # Bug fix: search for the closing brace *after* the
                # opening "${". Previously find("}") scanned from the
                # start, so a literal '}' earlier in the value derailed
                # the expansion.
                pos2 = value.find("}", pos1 + 2)
                if pos2 < 0:
                    break
                value = value.replace(value[pos1:pos2 + 1],
                                      self.vars[value[pos1 + 2:pos2]])
            self.vars[name] = value
        return True

    def requires(self):
        """Return the package names listed in the "Requires:" field.

        Version constraints such as "pkg >= 1.0" are stripped down to
        the bare package name.
        """
        if not 'requires' in self.vars:
            return []
        deps = []
        for req_item in self.vars['requires'].split(','):
            req_item = req_item.strip()
            if not req_item:
                continue
            for i in range(len(req_item)):
                if "=<>".find(req_item[i]) >= 0:
                    deps.append(req_item[:i].strip())
                    break
            else:
                # Bug fix: a dependency without any version constraint
                # (e.g. "glib-2.0") used to be silently dropped.
                deps.append(req_item)
        return deps

    def libs(self):
        """Return the "Libs:" value split into individual flags."""
        if not 'libs' in self.vars:
            return []
        return self.vars['libs'].split(' ')

    def cflags(self):
        """Return the "Cflags:" value split into individual flags."""
        if not 'cflags' in self.vars:
            return []
        return self.vars['cflags'].split(' ')
def calculate_pkg_priority(pkg, pkg_dict, loop_cnt):
    """Return 1 plus the summed priorities of pkg's known dependencies.

    "loop_cnt" guards against circular Requires chains: once the
    recursion is more than 10 levels deep, a warning is written to
    stderr and the subtree contributes 0.
    """
    if loop_cnt > 10:
        sys.stderr.write("Circular dependency with pkg %s\n" % (pkg))
        return 0
    # Dependencies that were never parsed (absent from pkg_dict) are
    # skipped, exactly like the original explicit loop did.
    return 1 + sum(
        calculate_pkg_priority(pkg_dict[dep], pkg_dict, loop_cnt + 1)
        for dep in pkg.requires() if dep in pkg_dict)
if __name__ == "__main__":
pkg_names = []
pkg_dict = {}
commands = []
exist_check = False
for i in range(1,len(sys.argv)):
if sys.argv[i][0] == '-':
cmd = sys.argv[i]
commands.append(cmd)
if cmd=='--exists':
exist_check = True
elif cmd=="--help":
print "This is not very helpful, is it"
sys.exit(0)
elif cmd=="--version":
print "0.1"
sys.exit(0)
else:
pkg_names.append(sys.argv[i])
# Fix search path
PKG_CONFIG_PATH = os.getenv("PKG_CONFIG_PATH", "").strip()
if not PKG_CONFIG_PATH:
PKG_CONFIG_PATH="/usr/local/lib/pkgconfig:/usr/lib/pkgconfig"
PKG_CONFIG_PATH = PKG_CONFIG_PATH.replace(";", ":")
# Parse files
for pkg_name in pkg_names:
pkg = Pkg(pkg_name)
if not pkg.parse(PKG_CONFIG_PATH):
sys.exit(1)
pkg_dict[pkg_name] = pkg
if exist_check:
sys.exit(0)
# Calculate priority based on dependency
for pkg_name in pkg_dict.keys():
pkg = pkg_dict[pkg_name]
pkg.priority = calculate_pkg_priority(pkg, pkg_dict, 1)
# Sort package based on dependency
pkg_names = sorted(pkg_names, key=lambda pkg_name: pkg_dict[pkg_name].priority, reverse=True)
# Get the options
opts = []
for cmd in commands:
if cmd=='--libs':
for pkg_name in pkg_names:
libs = pkg_dict[pkg_name].libs()
for lib in libs:
opts.append(lib)
if lib[:2]=="-l":
break
for pkg_name in pkg_names:
opts += pkg_dict[pkg_name].libs()
elif cmd=='--cflags':
for pkg_name in pkg_names:
opts += pkg_dict[pkg_name].cflags()
elif cmd[0]=='-':
sys.stderr.write("pkgconfig.py: I don't know how to handle " + sys.argv[i] + "\n")
filtered_opts = []
for opt in opts:
opt = opt.strip()
if not opt:
continue
if REMOVE_THESE.count(opt) != 0:
continue
if opt != '-framework' and opt != '--framework' and filtered_opts.count(opt) != 0:
if len(filtered_opts) and (filtered_opts[-1] == '-framework' or filtered_opts[-1] == '--framework'):
filtered_opts.pop()
continue
filtered_opts.append(opt)
print ' '.join(filtered_opts)
| gpl-2.0 |
ryano144/intellij-community | python/lib/Lib/site-packages/django/contrib/auth/backends.py | 230 | 4582 | from django.db import connection
from django.contrib.auth.models import User, Permission
class ModelBackend(object):
    """
    Authenticates against django.contrib.auth.models.User.
    """
    supports_object_permissions = False
    supports_anonymous_user = True
    supports_inactive_user = True

    # TODO: Model, login attribute name and password attribute name should be
    # configurable.
    def authenticate(self, username=None, password=None):
        """Return the User matching the credentials, or None."""
        try:
            candidate = User.objects.get(username=username)
        except User.DoesNotExist:
            return None
        if candidate.check_password(password):
            return candidate

    def get_group_permissions(self, user_obj):
        """
        Returns a set of permission strings that this user has through his/her
        groups.
        """
        if not hasattr(user_obj, '_group_perm_cache'):
            # Superusers implicitly hold every permission in the system.
            if user_obj.is_superuser:
                perm_qs = Permission.objects.all()
            else:
                perm_qs = Permission.objects.filter(group__user=user_obj)
            pairs = perm_qs.values_list('content_type__app_label', 'codename').order_by()
            # Cache "<app_label>.<codename>" strings on the user object.
            user_obj._group_perm_cache = set("%s.%s" % pair for pair in pairs)
        return user_obj._group_perm_cache

    def get_all_permissions(self, user_obj):
        """Return every permission string the user holds (direct + group)."""
        if user_obj.is_anonymous():
            return set()
        if not hasattr(user_obj, '_perm_cache'):
            direct = set(u"%s.%s" % (p.content_type.app_label, p.codename)
                         for p in user_obj.user_permissions.select_related())
            user_obj._perm_cache = direct | self.get_group_permissions(user_obj)
        return user_obj._perm_cache

    def has_perm(self, user_obj, perm):
        """True when an *active* user holds the given permission string."""
        if not user_obj.is_active:
            return False
        return perm in self.get_all_permissions(user_obj)

    def has_module_perms(self, user_obj, app_label):
        """
        Returns True if user_obj has any permissions in the given app_label.
        """
        if not user_obj.is_active:
            return False
        return any(perm[:perm.index('.')] == app_label
                   for perm in self.get_all_permissions(user_obj))

    def get_user(self, user_id):
        """Return the User with the given primary key, or None."""
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
class RemoteUserBackend(ModelBackend):
    """
    This backend is to be used in conjunction with the ``RemoteUserMiddleware``
    found in the middleware module of this package, and is used when the server
    is handling authentication outside of Django.

    By default, ``authenticate`` creates a ``User`` for usernames that do
    not exist yet; subclasses may set ``create_unknown_user = False`` to
    disable that behavior.
    """
    # Create a User object if not already in the database?
    create_unknown_user = True

    def authenticate(self, remote_user):
        """
        The username passed as ``remote_user`` is considered trusted. This
        method simply returns the ``User`` object with the given username,
        creating a new ``User`` object if ``create_unknown_user`` is ``True``.

        Returns None if ``create_unknown_user`` is ``False`` and a ``User``
        object with the given username is not found in the database.
        """
        if not remote_user:
            return
        username = self.clean_username(remote_user)
        if self.create_unknown_user:
            # get_or_create (rather than try/except around get) has
            # built-in safeguards against races between multiple threads.
            user, created = User.objects.get_or_create(username=username)
            return self.configure_user(user) if created else user
        try:
            return User.objects.get(username=username)
        except User.DoesNotExist:
            return None

    def clean_username(self, username):
        """
        Performs any cleaning on the "username" prior to using it to get or
        create the user object. Returns the cleaned username.

        By default, returns the username unchanged.
        """
        return username

    def configure_user(self, user):
        """
        Configures a user after creation and returns the updated user.

        By default, returns the user unmodified.
        """
        return user
| apache-2.0 |
HerlanAssis/Django-AulaOsvandoSantana | lib/python2.7/site-packages/django/db/models/query.py | 20 | 71252 | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F, Date, DateTime
from django.db.models.fields import AutoField, Empty
from django.db.models.query_utils import (
Q, InvalidQuery, deferred_class_factory,
)
from django.db.models.sql.constants import CURSOR
from django.utils import six, timezone
from django.utils.functional import partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
def _pickle_queryset(class_bases, class_dict):
    """
    Used by `__reduce__` to create the initial version of the `QuerySet` class
    onto which the output of `__getstate__` will be applied.

    See `__reduce__` for more details.
    """
    shell = Empty()
    # Rebuild the dynamically-created class and retag the empty instance.
    primary_base = class_bases[0]
    shell.__class__ = type(primary_base.__name__, class_bases, class_dict)
    return shell
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
    def __init__(self, model=None, query=None, using=None, hints=None):
        """Set up a lazy queryset over ``model``, optionally on a fixed db."""
        self.model = model
        self._db = using  # database alias pinned by .using(); None = route per query
        self._hints = hints or {}  # hints forwarded to the database router
        self.query = query or sql.Query(self.model)  # underlying SQL query tree
        self._result_cache = None  # None until evaluated; then a list of results
        self._sticky_filter = False  # flag consumed by clone/filter logic later in the file
        self._for_write = False  # when True, db routing targets the write database
        self._prefetch_related_lookups = []  # lookups queued by prefetch_related()
        self._prefetch_done = False  # True once the prefetch pass has run
        self._known_related_objects = {}  # {rel_field: {pk: rel_obj}}
    def as_manager(cls):
        """Return a Manager instance whose querysets use this class."""
        # Address the circular dependency between `Queryset` and `Manager`.
        from django.db.models.manager import Manager
        manager = Manager.from_queryset(cls)()
        # NOTE(review): flag appears to be consumed by Django's manager /
        # migration machinery to detect managers built this way -- confirm.
        manager._built_with_as_manager = True
        return manager
    # NOTE(review): queryset_only presumably hides this method from
    # managers generated via from_queryset() -- confirm against Manager.
    as_manager.queryset_only = True
    as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
    def __getstate__(self):
        """
        Allows the QuerySet to be pickled.
        """
        # Force the cache to be fully populated.
        self._fetch_all()
        obj_dict = self.__dict__.copy()
        # Stamp the pickle with the producing Django version so that
        # __setstate__ can warn when unpickling under a different one.
        obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
        return obj_dict
    def __setstate__(self, state):
        """Restore pickled state, warning on a Django version mismatch."""
        msg = None
        pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
        if pickled_version:
            current_version = get_version()
            if current_version != pickled_version:
                msg = ("Pickled queryset instance's Django version %s does"
                       " not match the current version %s."
                       % (pickled_version, current_version))
        else:
            # Pickle produced before the version stamp existed.
            msg = "Pickled queryset instance's Django version is not specified."
        if msg:
            # Warn but still unpickle: a mismatch is suspect, not fatal.
            warnings.warn(msg, RuntimeWarning, stacklevel=2)
        self.__dict__.update(state)
    def __reduce__(self):
        """
        Used by pickle to deal with the types that we create dynamically when
        specialized queryset such as `ValuesQuerySet` are used in conjunction
        with querysets that are *subclasses* of `QuerySet`.

        See `_clone` implementation for more details.
        """
        if hasattr(self, '_specialized_queryset_class'):
            # Dynamically-created class: record the two base classes and
            # enough of a class dict for _pickle_queryset to rebuild it.
            class_bases = (
                self._specialized_queryset_class,
                self._base_queryset_class,
            )
            class_dict = {
                '_specialized_queryset_class': self._specialized_queryset_class,
                '_base_queryset_class': self._base_queryset_class,
            }
            return _pickle_queryset, (class_bases, class_dict), self.__getstate__()
        # Plain class: the default reduce protocol is sufficient.
        return super(QuerySet, self).__reduce__()
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
    def __len__(self):
        # len() evaluates the queryset (fills the cache) and counts rows.
        self._fetch_all()
        return len(self._result_cache)
    def __iter__(self):
        """
        The queryset iterator protocol uses three nested iterators in the
        default case:
            1. sql.compiler:execute_sql()
               - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
                 using cursor.fetchmany(). This part is responsible for
                 doing some column masking, and returning the rows in chunks.
            2. sql/compiler.results_iter()
               - Returns one row at time. At this point the rows are still
                 just tuples. In some cases the return values are converted
                 to Python values at this location.
            3. self.iterator()
               - Responsible for turning the rows into model objects.
        """
        self._fetch_all()  # evaluates the query and fills _result_cache
        return iter(self._result_cache)
    def __bool__(self):
        # Truthiness requires evaluating the queryset (fills the cache).
        self._fetch_all()
        return bool(self._result_cache)
    def __nonzero__(self):  # Python 2 compatibility
        # Delegate through the type so subclass __bool__ overrides apply.
        return type(self).__bool__(self)
    def __getitem__(self, k):
        """
        Retrieves an item or slice from the set of results.
        """
        if not isinstance(k, (slice,) + six.integer_types):
            raise TypeError
        assert ((not isinstance(k, slice) and (k >= 0)) or
                (isinstance(k, slice) and (k.start is None or k.start >= 0) and
                 (k.stop is None or k.stop >= 0))), \
            "Negative indexing is not supported."
        # Already evaluated: index straight into the cached results.
        if self._result_cache is not None:
            return self._result_cache[k]
        if isinstance(k, slice):
            # Translate the slice bounds into a LIMIT/OFFSET on a clone.
            qs = self._clone()
            if k.start is not None:
                start = int(k.start)
            else:
                start = None
            if k.stop is not None:
                stop = int(k.stop)
            else:
                stop = None
            qs.query.set_limits(start, stop)
            # A step cannot be expressed in SQL, so with a step the clone
            # is evaluated and sliced in Python; otherwise stay lazy.
            return list(qs)[::k.step] if k.step else qs
        # Single index: fetch exactly one row via LIMIT/OFFSET.
        qs = self._clone()
        qs.query.set_limits(k, k + 1)
        return list(qs)[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
    def iterator(self):
        """
        An iterator over the results from applying this QuerySet to the
        database.

        Yields model instances one at a time, without populating this
        queryset's result cache.
        """
        db = self.db
        compiler = self.query.get_compiler(using=db)
        # Execute the query. This will also fill compiler.select, klass_info,
        # and annotations.
        results = compiler.execute_sql()
        select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
                                                  compiler.annotation_col_map)
        # No model columns selected: nothing to materialize.
        if klass_info is None:
            return
        model_cls = klass_info['model']
        select_fields = klass_info['select_fields']
        # Contiguous range of columns in each row that belong to the model.
        model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
        init_list = [f[0].target.attname
                     for f in select[model_fields_start:model_fields_end]]
        # If fewer columns were selected than the model has concrete
        # fields (e.g. only()/defer()), build a deferred model class that
        # lazy-loads the missing attributes.
        if len(init_list) != len(model_cls._meta.concrete_fields):
            init_set = set(init_list)
            skip = [f.attname for f in model_cls._meta.concrete_fields
                    if f.attname not in init_set]
            model_cls = deferred_class_factory(model_cls, skip)
        related_populators = get_related_populators(klass_info, select, db)
        for row in compiler.results_iter(results):
            obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
            # Attach select_related() objects built from the same row.
            if related_populators:
                for rel_populator in related_populators:
                    rel_populator.populate(row, obj)
            # Attach annotation values as plain attributes.
            if annotation_col_map:
                for attr_name, col_pos in annotation_col_map.items():
                    setattr(obj, attr_name, row[col_pos])
            # Add the known related objects to the model, if there are any
            if self._known_related_objects:
                for field, rel_objs in self._known_related_objects.items():
                    # Avoid overwriting objects loaded e.g. by select_related
                    if hasattr(obj, field.get_cache_name()):
                        continue
                    pk = getattr(obj, field.get_attname())
                    try:
                        rel_obj = rel_objs[pk]
                    except KeyError:
                        pass  # may happen in qs1 | qs2 scenarios
                    else:
                        setattr(obj, field.name, rel_obj)
            yield obj
    def aggregate(self, *args, **kwargs):
        """
        Returns a dictionary containing the calculations (aggregation)
        over the current queryset.

        If args is present the expression is passed as a kwarg using
        the Aggregate object's default alias.
        """
        if self.query.distinct_fields:
            raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
        # Fold positional aggregates into kwargs under their default alias.
        for arg in args:
            # The default_alias property may raise a TypeError, so we use
            # a try/except construct rather than hasattr in order to remain
            # consistent between PY2 and PY3 (hasattr would swallow
            # the TypeError on PY2).
            try:
                arg.default_alias
            except (AttributeError, TypeError):
                raise TypeError("Complex aggregates require an alias")
            kwargs[arg.default_alias] = arg
        # Work on a clone of the query so this queryset stays reusable.
        query = self.query.clone()
        for (alias, aggregate_expr) in kwargs.items():
            query.add_annotation(aggregate_expr, alias, is_summary=True)
            # Reject plain expressions that contain no aggregate function.
            if not query.annotations[alias].contains_aggregate:
                raise TypeError("%s is not an aggregate expression" % alias)
        return query.get_aggregation(self.db, kwargs.keys())
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
    def get(self, *args, **kwargs):
        """
        Perform the query and return a single object matching the given
        keyword arguments.

        Raises ``self.model.DoesNotExist`` when nothing matches and
        ``self.model.MultipleObjectsReturned`` when more than one row does.
        """
        clone = self.filter(*args, **kwargs)
        if self.query.can_filter():
            # Drop any ordering: it is irrelevant for a single-object fetch
            # and may add needless work. Skipped for sliced querysets, which
            # cannot be re-ordered.
            clone = clone.order_by()
        # len() forces evaluation and fills clone._result_cache.
        num = len(clone)
        if num == 1:
            return clone._result_cache[0]
        if not num:
            raise self.model.DoesNotExist(
                "%s matching query does not exist." %
                self.model._meta.object_name
            )
        raise self.model.MultipleObjectsReturned(
            "get() returned more than one %s -- it returned %s!" %
            (self.model._meta.object_name, num)
        )
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
    def bulk_create(self, objs, batch_size=None):
        """
        Insert each of the instances into the database. This does *not* call
        save() on each of the instances, does not send any pre/post save
        signals, and does not set the primary key attribute if it is an
        autoincrement field.

        ``batch_size`` caps how many rows go into each INSERT statement;
        None lets the backend pick. Returns ``objs`` as a list.
        """
        # So this case is fun. When you bulk insert you don't get the primary
        # keys back (if it's an autoincrement), so you can't insert into the
        # child tables which references this. There are two workarounds, 1)
        # this could be implemented if you didn't have an autoincrement pk,
        # and 2) you could do it by doing O(n) normal inserts into the parent
        # tables to get the primary keys back, and then doing a single bulk
        # insert into the childmost table. Some databases might allow doing
        # this by using RETURNING clause for the insert query. We're punting
        # on these for now because they are relatively rare cases.
        assert batch_size is None or batch_size > 0
        if self.model._meta.parents:
            raise ValueError("Can't bulk create an inherited model")
        if not objs:
            return objs
        self._for_write = True
        connection = connections[self.db]
        fields = self.model._meta.local_concrete_fields
        objs = list(objs)
        self._populate_pk_values(objs)
        # One atomic block (no savepoint) so a failure rolls back every batch.
        with transaction.atomic(using=self.db, savepoint=False):
            if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
                    and self.model._meta.has_auto_field):
                self._batched_insert(objs, fields, batch_size)
            else:
                # Split objects by whether their pk is set; presumably
                # partition() puts predicate-true items in the second list
                # (defined elsewhere in this module) — NOTE(review): verify.
                objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
                if objs_with_pk:
                    self._batched_insert(objs_with_pk, fields, batch_size)
                if objs_without_pk:
                    # The auto pk column must be omitted so the backend
                    # generates values for it.
                    fields = [f for f in fields if not isinstance(f, AutoField)]
                    self._batched_insert(objs_without_pk, fields, batch_size)
        return objs
    def get_or_create(self, defaults=None, **kwargs):
        """
        Look up an object with the given kwargs, creating one if necessary.

        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created. ``defaults`` supplies
        extra attributes used only when creating.
        """
        lookup, params = self._extract_model_params(defaults, **kwargs)
        # Both the lookup and the potential insert must hit the same
        # (write) database.
        self._for_write = True
        try:
            return self.get(**lookup), False
        except self.model.DoesNotExist:
            return self._create_object_from_params(lookup, params)
    def update_or_create(self, defaults=None, **kwargs):
        """
        Look up an object with the given kwargs, updating one with defaults
        if it exists, otherwise create a new one.

        Returns a tuple (object, created), where created is a boolean
        specifying whether an object was created.
        """
        defaults = defaults or {}
        lookup, params = self._extract_model_params(defaults, **kwargs)
        self._for_write = True
        try:
            obj = self.get(**lookup)
        except self.model.DoesNotExist:
            obj, created = self._create_object_from_params(lookup, params)
            # A fresh object already carries the defaults; no update needed.
            if created:
                return obj, created
        # Existing (or concurrently-created) row: apply defaults and save.
        for k, v in six.iteritems(defaults):
            setattr(obj, k, v)
        with transaction.atomic(using=self.db, savepoint=False):
            obj.save(using=self.db)
        return obj, False
    def _create_object_from_params(self, lookup, params):
        """
        Try to create an object using the passed params; on an integrity
        error, retry the lookup in case a concurrent writer inserted the
        row first. Used by get_or_create and update_or_create.
        """
        try:
            # The savepoint from atomic() keeps a failed INSERT from
            # breaking an enclosing transaction.
            with transaction.atomic(using=self.db):
                obj = self.create(**params)
            return obj, True
        except IntegrityError:
            # Preserve the original traceback so it can be re-raised if the
            # fallback lookup also fails.
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                pass
            six.reraise(*exc_info)
    def _extract_model_params(self, defaults, **kwargs):
        """
        Prepare `lookup` (kwargs that are valid model attributes) and
        `params` (for creating a model instance) based on the given kwargs;
        for use by get_or_create and update_or_create.
        """
        defaults = defaults or {}
        lookup = kwargs.copy()
        # Normalize attnames (e.g. "author_id") to field names ("author")
        # so the lookup matches how the model exposes them.
        for f in self.model._meta.fields:
            if f.attname in lookup:
                lookup[f.name] = lookup.pop(f.attname)
        # Keys containing the lookup separator ("field__lookup") are filter
        # syntax, not constructor arguments, so they are excluded here.
        params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
        params.update(defaults)
        return lookup, params
    def _earliest_or_latest(self, field_name=None, direction="-"):
        """
        Return the latest (direction="-") or earliest (direction="") object
        according to the model's 'get_latest_by' option or the optional
        given field_name. Raises the model's DoesNotExist via get() when
        the queryset is empty.
        """
        order_by = field_name or getattr(self.model._meta, 'get_latest_by')
        assert bool(order_by), "earliest() and latest() require either a "\
            "field_name parameter or 'get_latest_by' in the model"
        assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken."
        obj = self._clone()
        # Exactly one row, ordered solely by the requested field.
        obj.query.set_limits(high=1)
        obj.query.clear_ordering(force_empty=True)
        obj.query.add_ordering('%s%s' % (direction, order_by))
        return obj.get()
    def earliest(self, field_name=None):
        """Return the earliest object by field_name or Meta.get_latest_by."""
        return self._earliest_or_latest(field_name=field_name, direction="")
    def latest(self, field_name=None):
        """Return the latest object by field_name or Meta.get_latest_by."""
        return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
Returns the first object of a query, returns None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
Returns the last object of a query, returns None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
return {obj._get_pk_val(): obj for obj in qs}
    def delete(self):
        """
        Delete the records in the current QuerySet.

        Cannot be used on a sliced queryset.
        """
        assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with delete."
        del_query = self._clone()
        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True
        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force_empty=True)
        # Collector handles cascades to related objects before deleting.
        collector = Collector(using=del_query.db)
        collector.collect(del_query)
        collector.delete()
        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
    delete.alters_data = True
    delete.queryset_only = True
    def _raw_delete(self, using):
        """
        Delete objects found from the given queryset in a single direct SQL
        query. No signals are sent, and there is no protection for cascades.
        """
        sql.DeleteQuery(self.model).delete_qs(self, using)
    _raw_delete.alters_data = True
    def update(self, **kwargs):
        """
        Update all elements in the current QuerySet, setting all the given
        fields to the appropriate values. Returns the number of rows
        matched, as reported by the backend.
        """
        assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
        self._for_write = True
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_values(kwargs)
        # No savepoint: an inner failure should roll back any surrounding
        # atomic block as a whole.
        with transaction.atomic(using=self.db, savepoint=False):
            rows = query.get_compiler(self.db).execute_sql(CURSOR)
        # Cached results are stale after the UPDATE.
        self._result_cache = None
        return rows
    update.alters_data = True
    def _update(self, values):
        """
        A version of update() that accepts field objects instead of field
        names. Used primarily for model saving and not intended for use by
        general code (it requires too much poking around at model internals
        to be useful at that level).
        """
        assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_fields(values)
        # Cached results are stale after the UPDATE.
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(CURSOR)
    _update.alters_data = True
    _update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
    def _prefetch_related_objects(self):
        """Run the registered prefetch lookups over the cached results."""
        # This method can only be called once the result cache has been filled.
        prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
        self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model,
params=params, translations=translations,
using=using)
    def values(self, *fields):
        """Return a ValuesQuerySet yielding dicts for the given fields."""
        return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
    def values_list(self, *fields, **kwargs):
        """
        Return a ValuesListQuerySet yielding tuples for the given fields;
        with flat=True and a single field, yield bare values instead.
        """
        flat = kwargs.pop('flat', False)
        if kwargs:
            raise TypeError('Unexpected keyword arguments to values_list: %s'
                            % (list(kwargs),))
        if flat and len(fields) > 1:
            raise TypeError("'flat' is not valid when values_list is called with more than one field.")
        return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
                           _fields=fields)
    def dates(self, field_name, kind, order='ASC'):
        """
        Return a list of date objects representing all available dates for
        the given field_name, scoped to 'kind' ('year', 'month' or 'day').
        """
        assert kind in ("year", "month", "day"), \
            "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        # The truncated date is exposed via an annotation; the plain field
        # is also annotated so NULL dates can be filtered out.
        return self.annotate(
            datefield=Date(field_name, kind),
            plain_field=F(field_name)
        ).values_list(
            'datefield', flat=True
        ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
    def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
        """
        Return a list of datetime objects representing all available
        datetimes for the given field_name, scoped to 'kind' ('year' down
        to 'second'). When USE_TZ is on, truncation happens in ``tzinfo``
        (defaulting to the current timezone).
        """
        assert kind in ("year", "month", "day", "hour", "minute", "second"), \
            "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
        assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
        if settings.USE_TZ:
            if tzinfo is None:
                tzinfo = timezone.get_current_timezone()
        else:
            # Naive datetimes: any supplied tzinfo is deliberately ignored.
            tzinfo = None
        return self.annotate(
            datetimefield=DateTime(field_name, kind, tzinfo),
            plain_field=F(field_name)
        ).values_list(
            'datetimefield', flat=True
        ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
    def all(self):
        """
        Return a new QuerySet that is a copy of the current one. This allows
        a QuerySet to proxy for a model manager in some cases.
        """
        return self._clone()
    def filter(self, *args, **kwargs):
        """
        Return a new QuerySet instance with the args ANDed to the existing
        set.
        """
        return self._filter_or_exclude(False, *args, **kwargs)
    def exclude(self, *args, **kwargs):
        """
        Return a new QuerySet instance with NOT (args) ANDed to the existing
        set.
        """
        return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, nowait=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
    def annotate(self, *args, **kwargs):
        """
        Return a queryset in which the returned objects have been annotated
        with extra data or aggregations.

        Positional args must provide a ``default_alias``; kwargs name their
        annotation explicitly. Raises ValueError on alias collisions with
        other annotations or with model fields.
        """
        annotations = OrderedDict()  # To preserve ordering of args
        for arg in args:
            # The default_alias property may raise a TypeError, so we use
            # a try/except construct rather than hasattr in order to remain
            # consistent between PY2 and PY3 (hasattr would swallow
            # the TypeError on PY2).
            try:
                if arg.default_alias in kwargs:
                    raise ValueError("The named annotation '%s' conflicts with the "
                                     "default name for another annotation."
                                     % arg.default_alias)
            except (AttributeError, TypeError):
                raise TypeError("Complex annotations require an alias")
            annotations[arg.default_alias] = arg
        annotations.update(kwargs)
        obj = self._clone()
        names = getattr(self, '_fields', None)
        if names is None:
            names = {f.name for f in self.model._meta.get_fields()}
        # Add the annotations to the query
        for alias, annotation in annotations.items():
            if alias in names:
                raise ValueError("The annotation '%s' conflicts with a field on "
                                 "the model." % alias)
            obj.query.add_annotation(annotation, alias, is_summary=False)
        # expressions need to be added to the query before we know if they contain aggregates
        added_aggregates = []
        for alias, annotation in obj.query.annotations.items():
            if alias in annotations and annotation.contains_aggregate:
                added_aggregates.append(alias)
        if added_aggregates:
            # Aggregate annotations require GROUP BY; subclasses hook in here.
            obj._setup_aggregate_query(list(added_aggregates))
        return obj
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
A little helper method for bulk_insert to insert the bulk one batch
at a time. Inserts recursively a batch from the front of the bulk and
then _batched_insert() the remaining objects again.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
for batch in [objs[i:i + batch_size]
for i in range(0, len(objs), batch_size)]:
self.model._base_manager._insert(batch, fields=fields,
using=self.db)
    def _clone(self, klass=None, setup=False, **kwargs):
        """
        Copy this queryset, optionally as a different (or dynamically
        combined) QuerySet subclass; extra kwargs land on the clone's
        __dict__. setup=True triggers the clone's _setup_query() hook.
        """
        base_queryset_class = getattr(self, '_base_queryset_class', self.__class__)
        if klass is None:
            klass = self.__class__
        elif not (issubclass(base_queryset_class, klass) or issubclass(klass, base_queryset_class)):
            # Neither class is a subclass of the other: synthesize a new
            # type combining both so custom queryset behavior survives
            # e.g. a values() conversion.
            class_bases = (klass, base_queryset_class)
            class_dict = {
                '_base_queryset_class': base_queryset_class,
                '_specialized_queryset_class': klass,
            }
            klass = type(klass.__name__, class_bases, class_dict)
        query = self.query.clone()
        if self._sticky_filter:
            # Propagate the one-shot sticky-filter flag (see _next_is_sticky).
            query.filter_is_sticky = True
        c = klass(model=self.model, query=query, using=self._db, hints=self._hints)
        c._for_write = self._for_write
        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
        c._known_related_objects = self._known_related_objects
        c.__dict__.update(kwargs)
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
    def _fetch_all(self):
        """Fill the result cache (and run prefetches) exactly once."""
        if self._result_cache is None:
            self._result_cache = list(self.iterator())
        if self._prefetch_related_lookups and not self._prefetch_done:
            self._prefetch_related_objects()
    def _next_is_sticky(self):
        """
        Indicate that the next filter call and the one following that should
        be treated as a single filter. This is only important when it comes
        to determining when to reuse tables for many-to-many filters.
        Required so that we can filter naturally on the results of related
        managers.

        This doesn't return a clone of the current QuerySet (it returns
        "self"). The method is only used internally and should be
        immediately followed by a filter() that does create a clone.
        """
        self._sticky_filter = True
        return self
    def _merge_sanity_check(self, other):
        """
        Check that we are merging two comparable QuerySet classes. By
        default this does nothing, but see the ValuesQuerySet for an
        example of where it's useful.
        """
        pass
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
    def _setup_aggregate_query(self, aggregates):
        """
        Prepare the query for computing a result that contains aggregate
        annotations: enable grouping if none was configured yet.
        """
        if self.query.group_by is None:
            self.query.group_by = True
    def _prepare(self):
        # Hook used when this queryset is nested inside another query;
        # the base QuerySet needs no preparation.
        return self
    def _as_sql(self, connection):
        """
        Return the internal query's SQL and parameters (as a tuple), for
        use as a nested subquery. Only the pk column is selected.
        """
        obj = self.values("pk")
        if obj._db is None or connection == connections[obj._db]:
            return obj.query.get_compiler(connection=connection).as_nested_sql()
        raise ValueError("Can't do subqueries with queries on different DBs.")
    # When used as part of a nested query, a queryset will never be an "always
    # empty" result.
    value_annotation = True
    def _add_hints(self, **hints):
        """
        Update hinting information for later use by database routers.
        """
        # If there is any hinting information, add it to what we already know.
        # If we have a new hint for an existing key, overwrite with the new value.
        self._hints.update(hints)
    def _has_filters(self):
        """
        Check if this QuerySet has any filtering going on. Note this isn't
        equivalent to checking if all objects are present in results; for
        example qs[1:]._has_filters() -> False.
        """
        return self.query.has_filters()
def is_compatible_query_object_type(self, opts):
model = self.model
return (
model == opts.concrete_model or
opts.concrete_model in model._meta.get_parent_list() or
model in opts.get_parent_list()
)
is_compatible_query_object_type.queryset_only = True
class InstanceCheckMeta(type):
    # Metaclass hook so that isinstance(qs, EmptyQuerySet) holds for any
    # queryset whose underlying query is known to be empty.
    def __instancecheck__(self, instance):
        return instance.query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
    """
    Marker class usable for checking if a queryset is empty by .none():
    isinstance(qs.none(), EmptyQuerySet) -> True

    Never instantiated; only the isinstance() check (via InstanceCheckMeta)
    is meaningful.
    """
    def __init__(self, *args, **kwargs):
        raise TypeError("EmptyQuerySet can't be instantiated")
class ValuesQuerySet(QuerySet):
    """
    QuerySet subclass backing values(): yields dictionaries mapping
    selected column names to values instead of model instances.
    """
    def __init__(self, *args, **kwargs):
        super(ValuesQuerySet, self).__init__(*args, **kwargs)
        # select_related isn't supported in values(). (FIXME -#3358)
        self.query.select_related = False
        # QuerySet.clone() will also set up the _fields attribute with the
        # names of the model fields to select.
    def only(self, *fields):
        raise NotImplementedError("ValuesQuerySet does not implement only()")
    def defer(self, *fields):
        raise NotImplementedError("ValuesQuerySet does not implement defer()")
    def iterator(self):
        """Yield one dict per result row, keyed by column name."""
        # Purge any extra columns that haven't been explicitly asked for
        extra_names = list(self.query.extra_select)
        field_names = self.field_names
        annotation_names = list(self.query.annotation_select)
        # Extras come first in the row, then fields, then annotations.
        names = extra_names + field_names + annotation_names
        for row in self.query.get_compiler(self.db).results_iter():
            yield dict(zip(names, row))
    def delete(self):
        # values().delete() doesn't work currently - make sure it raises an
        # user friendly error.
        raise TypeError("Queries with .values() or .values_list() applied "
                        "can't be deleted")
    def _setup_query(self):
        """
        Construct the field_names list that the values query will be
        retrieving.

        Called by the _clone() method after initializing the rest of the
        instance.
        """
        if self.query.group_by is True:
            self.query.add_fields([f.attname for f in self.model._meta.concrete_fields], False)
            self.query.set_group_by()
        self.query.clear_deferred_loading()
        self.query.clear_select_fields()
        if self._fields:
            self.extra_names = []
            self.annotation_names = []
            if not self.query._extra and not self.query._annotations:
                # Short cut - if there are no extra or annotations, then
                # the values() clause must be just field names.
                self.field_names = list(self._fields)
            else:
                # Classify each requested name as an extra, an annotation,
                # or a plain model field.
                self.query.default_cols = False
                self.field_names = []
                for f in self._fields:
                    # we inspect the full extra_select list since we might
                    # be adding back an extra select item that we hadn't
                    # had selected previously.
                    if self.query._extra and f in self.query._extra:
                        self.extra_names.append(f)
                    elif f in self.query.annotation_select:
                        self.annotation_names.append(f)
                    else:
                        self.field_names.append(f)
        else:
            # Default to all fields.
            self.extra_names = None
            self.field_names = [f.attname for f in self.model._meta.concrete_fields]
            self.annotation_names = None
        self.query.select = []
        if self.extra_names is not None:
            self.query.set_extra_mask(self.extra_names)
        self.query.add_fields(self.field_names, True)
        if self.annotation_names is not None:
            self.query.set_annotation_mask(self.annotation_names)
    def _clone(self, klass=None, setup=False, **kwargs):
        """
        Clone the queryset, preserving the current fields unless the caller
        supplied _fields explicitly in kwargs.
        """
        c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
        if not hasattr(c, '_fields'):
            # Only clone self._fields if _fields wasn't passed into the cloning
            # call directly.
            c._fields = self._fields[:]
        c.field_names = self.field_names
        c.extra_names = self.extra_names
        c.annotation_names = self.annotation_names
        if setup and hasattr(c, '_setup_query'):
            c._setup_query()
        return c
    def _merge_sanity_check(self, other):
        # Combining (e.g. via |/&) only makes sense when both sides select
        # the same columns.
        super(ValuesQuerySet, self)._merge_sanity_check(other)
        if (set(self.extra_names) != set(other.extra_names) or
                set(self.field_names) != set(other.field_names) or
                self.annotation_names != other.annotation_names):
            raise TypeError("Merging '%s' classes must involve the same values in each case."
                            % self.__class__.__name__)
    def _setup_aggregate_query(self, aggregates):
        """
        Prepare the query for computing a result that contains aggregate
        annotations: group by the selected values and unmask the new
        aggregate aliases.
        """
        self.query.set_group_by()
        if self.annotation_names is not None:
            self.annotation_names.extend(aggregates)
            self.query.set_annotation_mask(self.annotation_names)
        super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
    def _as_sql(self, connection):
        """
        For ValuesQuerySet (and subclasses like ValuesListQuerySet), they can
        only be used as nested queries if they're already set up to select only
        a single field (in which case, that is the field column that is
        returned). This differs from QuerySet.as_sql(), where the column to
        select is set up by Django.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                            % self.__class__.__name__)
        obj = self._clone()
        if obj._db is None or connection == connections[obj._db]:
            return obj.query.get_compiler(connection=connection).as_nested_sql()
        raise ValueError("Can't do subqueries with queries on different DBs.")
    def _prepare(self):
        """
        Validate that we aren't trying to do a query like
        value__in=qs.values('value1', 'value2'), which isn't valid.
        """
        if ((self._fields and len(self._fields) > 1) or
                (not self._fields and len(self.model._meta.fields) > 1)):
            raise TypeError('Cannot use a multi-field %s as a filter value.'
                            % self.__class__.__name__)
        return self
    def is_compatible_query_object_type(self, opts):
        """
        ValueQuerySets do not need to be checked for compatibility.
        We trust that users of ValueQuerySets know what they are doing.
        """
        return True
class ValuesListQuerySet(ValuesQuerySet):
    """
    QuerySet subclass backing values_list(): yields tuples of values (or
    bare values with flat=True) instead of dictionaries.
    """
    def iterator(self):
        compiler = self.query.get_compiler(self.db)
        if self.flat and len(self._fields) == 1:
            # flat=True with a single field: yield the value itself.
            for row in compiler.results_iter():
                yield row[0]
        elif not self.query.extra_select and not self.query.annotation_select:
            # Plain fields only: rows already come back in the right order.
            for row in compiler.results_iter():
                yield tuple(row)
        else:
            # When extra(select=...) or an annotation is involved, the extra
            # cols are always at the start of the row, and we need to reorder
            # the fields to match the order in self._fields.
            extra_names = list(self.query.extra_select)
            field_names = self.field_names
            annotation_names = list(self.query.annotation_select)
            names = extra_names + field_names + annotation_names
            # If a field list has been specified, use it. Otherwise, use the
            # full list of fields, including extras and annotations.
            if self._fields:
                fields = list(self._fields) + [f for f in annotation_names if f not in self._fields]
            else:
                fields = names
            for row in compiler.results_iter():
                data = dict(zip(names, row))
                yield tuple(data[f] for f in fields)
    def _clone(self, *args, **kwargs):
        """Clone, carrying the flat flag over unless kwargs supplied one."""
        clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
        if not hasattr(clone, "flat"):
            # Only assign flat if the clone didn't already get it from kwargs
            clone.flat = self.flat
        return clone
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(f.column) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
fields = [self.model_fields.get(c, None) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<RawQuerySet: %s>" % self.query
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias)
@property
def columns(self):
    """
    A list of model field names in the order they'll appear in the
    query results.

    Computed once and cached on ``self._columns``; translated column
    names are substituted with their model field names.
    """
    if not hasattr(self, '_columns'):
        cols = self.query.get_columns()
        # Adjust any column names which don't match field names.
        for query_name, model_name in self.translations.items():
            # Translations for non-existent column names are ignored.
            if query_name in cols:
                cols[cols.index(query_name)] = model_name
        self._columns = cols
    return self._columns
@property
def model_fields(self):
    """
    A dict mapping database column names (run through the backend's
    table-name converter) to the corresponding model field objects.
    Computed once and cached on ``self._model_fields``.
    """
    if not hasattr(self, '_model_fields'):
        convert = connections[self.db].introspection.table_name_converter
        self._model_fields = {}
        for field in self.model._meta.fields:
            _, column = field.get_attname_column()
            self._model_fields[convert(column)] = field
    return self._model_fields
class Prefetch(object):
    """
    Describes a single prefetch_related() lookup, optionally bound to a
    custom queryset and/or storing its result under a custom attribute.
    """

    def __init__(self, lookup, queryset=None, to_attr=None):
        # `prefetch_through` is the path we traverse to perform the prefetch.
        self.prefetch_through = lookup
        # `prefetch_to` is the path to the attribute that stores the result.
        if to_attr:
            head = lookup.split(LOOKUP_SEP)[:-1]
            self.prefetch_to = LOOKUP_SEP.join(head + [to_attr])
        else:
            self.prefetch_to = lookup
        self.queryset = queryset
        self.to_attr = to_attr

    def add_prefix(self, prefix):
        """Prepend *prefix* to both the traversal and the storage paths."""
        self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
        self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])

    def get_current_prefetch_through(self, level):
        """Traversal path truncated to the first *level* + 1 components."""
        parts = self.prefetch_through.split(LOOKUP_SEP)
        return LOOKUP_SEP.join(parts[:level + 1])

    def get_current_prefetch_to(self, level):
        """Storage path truncated to the first *level* + 1 components."""
        parts = self.prefetch_to.split(LOOKUP_SEP)
        return LOOKUP_SEP.join(parts[:level + 1])

    def get_current_to_attr(self, level):
        """
        Return ``(attribute name at level, as_attr flag)``; the flag is
        truthy only on the last component of a lookup created with to_attr.
        """
        parts = self.prefetch_to.split(LOOKUP_SEP)
        to_attr = parts[level]
        as_attr = self.to_attr and level == len(parts) - 1
        return to_attr, as_attr

    def get_current_queryset(self, level):
        """The custom queryset only applies at the final level of the lookup."""
        if self.get_current_prefetch_to(level) == self.prefetch_to:
            return self.queryset
        return None

    def __eq__(self, other):
        # Two lookups are considered equal when they store to the same path.
        return isinstance(other, Prefetch) and self.prefetch_to == other.prefetch_to

    def __hash__(self):
        return hash(self.__class__) ^ hash(self.prefetch_to)
def normalize_prefetch_lookups(lookups, prefix=None):
    """
    Normalize a mixed list of lookup strings / Prefetch objects into a
    list of Prefetch objects, optionally prefixing each with *prefix*.
    Existing Prefetch instances are kept (and mutated in place by
    add_prefix), not copied.
    """
    normalized = []
    for item in lookups:
        prefetch = item if isinstance(item, Prefetch) else Prefetch(item)
        if prefix:
            prefetch.add_prefix(prefix)
        normalized.append(prefetch)
    return normalized
def prefetch_related_objects(result_cache, related_lookups):
    """
    Helper function for prefetch_related functionality
    Populates prefetched objects caches for a list of results
    from a QuerySet

    ``result_cache`` is the list of already-fetched model instances;
    ``related_lookups`` may contain lookup strings and/or Prefetch objects.
    Raises ValueError on a duplicate lookup with a custom queryset, and
    AttributeError / ValueError on lookups that cannot be resolved.
    """
    if len(result_cache) == 0:
        return  # nothing to do

    related_lookups = normalize_prefetch_lookups(related_lookups)

    # We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below).  So we need some book keeping to
    # ensure we don't do duplicate work.
    done_queries = {}    # dictionary of things like 'foo__bar': [results]

    auto_lookups = set()  # we add to this as we go through.
    followed_descriptors = set()  # recursion protection

    all_lookups = deque(related_lookups)
    while all_lookups:
        lookup = all_lookups.popleft()
        if lookup.prefetch_to in done_queries:
            if lookup.queryset:
                raise ValueError("'%s' lookup was already seen with a different queryset. "
                                 "You may need to adjust the ordering of your lookups." % lookup.prefetch_to)

            continue

        # Top level, the list of objects to decorate is the result cache
        # from the primary QuerySet. It won't be for deeper levels.
        obj_list = result_cache

        through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
        for level, through_attr in enumerate(through_attrs):
            # Prepare main instances
            if len(obj_list) == 0:
                break

            prefetch_to = lookup.get_current_prefetch_to(level)
            if prefetch_to in done_queries:
                # Skip any prefetching, and any object preparation
                obj_list = done_queries[prefetch_to]
                continue

            # Prepare objects:
            good_objects = True
            for obj in obj_list:
                # Since prefetching can re-use instances, it is possible to have
                # the same instance multiple times in obj_list, so obj might
                # already be prepared.
                if not hasattr(obj, '_prefetched_objects_cache'):
                    try:
                        obj._prefetched_objects_cache = {}
                    except AttributeError:
                        # Must be in a QuerySet subclass that is not returning
                        # Model instances, either in Django or 3rd
                        # party. prefetch_related() doesn't make sense, so quit
                        # now.
                        good_objects = False
                        break
            if not good_objects:
                break

            # Descend down tree

            # We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to first object applies to all.
            first_obj = obj_list[0]
            prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr)

            if not attr_found:
                raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
                                     "parameter to prefetch_related()" %
                                     (through_attr, first_obj.__class__.__name__, lookup.prefetch_through))

            if level == len(through_attrs) - 1 and prefetcher is None:
                # Last one, this *must* resolve to something that supports
                # prefetching, otherwise there is no point adding it and the
                # developer asking for it has made a mistake.
                raise ValueError("'%s' does not resolve to an item that supports "
                                 "prefetching - this is an invalid parameter to "
                                 "prefetch_related()." % lookup.prefetch_through)

            if prefetcher is not None and not is_fetched:
                obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
                # We need to ensure we don't keep adding lookups from the
                # same relationships to stop infinite recursion. So, if we
                # are already on an automatically added lookup, don't add
                # the new lookups from relationships we've seen already.
                if not (lookup in auto_lookups and descriptor in followed_descriptors):
                    done_queries[prefetch_to] = obj_list
                    new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
                    auto_lookups.update(new_lookups)
                    all_lookups.extendleft(new_lookups)
                followed_descriptors.add(descriptor)
            else:
                # Either a singly related object that has already been fetched
                # (e.g. via select_related), or hopefully some other property
                # that doesn't support prefetching but needs to be traversed.

                # We replace the current list of parent objects with the list
                # of related objects, filtering out empty or missing values so
                # that we can continue with nullable or reverse relations.
                new_obj_list = []
                for obj in obj_list:
                    try:
                        new_obj = getattr(obj, through_attr)
                    except exceptions.ObjectDoesNotExist:
                        continue
                    if new_obj is None:
                        continue
                    # We special-case `list` rather than something more generic
                    # like `Iterable` because we don't want to accidentally match
                    # user models that define __iter__.
                    if isinstance(new_obj, list):
                        new_obj_list.extend(new_obj)
                    else:
                        new_obj_list.append(new_obj)
                obj_list = new_obj_list
def get_prefetcher(instance, attr):
    """
    For the attribute ``attr`` on the given instance, find an object that
    has a get_prefetch_queryset().

    Returns a 4-tuple:
    (the object with get_prefetch_queryset (or None),
     the descriptor object representing this relationship (or None),
     a boolean that is False if the attribute was not found at all,
     a boolean that is True if the attribute has already been fetched)
    """
    prefetcher = None
    is_fetched = False

    # For singly related objects we must avoid reading the attribute off the
    # instance, as that would trigger the query; look it up on the class
    # first to obtain the descriptor object instead.
    descriptor = getattr(instance.__class__, attr, None)
    attr_found = True if descriptor is not None else hasattr(instance, attr)

    if descriptor:
        if hasattr(descriptor, 'get_prefetch_queryset'):
            # Singly related object: the descriptor itself is the prefetcher.
            prefetcher = descriptor
            if descriptor.is_cached(instance):
                is_fetched = True
        else:
            # The descriptor doesn't support prefetching, so fall back to the
            # instance attribute (supports many-related managers).
            related = getattr(instance, attr)
            if hasattr(related, 'get_prefetch_queryset'):
                prefetcher = related
    return prefetcher, descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
    """
    Helper function for prefetch_related_objects

    Runs prefetches on all instances using the prefetcher object,
    assigning results to relevant caches in instance.

    The prefetched objects are returned, along with any additional
    prefetches that must be done due to prefetch_related lookups
    found from default managers.
    """
    # prefetcher must have a method get_prefetch_queryset() which takes a list
    # of instances, and returns a tuple:

    # (queryset of instances of self.model that are related to passed in instances,
    #  callable that gets value to be matched for returned instances,
    #  callable that gets value to be matched for passed in instances,
    #  boolean that is True for singly related objects,
    #  cache name to assign to).

    # The 'values to be matched' must be hashable as they will be used
    # in a dictionary.

    rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
        prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
    # We have to handle the possibility that the QuerySet we just got back
    # contains some prefetch_related lookups. We don't want to trigger the
    # prefetch_related functionality by evaluating the query. Rather, we need
    # to merge in the prefetch_related lookups.
    additional_lookups = getattr(rel_qs, '_prefetch_related_lookups', [])
    if additional_lookups:
        # Don't need to clone because the manager should have given us a fresh
        # instance, so we access an internal instead of using public interface
        # for performance reasons.
        rel_qs._prefetch_related_lookups = []

    all_related_objects = list(rel_qs)

    # Group the fetched related objects by the value they match on, so each
    # instance can pick up its own slice below.
    rel_obj_cache = {}
    for rel_obj in all_related_objects:
        rel_attr_val = rel_obj_attr(rel_obj)
        rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)

    for obj in instances:
        instance_attr_val = instance_attr(obj)
        vals = rel_obj_cache.get(instance_attr_val, [])
        to_attr, as_attr = lookup.get_current_to_attr(level)
        if single:
            # Singly related: assign the one matching object (or None).
            val = vals[0] if vals else None
            to_attr = to_attr if as_attr else cache_name
            setattr(obj, to_attr, val)
        else:
            if as_attr:
                setattr(obj, to_attr, vals)
            else:
                # Cache in the QuerySet.all().
                qs = getattr(obj, to_attr).all()
                qs._result_cache = vals
                # We don't want the individual qs doing prefetch_related now,
                # since we have merged this into the current work.
                qs._prefetch_done = True
                obj._prefetched_objects_cache[cache_name] = qs
    return all_related_objects, additional_lookups
class RelatedPopulator(object):
    """
    RelatedPopulator is used for select_related() object instantiation.

    The idea is that each select_related() model will be populated by a
    different RelatedPopulator instance. The RelatedPopulator instances get
    klass_info and select (computed in SQLCompiler) plus the used db as
    input for initialization. That data is used to compute which columns
    to use, how to instantiate the model, and how to populate the links
    between the objects.

    The actual creation of the objects is done in populate() method. This
    method gets row and from_obj as input and populates the select_related()
    model instance.
    """
    def __init__(self, klass_info, select, db):
        """Precompute everything needed to build instances from result rows.

        ``klass_info`` and ``select`` are the structures produced by
        SQLCompiler; ``db`` is the database alias used for instantiation.
        """
        self.db = db
        # Pre-compute needed attributes. The attributes are:
        #  - model_cls: the possibly deferred model class to instantiate
        #  - either:
        #    - cols_start, cols_end: usually the columns in the row are
        #      in the same order model_cls.__init__ expects them, so we
        #      can instantiate by model_cls(*row[cols_start:cols_end])
        #    - reorder_for_init: When select_related descends to a child
        #      class, then we want to reuse the already selected parent
        #      data. However, in this case the parent data isn't necessarily
        #      in the same order that Model.__init__ expects it to be, so
        #      we have to reorder the parent data. The reorder_for_init
        #      attribute contains a function used to reorder the field data
        #      in the order __init__ expects it.
        #  - pk_idx: the index of the primary key field in the reordered
        #    model data. Used to check if a related object exists at all.
        #  - init_list: the field attnames fetched from the database. For
        #    deferred models this isn't the same as all attnames of the
        #    model's fields.
        #  - related_populators: a list of RelatedPopulator instances if
        #    select_related() descends to related models from this model.
        #  - cache_name, reverse_cache_name: the names to use for setattr
        #    when assigning the fetched object to the from_obj. If the
        #    reverse_cache_name is set, then we also set the reverse link.
        select_fields = klass_info['select_fields']
        from_parent = klass_info['from_parent']
        if not from_parent:
            # Columns arrive contiguously and already in __init__ order.
            self.cols_start = select_fields[0]
            self.cols_end = select_fields[-1] + 1
            self.init_list = [
                f[0].target.attname for f in select[self.cols_start:self.cols_end]
            ]
            self.reorder_for_init = None
        else:
            # Parent data is reused: build a map from row positions to the
            # positions Model.__init__ expects, sorted by init position.
            model_init_attnames = [
                f.attname for f in klass_info['model']._meta.concrete_fields
            ]
            reorder_map = []
            for idx in select_fields:
                field = select[idx][0].target
                init_pos = model_init_attnames.index(field.attname)
                reorder_map.append((init_pos, field.attname, idx))
            reorder_map.sort()
            self.init_list = [v[1] for v in reorder_map]
            pos_list = [row_pos for _, _, row_pos in reorder_map]

            def reorder_for_init(row):
                # Pick the selected columns out of the row in __init__ order.
                return [row[row_pos] for row_pos in pos_list]
            self.reorder_for_init = reorder_for_init

        self.model_cls = self.get_deferred_cls(klass_info, self.init_list)
        self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
        self.related_populators = get_related_populators(klass_info, select, self.db)
        field = klass_info['field']
        reverse = klass_info['reverse']
        self.reverse_cache_name = None
        if reverse:
            self.cache_name = field.rel.get_cache_name()
            self.reverse_cache_name = field.get_cache_name()
        else:
            self.cache_name = field.get_cache_name()
            if field.unique:
                # One-to-one: the related object can link back to from_obj.
                self.reverse_cache_name = field.rel.get_cache_name()

    def get_deferred_cls(self, klass_info, init_list):
        """Return the model class, deferring any fields not in ``init_list``."""
        model_cls = klass_info['model']
        if len(init_list) != len(model_cls._meta.concrete_fields):
            init_set = set(init_list)
            skip = [
                f.attname for f in model_cls._meta.concrete_fields
                if f.attname not in init_set
            ]
            model_cls = deferred_class_factory(model_cls, skip)
        return model_cls

    def populate(self, row, from_obj):
        """Build the related instance from ``row`` and attach it to ``from_obj``.

        A NULL primary key means no related row exists; None is cached.
        """
        if self.reorder_for_init:
            obj_data = self.reorder_for_init(row)
        else:
            obj_data = row[self.cols_start:self.cols_end]
        if obj_data[self.pk_idx] is None:
            obj = None
        else:
            obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
        if obj and self.related_populators:
            for rel_iter in self.related_populators:
                rel_iter.populate(row, obj)
        setattr(from_obj, self.cache_name, obj)
        if obj and self.reverse_cache_name:
            setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
    """
    Build one RelatedPopulator per related class selected via
    select_related(), as recorded in klass_info['related_klass_infos'].
    """
    return [
        RelatedPopulator(rel_klass_info, select, db)
        for rel_klass_info in klass_info.get('related_klass_infos', [])
    ]
| mit |
plepe/pgmapcss | pgmapcss/db/osmosis/db_functions.py | 1 | 15332 | # Use these functions only with a database based on an osmosis import
def objects_bbox(_bbox, db_selects, options, add_columns={}, add_param_type=[], add_param_value=[]):
    """
    Yield all OSM objects (nodes, ways, multipolygons, relations) from an
    osmosis-schema database that match the given selectors, optionally
    restricted to a bounding box.

    Parameters:
        _bbox: geometry used as bounding box filter, or None for no bbox.
        db_selects: dict mapping object type ('*', 'node', 'way', ...) to
            SQL where-clause fragments.
        options: unused here (kept for interface compatibility with the
            other objects_* functions).
        add_columns: dict of extra result columns {name: SQL expression};
            '__geo__' in the expression is replaced by the geometry column.
        add_param_type / add_param_value: extra query parameter types/values.

    NOTE(review): the mutable default arguments ({} / []) are never mutated
    inside this function, so they are harmless here — but confirm before
    changing.
    NOTE(review): `plpy` is not defined in this file; presumably this code
    runs inside a PostgreSQL PL/Python function where `plpy` is a global —
    confirm against the pgmapcss build step. The `# START ...` / `# END ...`
    comments below look like pgmapcss preprocessor markers and must not be
    altered.
    """
    import pghstore

    qry = ''

    # Build the extra-columns SQL fragment (empty string when none given).
    if len(add_columns):
        add_columns_qry = ', ' + ', '.join([
            q + ' as "' + k + '"'
            for k, q in add_columns.items()
        ])
    else:
        add_columns_qry = ''

    # When a bbox is given it is always passed as parameter $1.
    if _bbox:
        param_type = [ 'geometry' ] + add_param_type
        param_value = [ _bbox ] + add_param_value
    else:
        param_type = add_param_type
        param_value = add_param_value

    # nodes
    w = []
    for t in ('*', 'node', 'point'):
        if t in db_selects:
            w.append(db_selects[t])

    if len(w):
        bbox = ''
        if _bbox is not None:
            bbox = 'geom && $1 and ST_Intersects(geom, $1) and'

        qry = '''
select 'n' || cast(id as text) as id, version, user_id, (select name from users where id=user_id) as user, tstamp, changeset_id,
tags, geom as geo, Array['point', 'node'] as types
{add_columns}
from nodes
where {bbox} ( {w} )
'''.format(bbox=bbox, w=' or '.join(w), add_columns=add_columns_qry.replace('__geo__', 'geom'))

        plan = plpy.prepare(qry, param_type )
        res = plpy.cursor(plan, param_value )

        for r in res:
            r['types'] = list(r['types'])
            r['tags'] = pghstore.loads(r['tags'])
            # Expose OSM meta data as pseudo tags.
            r['tags']['osm:id'] = str(r['id'])
            r['tags']['osm:version'] = str(r['version'])
            r['tags']['osm:user_id'] = str(r['user_id'])
            r['tags']['osm:user'] = r['user']
            r['tags']['osm:timestamp'] = str(r['tstamp'])
            r['tags']['osm:changeset'] = str(r['changeset_id'])
            yield(r)

    # ways
    w = []
    for t in ('*', 'line', 'area', 'way'):
        if t in db_selects:
            w.append(db_selects[t])

    if len(w):
        bbox = ''
        if _bbox is not None:
            # Single-point linestrings are degenerate; accept them without
            # an intersection test.
            bbox = 'linestring && $1 and (ST_NPoints(linestring) = 1 or ST_Intersects(linestring, $1)) and'

        qry = '''
select * {add_columns} from (
select 'w' || cast(id as text) as id, version, user_id, (select name from users where id=user_id) as user, tstamp, changeset_id,
tags, (CASE WHEN ST_NPoints(linestring) >= 4 and ST_IsClosed(linestring) THEN ST_MakePolygon(linestring) ELSE linestring END) as geo, (ST_NPoints(linestring) >= 4) and ST_IsClosed(linestring) as is_closed, Array['line', 'way'] as types
'''
# START db.multipolygons
# START db.multipolygons-v0.2
        # deprecated by osmosis-multipolygon v0.3
        qry += '''
, (select array_agg(has_outer_tags) from relation_members join multipolygons on relation_members.relation_id=multipolygons.id where relation_members.member_id=ways.id and relation_members.member_type='W' and relation_members.member_role in ('outer', 'exclave')) part_of_mp_outer
'''
# ELSE db.multipolygons-v0.2
        qry += '''
, (select array_agg(true) from multipolygons where hide_outer_ways @> Array[ways.id]) part_of_mp_outer
'''
# END db.multipolygons-v0.2
# END db.multipolygons
        qry += '''
from ways
where {bbox} ( {w} ) offset 0) t
'''
        qry = qry.format(bbox=bbox, w=' or '.join(w), add_columns=add_columns_qry.replace('__geo__', 'geo'))

        plan = plpy.prepare(qry, param_type )
        res = plpy.cursor(plan, param_value )

        for r in res:
            r['types'] = list(r['types'])
            r['tags'] = pghstore.loads(r['tags'])

            # A closed way also counts as an area — unless it is hidden as
            # the outer way of a multipolygon (when that support is compiled in).
            if r['is_closed']:
# START db.multipolygons
                if not r['part_of_mp_outer'] or True not in r['part_of_mp_outer']:
# END db.multipolygons
                    r['types'].append('area')

            r['tags']['osm:id'] = str(r['id'])
            r['tags']['osm:version'] = str(r['version'])
            r['tags']['osm:user_id'] = str(r['user_id'])
            r['tags']['osm:user'] = r['user']
            r['tags']['osm:timestamp'] = str(r['tstamp'])
            r['tags']['osm:changeset'] = str(r['changeset_id'])
            yield(r)

    # Relations already emitted as multipolygons; used to avoid duplicates below.
    done_multipolygons = set()

# START db.multipolygons
    # multipolygons
    w = []
    for t in ('*', 'relation', 'area'):
        if t in db_selects:
            w.append(db_selects[t])

    if len(w):
        bbox = ''
        if _bbox is not None:
            bbox = 'geom && $1 and ST_Intersects(geom, $1) and'

        qry = '''
select * {add_columns} from (
select (CASE WHEN has_outer_tags THEN 'm' ELSE 'r' END) || cast(id as text) as id, id as rid, version, user_id, (select name from users where id=user_id) as user, tstamp, changeset_id, has_outer_tags,
tags, geom as geo, Array['area'] as types
from (select multipolygons.*, relations.version, relations.user_id, relations.tstamp, relations.changeset_id from multipolygons left join relations on multipolygons.id = relations.id) t
where {bbox} ( {w} ) offset 0) t
'''.format(bbox=bbox, w=' or '.join(w), add_columns=add_columns_qry.replace('__geo__', 'geo'))

        plan = plpy.prepare(qry, param_type )
        res = plpy.cursor(plan, param_value )

        for r in res:
            r['types'] = list(r['types'])
            r['tags'] = pghstore.loads(r['tags'])
            r['tags']['osm:id'] = str(r['id'])
            r['tags']['osm:version'] = str(r['version'])
            r['tags']['osm:user_id'] = str(r['user_id'])
            r['tags']['osm:user'] = r['user']
            r['tags']['osm:timestamp'] = str(r['tstamp'])
            r['tags']['osm:changeset'] = str(r['changeset_id'])

            if r['has_outer_tags']:
                r['tags']['osm:has_outer_tags'] = 'yes'
            else:
                # Plain multipolygon relation: remember it so the relations
                # query below doesn't yield it a second time.
                done_multipolygons.add(r['rid'])
                r['types'].append('relation')

            yield(r)
# END db.multipolygons

    # relations - (no bbox match!)
    w = []
    for t in ('*', 'relation'):
        if t in db_selects:
            w.append(db_selects[t])

    if len(w):
        qry = '''
select * {add_columns} from (
select 'r' || cast(id as text) as id, version, user_id, (select name from users where id=user_id) as user, tstamp, changeset_id,
tags, null as geo, Array['relation'] as types
from relations
where ({w}) and not id = ANY(Array[{done}]::bigint[])) t
'''.format(w=' or '.join(w), add_columns=add_columns_qry, done=','.join({ str(d) for d in done_multipolygons}))

        plan = plpy.prepare(qry, param_type )
        res = plpy.cursor(plan, param_value )

        for r in res:
            r['types'] = list(r['types'])
            r['tags'] = pghstore.loads(r['tags'])
            r['tags']['osm:id'] = str(r['id'])
            r['tags']['osm:version'] = str(r['version'])
            r['tags']['osm:user_id'] = str(r['user_id'])
            r['tags']['osm:user'] = r['user']
            r['tags']['osm:timestamp'] = str(r['tstamp'])
            r['tags']['osm:changeset'] = str(r['changeset_id'])
            yield(r)
def objects_by_id(id_list, options):
    """
    Yield OSM objects for a list of prefixed ids ('n123', 'w45', 'r6'),
    including their member lists.

    NOTE(review): this function uses `pghstore` and `plpy` without importing
    or defining them here; presumably the surrounding pgmapcss build
    assembles these functions into one PL/Python body where both are in
    scope — confirm, otherwise this raises NameError at runtime.
    """
    # nodes
    _id_list = [ int(i[1:]) for i in id_list if i[0] == 'n' ]
    plan = plpy.prepare('select *, (select name from users where id=user_id) as user from nodes where id=any($1)', ['bigint[]']);
    res = plpy.cursor(plan, [_id_list])
    for r in res:
        t = {
            'id': 'n' + str(r['id']),
            'members': [],
            'tags': pghstore.loads(r['tags']),
            'geo': r['geom'],
            'types': ['node', 'point']
        }
        # Expose OSM meta data as pseudo tags.
        t['tags']['osm:id'] = str(t['id'])
        t['tags']['osm:version'] = str(r['version'])
        t['tags']['osm:user_id'] = str(r['user_id'])
        t['tags']['osm:user'] = r['user']
        t['tags']['osm:timestamp'] = str(r['tstamp'])
        t['tags']['osm:changeset'] = str(r['changeset_id'])
        yield(t)

    # ways — member node ids are aggregated in sequence order.
    _id_list = [ int(i[1:]) for i in id_list if i[0] == 'w' ]
    plan = plpy.prepare('select id, tags, version, user_id, (select name from users where id=user_id) as user, tstamp, changeset_id, linestring as linestring, array_agg(node_id) as member_ids from (select ways.*, node_id from ways left join way_nodes on ways.id=way_nodes.way_id where ways.id=any($1) order by way_nodes.sequence_id) t group by id, tags, version, user_id, tstamp, changeset_id, linestring', ['bigint[]']);
    res = plpy.cursor(plan, [_id_list])
    for r in res:
        t = {
            'id': 'w' + str(r['id']),
            'members': [ {
                    'member_id': 'n' + str(m),
                    'sequence_id': str(i)
                }
                for i, m in enumerate(r['member_ids'])
            ],
            'tags': pghstore.loads(r['tags']),
            'geo': r['linestring'],
            'types': ['way', 'line', 'area']
        }
        t['tags']['osm:id'] = str(t['id'])
        t['tags']['osm:version'] = str(r['version'])
        t['tags']['osm:user_id'] = str(r['user_id'])
        t['tags']['osm:user'] = r['user']
        t['tags']['osm:timestamp'] = str(r['tstamp'])
        t['tags']['osm:changeset'] = str(r['changeset_id'])
        yield(t)

    # relations — members keep their type prefix ('n'/'w'/'r') and role.
    _id_list = [ int(i[1:]) for i in id_list if i[0] == 'r' ]
    plan = plpy.prepare('select id, tags, version, user_id, (select name from users where id=user_id) as user, tstamp, changeset_id, array_agg(lower(member_type) || member_id) as member_ids, array_agg(member_role) as member_roles from (select relations.*, member_type, member_id, member_role from relations left join relation_members on relations.id=relation_members.relation_id where relations.id=any($1) order by relation_members.sequence_id) t group by id, tags, version, user_id, tstamp, changeset_id', ['bigint[]']);
    res = plpy.cursor(plan, [_id_list])
    for r in res:
        t = {
            'id': 'r' + str(r['id']),
            'tags': pghstore.loads(r['tags']),
            'members': [ {
                    'member_id': m[0],
                    'role': m[1],
                    'sequence_id': i
                }
                for i, m in enumerate(zip(r['member_ids'], r['member_roles']))
            ],
            'geo': None,
            'types': ['relation']
        }
        t['tags']['osm:id'] = str(t['id'])
        t['tags']['osm:version'] = str(r['version'])
        t['tags']['osm:user_id'] = str(r['user_id'])
        t['tags']['osm:user'] = r['user']
        t['tags']['osm:timestamp'] = str(r['tstamp'])
        t['tags']['osm:changeset'] = str(r['changeset_id'])
        yield(t)
def objects_member_of(objects, other_selects, self_selects, options):
    """
    For each object in ``objects``, yield a (object, parent, link_tags)
    triple for every relation and/or way the object is a member of,
    depending on which types appear in ``other_selects``.

    BUG FIX: the way branch previously iterated with loop variable ``o``
    while its final ``yield`` referenced ``ob`` — a stale value left over
    from the relation branch (or a NameError if that branch never ran).
    Both branches now consistently use ``ob``.

    NOTE(review): relies on `plpy` and `pghstore` being in scope at
    runtime (PL/Python environment) — same as the sibling functions.
    """
    if 'relation' in other_selects:
        plan = plpy.prepare('select *, (select name from users where id=user_id) as user from relation_members join relations on relation_members.relation_id=relations.id where member_id=$1 and member_type=$2', ['bigint', 'text'])
        for ob in objects:
            member_id = ob['id']
            # Strip the type prefix ('n'/'w'/'r') and uppercase it for the
            # osmosis member_type column.
            res = plpy.cursor(plan, [int(member_id[1:]), member_id[0:1].upper()])
            for r in res:
                t = {
                    'id': 'r' + str(r['id']),
                    'tags': pghstore.loads(r['tags']),
                    'types': ['relation'],
                    'geo': None,
                }
                link_tags = {
                    'sequence_id': str(r['sequence_id']),
                    'role': str(r['member_role']),
                    'member_id': r['member_type'].lower() + str(r['member_id']),
                }
                t['tags']['osm:id'] = str(t['id'])
                t['tags']['osm:version'] = str(r['version'])
                t['tags']['osm:user_id'] = str(r['user_id'])
                t['tags']['osm:user'] = r['user']
                t['tags']['osm:timestamp'] = str(r['tstamp'])
                t['tags']['osm:changeset'] = str(r['changeset_id'])
                yield((ob, t, link_tags))

    if 'way' in other_selects:
        plan = plpy.prepare('select *, (select name from users where id=user_id) as user from way_nodes join ways on way_nodes.way_id=ways.id where node_id=$1', ['bigint'])
        for ob in objects:
            member_id = ob['id']
            # Only nodes can be members of ways.
            if member_id[0] != 'n':
                continue
            num_id = int(member_id[1:])
            res = plpy.cursor(plan, [num_id])
            for r in res:
                t = {
                    'id': 'w' + str(r['id']),
                    'tags': pghstore.loads(r['tags']),
                    'types': ['way'],
                    'geo': r['linestring'],
                }
                link_tags = {
                    'member_id': member_id,
                    'sequence_id': str(r['sequence_id'])
                }
                t['tags']['osm:id'] = str(t['id'])
                t['tags']['osm:version'] = str(r['version'])
                t['tags']['osm:user_id'] = str(r['user_id'])
                t['tags']['osm:user'] = r['user']
                t['tags']['osm:timestamp'] = str(r['tstamp'])
                t['tags']['osm:changeset'] = str(r['changeset_id'])
                yield((ob, t, link_tags))
def objects_members(objects, other_selects, self_selects, options):
    """
    For each object, yield a (object, member_object, member_link) triple
    for every member whose types satisfy all keys of ``other_selects``.
    """
    for parent in objects:
        # relations don't get 'members' from objects_bbox(), therefore
        # reload the full object by id
        reloaded = list(objects_by_id([ parent['id'] ], {}))
        if not reloaded:
            continue
        full_ob = reloaded[0]

        # Fetch all member objects in one go and index them by id.
        member_ids = [ m['member_id'] for m in full_ob['members'] ]
        members_by_id = { o['id']: o for o in objects_by_id(member_ids, {}) }

        for link in full_ob['members']:
            if link['member_id'] not in members_by_id:
                continue
            member = members_by_id[link['member_id']]
            # The member must carry every type requested in other_selects.
            if len(other_selects.keys() - member['types']):
                continue
            yield (( parent, member, link ))
def objects_near(objects, other_selects, self_selects, options):
    """
    For each object, yield (object, nearby_object, link_tags) triples for
    all matching objects within options['distance'], ordered by distance.

    BUG FIX: ``where_clause`` was appended to without ever being
    initialized, so any 'check_geo' option raised UnboundLocalError.
    It is now initialized to ''.

    NOTE(review): the assembled where_clause is never applied to the query
    below — the 'check_geo' filters currently have no effect. TODO: confirm
    the intended integration into objects_bbox() before wiring it in.
    NOTE(review): relies on `plpy`, `to_float` and `eval_metric` being in
    scope at runtime (PL/Python environment / pgmapcss runtime helpers).
    """
    for ob in objects:
        geom = ob['geo']
        max_distance = to_float(eval_metric([ options['distance'], 'u' ]))
        if max_distance is None:
            return
        elif max_distance == 0:
            bbox = geom
        else:
            # Grow the object's envelope by max_distance (in display units)
            # to get the candidate search area.
            plan = plpy.prepare('select ST_Transform(ST_Buffer(ST_Transform(ST_Envelope($1), {unit.srs}), $2), {db.srs}) as r', ['geometry', 'float'])
            res = plpy.execute(plan, [ geom, max_distance ])
            bbox = res[0]['r']

        where_clause = ''
        if not 'check_geo' in options:
            pass
        elif options['check_geo'] == 'within':
            where_clause += " and ST_DWithin(way, $2, 0.0)"
        elif options['check_geo'] == 'surrounds':
            where_clause += " and ST_DWithin($2, way, 0.0)"
        elif options['check_geo'] == 'overlaps':
            where_clause += " and ST_Overlaps($2, way)"

        obs = []
        for o in objects_bbox(
                bbox,
                other_selects,
                {},
                { # add_columns
                    '__distance': 'ST_Distance(ST_Transform($2, {unit.srs}), ST_Transform(__geo__, {unit.srs}))'
                },
                [ 'geometry' ],
                [ geom ]
            ):
            # Skip the object itself and anything beyond the exact distance.
            if o['id'] != ob['id'] and o['__distance'] <= max_distance:
                link_tags = {
                    'distance': eval_metric([ str(o['__distance']) + 'u', 'px' ])
                }
                obs.append((o, link_tags))

        # Yield nearest first.
        obs = sorted(obs, key=lambda o: o[0]['__distance'] )
        for o in obs:
            yield((ob, o[0], o[1]))
| agpl-3.0 |
safchain/contrail-sandesh | library/python/pysandesh/test/sandesh_trace_test.py | 3 | 12959 | #!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# sandesh_trace_test
#
import sys
import socket
import unittest
import test_utils
sys.path.insert(1, sys.path[0]+'/../../../python')
from pysandesh.sandesh_base import *
from gen_py.msg_test.ttypes import *
class SandeshTraceTest(unittest.TestCase):
def setUp(self):
    """Create a Sandesh generator on a free HTTP port with debug-level
    logging and trace printing enabled, and reset the list that collects
    messages read back from trace buffers."""
    self._sandesh = Sandesh()
    http_port = test_utils.get_free_port()
    self._sandesh.init_generator('sandesh_trace_test', socket.gethostname(),
        'Test', 'Test', None, 'trace_test_ctxt', http_port)
    self._sandesh.set_logging_params(level=SandeshLevel.SYS_DEBUG,
                                     enable_local_log=True,
                                     enable_trace_print=True)
    # Populated by sandesh_trace_read_handler as trace buffers are read.
    self._trace_read_list = []
#end setUp
def sandesh_trace_read_handler(self, trace_msg, more):
    """Trace-buffer read callback: collect every message handed back so the
    tests can compare against an expected list."""
    self._trace_read_list += [trace_msg]
#end sandesh_trace_read_handler
def test_create_delete_trace_buffer(self):
    """Verify trace buffer create/delete bookkeeping: a created buffer
    appears in the buffer list with the requested size, reading an empty
    buffer yields nothing, and reading a deleted buffer yields nothing."""
    trace_buf_name = 'test_create_delete_trace_buffer'
    trace_buf_size = 5
    self._sandesh.trace_buffer_create(trace_buf_name, trace_buf_size)
    self.assertTrue(trace_buf_name in self._sandesh.trace_buffer_list_get())
    self.assertEqual(trace_buf_size, self._sandesh.trace_buffer_size_get(trace_buf_name))
    # Read from empty trace buffer
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='test',
        count=0, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = []
    self.assertEqual(exp_trace_list, self._trace_read_list)
    self._sandesh.trace_buffer_delete(trace_buf_name)
    self.assertFalse(trace_buf_name in self._sandesh.trace_buffer_list_get())
    # Read deleted trace buffer
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='test',
        count=0, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = []
    self.assertEqual(exp_trace_list, self._trace_read_list)
#end test_create_delete_trace_buffer
def test_enable_disable_trace_buffer(self):
    """Verify per-buffer enable/disable: messages traced while a buffer is
    disabled are dropped; messages traced while enabled are stored and
    read back in order."""
    trace_buf_name = 'test_enable_disable_trace_buffer'
    trace_buf_size = 5
    # Create trace buffer in disabled state
    self._sandesh.trace_buffer_create(trace_buf_name, trace_buf_size, False)
    tmsg1 = TraceTest(magicNo=1234, sandesh=self._sandesh)
    tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read1',
        count=0, read_cb=self.sandesh_trace_read_handler)
    # Trace buffer should be empty
    exp_trace_list = []
    self.assertEqual(exp_trace_list, self._trace_read_list)
    # Enable trace buffer
    self._sandesh.trace_buffer_enable(trace_buf_name)
    tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    tmsg2 = TraceTest(magicNo=3456, sandesh=self._sandesh)
    tmsg2.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read2',
        count=0, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = [tmsg1, tmsg2]
    self.assertEqual(exp_trace_list, self._trace_read_list)
    self._trace_read_list = []
    # Disable trace buffer
    self._sandesh.trace_buffer_disable(trace_buf_name)
    tmsg3 = TraceTest(magicNo=7890, sandesh=self._sandesh)
    tmsg3.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read3',
        count=0, read_cb=self.sandesh_trace_read_handler)
    # tmsg3 was traced while disabled, so only the first two remain.
    exp_trace_list = [tmsg1, tmsg2]
    self.assertEqual(exp_trace_list, self._trace_read_list)
#end test_enable_disable_trace_buffer
def test_enable_disable_trace(self):
    """Verify the global trace enable/disable switch: messages traced
    while tracing is globally disabled are dropped, and are stored again
    once tracing is re-enabled."""
    trace_buf_name = 'test_enable_disable_trace'
    trace_buf_size = 3
    # Disable trace
    self._sandesh.trace_disable()
    self._sandesh.trace_buffer_create(trace_buf_name, trace_buf_size)
    tmsg1 = TraceTest(magicNo=1234, sandesh=self._sandesh)
    tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read1',
        count=0, read_cb=self.sandesh_trace_read_handler)
    # Trace buffer should be empty
    exp_trace_list = []
    self.assertEqual(exp_trace_list, self._trace_read_list)
    # Enable trace
    self._sandesh.trace_enable()
    tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read2',
        count=0, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = [tmsg1]
    self.assertEqual(exp_trace_list, self._trace_read_list)
#end test_enable_disable_trace
def test_read_count_trace_buffer(self):
    """Verify the 'count' argument of trace_buffer_read(): a positive
    count reads that many unread messages, count=0 drains the rest,
    and a count larger than the buffer reads whatever is available."""
    buf_name = 'test_read_count_trace_buffer'
    self._sandesh.trace_buffer_create(buf_name, 10)
    msgs = []
    for magic in (1, 2, 3):
        msg = TraceTest(magicNo=magic, sandesh=self._sandesh)
        msg.trace_msg(name=buf_name, sandesh=self._sandesh)
        msgs.append(msg)
    # Total messages in trace buffer = 3, count = 1
    self._sandesh.trace_buffer_read(
        name=buf_name, read_context='read1', count=1,
        read_cb=self.sandesh_trace_read_handler)
    self.assertEqual(msgs[:1], self._trace_read_list)
    self._trace_read_list = []
    # count = 0 continues from the same context and reads the last two.
    self._sandesh.trace_buffer_read(
        name=buf_name, read_context='read1', count=0,
        read_cb=self.sandesh_trace_read_handler)
    self.assertEqual(msgs[1:], self._trace_read_list)
    self._trace_read_list = []
    # Fresh context, count = 5 (< buffer size) still sees all three.
    self._sandesh.trace_buffer_read(
        name=buf_name, read_context='read2', count=5,
        read_cb=self.sandesh_trace_read_handler)
    self.assertEqual(msgs, self._trace_read_list)
    self._trace_read_list = []
    # Fresh context, count = 20 (> buffer size) is clamped to available.
    self._sandesh.trace_buffer_read(
        name=buf_name, read_context='read3', count=20,
        read_cb=self.sandesh_trace_read_handler)
    self.assertEqual(msgs, self._trace_read_list)
#end test_read_count_trace_buffer
def test_overwrite_trace_buffer(self):
    """When a full circular trace buffer receives a new message, the
    oldest entry is overwritten."""
    buf_name = 'test_overwrite_trace_buffer'
    self._sandesh.trace_buffer_create(buf_name, 3)
    msgs = []
    for magic in (123, 345, 567):
        msg = TraceTest(magicNo=magic, sandesh=self._sandesh)
        msg.trace_msg(name=buf_name, sandesh=self._sandesh)
        msgs.append(msg)
    self._sandesh.trace_buffer_read(
        name=buf_name, read_context='read1', count=0,
        read_cb=self.sandesh_trace_read_handler)
    self.assertEqual(msgs, self._trace_read_list)
    self._trace_read_list = []
    # Buffer is full (size 3); a fourth message evicts the first.
    extra = TraceTest(magicNo=789, sandesh=self._sandesh)
    extra.trace_msg(name=buf_name, sandesh=self._sandesh)
    self._sandesh.trace_buffer_read(
        name=buf_name, read_context='read2', count=0,
        read_cb=self.sandesh_trace_read_handler)
    self.assertEqual(msgs[1:] + [extra], self._trace_read_list)
#end test_overwrite_trace_buffer
def test_trace_buffer_read_context(self):
    """Exercise named read contexts: each context keeps its own read
    cursor; deleting a context restarts reading from the oldest entry;
    a writer that overwrites unread entries advances a stale cursor."""
    trace_buf_name = 'test_trace_buffer_read_context'
    trace_buf_size = 3
    self._sandesh.trace_buffer_create(trace_buf_name, trace_buf_size)
    tmsg1 = TraceTest(magicNo=123, sandesh=self._sandesh)
    tmsg1.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    tmsg2 = TraceTest(magicNo=345, sandesh=self._sandesh)
    tmsg2.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read1',
        count=0, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = [tmsg1, tmsg2]
    self.assertEqual(exp_trace_list, self._trace_read_list)
    self._trace_read_list = []
    # After reading the entire content of trace buffer,
    # dont delete the read context. Subsequent call to trace_buffer_read()
    # should not read any trace message
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read1',
        count=0, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = []
    self.assertEqual(exp_trace_list, self._trace_read_list)
    # We have not deleted the read context. Add more trace messages
    # and make sure we don't read the already read trace messages.
    tmsg3 = TraceTest(magicNo=56, sandesh=self._sandesh)
    tmsg3.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    tmsg4 = TraceTest(magicNo=67, sandesh=self._sandesh)
    tmsg4.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read1',
        count=1, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = [tmsg3]
    self.assertEqual(exp_trace_list, self._trace_read_list)
    self._trace_read_list = []
    # Now read the last message
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read1',
        count=0, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = [tmsg4]
    self.assertEqual(exp_trace_list, self._trace_read_list)
    self._trace_read_list = []
    # Read the trace buffer with different read_context
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read2',
        count=2, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = [tmsg2, tmsg3]
    self.assertEqual(exp_trace_list, self._trace_read_list)
    self._trace_read_list = []
    # Delete read context
    self._sandesh.trace_buffer_read_done(name=trace_buf_name, context='read2')
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read2',
        count=1, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = [tmsg2]
    self.assertEqual(exp_trace_list, self._trace_read_list)
    self._trace_read_list = []
    # Interleave reading and writing of trace buffer - invalidate read context
    tmsg5 = TraceTest(magicNo=78, sandesh=self._sandesh)
    tmsg5.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    tmsg6 = TraceTest(magicNo=89, sandesh=self._sandesh)
    tmsg6.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read2',
        count=2, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = [tmsg4, tmsg5]
    self.assertEqual(exp_trace_list, self._trace_read_list)
    self._trace_read_list = []
    tmsg7 = TraceTest(magicNo=98, sandesh=self._sandesh)
    tmsg7.trace_msg(name=trace_buf_name, sandesh=self._sandesh)
    self._sandesh.trace_buffer_read(name=trace_buf_name, read_context='read1',
        count=0, read_cb=self.sandesh_trace_read_handler)
    exp_trace_list = [tmsg5, tmsg6, tmsg7]
    self.assertEqual(exp_trace_list, self._trace_read_list)
#end test_trace_buffer_read_context
#end SandeshTraceTest

if __name__ == '__main__':
    # Run all trace test cases when this file is executed directly.
    unittest.main()
| apache-2.0 |
egroeper/exscript | src/Exscriptd/config/DatabaseConfig.py | 6 | 2012 | # Copyright (C) 2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from Exscriptd.Config import Config
from Exscriptd.config.ConfigSection import ConfigSection
class DatabaseConfig(ConfigSection):
def __init__(self, *args, **kwargs):
ConfigSection.__init__(self, *args, **kwargs)
self.db_name = None
self.dbn = None
self.config = Config(self.global_options.config_dir, False)
@staticmethod
def get_description():
return 'add, edit, or remove databases'
@staticmethod
def get_commands():
return (('add', 'configure a new database'),
('edit', 'configure an existing database'))
def prepare_add(self, parser, db_name, dbn):
self.db_name = db_name
self.dbn = dbn
if self.config.has_database(self.db_name):
parser.error('database already exists')
def start_add(self):
self.config.add_database(self.db_name, self.dbn)
print 'Database added.'
def prepare_edit(self, parser, db_name, dbn):
self.db_name = db_name
self.dbn = dbn
if not self.config.has_database(self.db_name):
parser.error('database not found')
def start_edit(self):
if self.config.add_database(self.db_name, self.dbn):
print 'Database configured.'
else:
print 'No changes were made.'
| gpl-2.0 |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/ctypes/test/test_pickling.py | 88 | 2200 | import unittest
import pickle
from ctypes import *
import _ctypes_test
# Helper shared library shipped with CPython for exercising ctypes.
dll = CDLL(_ctypes_test.__file__)
class X(Structure):
    """Pointer-free ctypes structure used as a pickling fixture.

    'init_called' counts how many times __init__ ran, so tests can
    verify that unpickling does NOT invoke __init__.
    """
    _fields_ = [("a", c_int), ("b", c_double)]
    init_called = 0

    def __init__(self, *args, **kw):
        X.init_called = X.init_called + 1
        self.x = 42
class Y(X):
    # Adds a pointer-typed field (c_char_p), so instances of Y cannot
    # be pickled (exercised by PickleTest.test_unpickable).
    _fields_ = [("str", c_char_p)]
class PickleTest(unittest.TestCase):
def dumps(self, item):
return pickle.dumps(item)
def loads(self, item):
return pickle.loads(item)
def test_simple(self):
for src in [
c_int(42),
c_double(3.14),
]:
dst = self.loads(self.dumps(src))
self.assertEqual(src.__dict__, dst.__dict__)
self.assertEqual(memoryview(src).tobytes(),
memoryview(dst).tobytes())
def test_struct(self):
X.init_called = 0
x = X()
x.a = 42
self.assertEqual(X.init_called, 1)
y = self.loads(self.dumps(x))
# loads must NOT call __init__
self.assertEqual(X.init_called, 1)
# ctypes instances are identical when the instance __dict__
# and the memory buffer are identical
self.assertEqual(y.__dict__, x.__dict__)
self.assertEqual(memoryview(y).tobytes(),
memoryview(x).tobytes())
def test_unpickable(self):
# ctypes objects that are pointers or contain pointers are
# unpickable.
self.assertRaises(ValueError, lambda: self.dumps(Y()))
prototype = CFUNCTYPE(c_int)
for item in [
c_char_p(),
c_wchar_p(),
c_void_p(),
pointer(c_int(42)),
dll._testfunc_p_p,
prototype(lambda: 42),
]:
self.assertRaises(ValueError, lambda: self.dumps(item))
def test_wchar(self):
pickle.dumps(c_char("x"))
# Issue 5049
pickle.dumps(c_wchar(u"x"))
class PickleTest_1(PickleTest):
    # Repeat every PickleTest case using pickle protocol 1.
    def dumps(self, item):
        return pickle.dumps(item, 1)
class PickleTest_2(PickleTest):
    # Repeat every PickleTest case using pickle protocol 2.
    def dumps(self, item):
        return pickle.dumps(item, 2)
if __name__ == "__main__":
    # Run the pickling tests when executed directly.
    unittest.main()
| gpl-2.0 |
bigdocker/cloud-init | cloudinit/sources/DataSourceNoCloud.py | 4 | 10215 | # vi: ts=4 expandtab
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Hafliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import errno
import os
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
# Module-level logger used throughout this datasource.
LOG = logging.getLogger(__name__)
class DataSourceNoCloud(sources.DataSource):
    """Datasource seeded entirely from local ("no cloud") data.

    Seed data is collected, in order, from:
      * the kernel command line ("ds=nocloud;key=val;...")
      * the local seed directory (<paths.seed_dir>/nocloud)
      * the datasource config ('seedfrom' / 'user-data' / 'meta-data')
      * a vfat or iso9660 filesystem labeled 'cidata' (configurable)
    """

    def __init__(self, sys_cfg, distro, paths):
        sources.DataSource.__init__(self, sys_cfg, distro, paths)
        self.dsmode = 'local'
        self.seed = None
        self.cmdline_id = "ds=nocloud"
        self.seed_dir = os.path.join(paths.seed_dir, 'nocloud')
        self.supported_seed_starts = ("/", "file://")

    def __str__(self):
        root = sources.DataSource.__str__(self)
        return "%s [seed=%s][dsmode=%s]" % (root, self.seed, self.dsmode)

    def get_data(self):
        """Locate and merge seed data from all supported locations.

        Returns True when seed data was found and its requested dsmode
        matches this datasource's mode; False otherwise.
        """
        defaults = {
            "instance-id": "nocloud",
            "dsmode": self.dsmode,
        }

        found = []
        mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}

        try:
            # Parse the kernel command line, getting data passed in
            md = {}
            if parse_cmdline_data(self.cmdline_id, md):
                found.append("cmdline")
                mydata['meta-data'].update(md)
        except:
            util.logexc(LOG, "Unable to parse command line data")
            return False

        # Check to see if the seed dir has data.
        pp2d_kwargs = {'required': ['user-data', 'meta-data'],
                       'optional': ['vendor-data']}

        try:
            seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs)
            found.append(self.seed_dir)
            LOG.debug("Using seeded data from %s", self.seed_dir)
        except ValueError:
            # A required file was missing; simply "no seed here".
            pass

        if self.seed_dir in found:
            mydata = _merge_new_seed(mydata, seeded)

        # If the datasource config had a 'seedfrom' entry, then that takes
        # precedence over a 'seedfrom' that was found in a filesystem
        # but not over external media
        if self.ds_cfg.get('seedfrom'):
            found.append("ds_config_seedfrom")
            mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']

        # fields appropriately named can also just come from the datasource
        # config (ie, 'user-data', 'meta-data', 'vendor-data' there)
        if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
            mydata = _merge_new_seed(mydata, self.ds_cfg)
            found.append("ds_config")

        def _pp2d_callback(mp, data):
            return util.pathprefix2dict(mp, **data)

        label = self.ds_cfg.get('fs_label', "cidata")
        if label is not None:
            # Query optical drive to get it in blkid cache for 2.6 kernels
            util.find_devs_with(path="/dev/sr0")
            util.find_devs_with(path="/dev/sr1")

            fslist = util.find_devs_with("TYPE=vfat")
            fslist.extend(util.find_devs_with("TYPE=iso9660"))

            label_list = util.find_devs_with("LABEL=%s" % label)
            devlist = list(set(fslist) & set(label_list))
            devlist.sort(reverse=True)

            for dev in devlist:
                try:
                    LOG.debug("Attempting to use data from %s", dev)

                    try:
                        seeded = util.mount_cb(dev, _pp2d_callback,
                                               pp2d_kwargs)
                    except ValueError:
                        if dev in label_list:
                            # BUG FIX: message previously read
                            # "not avalid seed" (missing space).
                            LOG.warn("device %s with label=%s not a "
                                     "valid seed.", dev, label)
                        continue

                    mydata = _merge_new_seed(mydata, seeded)

                    # For seed from a device, the default mode is 'net'.
                    # that is more likely to be what is desired. If they want
                    # dsmode of local, then they must specify that.
                    if 'dsmode' not in mydata['meta-data']:
                        # BUG FIX: this previously set mydata['dsmode'],
                        # which nothing reads; the mode check below looks
                        # at mydata['meta-data']['dsmode'].
                        mydata['meta-data']['dsmode'] = "net"

                    LOG.debug("Using data from %s", dev)
                    found.append(dev)
                    break
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                except util.MountFailedError:
                    util.logexc(LOG, "Failed to mount %s when looking for "
                                "data", dev)

        # There was no indication on kernel cmdline or data
        # in the seeddir suggesting this handler should be used.
        if len(found) == 0:
            return False

        seeded_interfaces = None

        # The special argument "seedfrom" indicates we should
        # attempt to seed the userdata / metadata from its value
        # its primarily value is in allowing the user to type less
        # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
        if "seedfrom" in mydata['meta-data']:
            seedfrom = mydata['meta-data']["seedfrom"]
            seedfound = False
            for proto in self.supported_seed_starts:
                if seedfrom.startswith(proto):
                    seedfound = proto
                    break
            if not seedfound:
                LOG.debug("Seed from %s not supported by %s", seedfrom, self)
                return False

            if 'network-interfaces' in mydata['meta-data']:
                seeded_interfaces = self.dsmode

            # This could throw errors, but the user told us to do it
            # so if errors are raised, let them raise
            (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
            LOG.debug("Using seeded cache data from %s", seedfrom)

            # Values in the command line override those from the seed
            mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                      md_seed])
            mydata['user-data'] = ud
            found.append(seedfrom)

        # Now that we have exhausted any other places merge in the defaults
        mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
                                                  defaults])

        # Update the network-interfaces if metadata had 'network-interfaces'
        # entry and this is the local datasource, or 'seedfrom' was used
        # and the source of the seed was self.dsmode
        # ('local' for NoCloud, 'net' for NoCloudNet')
        if ('network-interfaces' in mydata['meta-data'] and
            (self.dsmode in ("local", seeded_interfaces))):
            LOG.debug("Updating network interfaces from %s", self)
            self.distro.apply_network(
                mydata['meta-data']['network-interfaces'])

        if mydata['meta-data']['dsmode'] == self.dsmode:
            self.seed = ",".join(found)
            self.metadata = mydata['meta-data']
            self.userdata_raw = mydata['user-data']
            self.vendordata = mydata['vendor-data']
            return True

        # BUG FIX: this previously logged md['dsmode'], which raised
        # KeyError whenever dsmode did not come from the kernel cmdline.
        LOG.debug("%s: not claiming datasource, dsmode=%s", self,
                  mydata['meta-data']['dsmode'])
        return False
# Returns true or false indicating if cmdline indicated
# that this module should be used
# Example cmdline:
# root=LABEL=uec-rootfs ro ds=nocloud
def parse_cmdline_data(ds_id, fill, cmdline=None):
    """Populate 'fill' with key=value pairs found after 'ds_id' on the
    kernel command line.

    ds_id   -- datasource token to look for, e.g. "ds=nocloud"
    fill    -- dict to receive parsed key/value pairs (short keys are
               expanded: h -> local-hostname, i -> instance-id,
               s -> seedfrom)
    cmdline -- command line string; read via util.get_cmdline() if None

    Returns True if 'ds_id' was present (this module should be used),
    False otherwise.
    """
    if cmdline is None:
        cmdline = util.get_cmdline()
    # Pad with spaces so the substring test only matches whole tokens.
    cmdline = " %s " % cmdline

    if not (" %s " % ds_id in cmdline or " %s;" % ds_id in cmdline):
        return False

    argline = ""
    # cmdline can contain:
    # ds=nocloud[;key=val;key=val]
    for tok in cmdline.split():
        if tok.startswith(ds_id):
            argline = tok.split("=", 1)

    # argline array is now 'nocloud' followed optionally by
    # a ';' and then key=value pairs also terminated with ';'
    tmp = argline[1].split(";")
    if len(tmp) > 1:
        kvpairs = tmp[1:]
    else:
        kvpairs = ()

    # short2long mapping to save cmdline typing
    s2l = {"h": "local-hostname", "i": "instance-id", "s": "seedfrom"}
    for item in kvpairs:
        if item == "":
            continue
        try:
            (k, v) = item.split("=", 1)
        except ValueError:
            # BUG FIX: was a bare 'except:'. Only a missing '=' is
            # expected here; a bare key means "present with no value".
            k = item
            v = None
        if k in s2l:
            k = s2l[k]
        fill[k] = v

    return True
def _merge_new_seed(cur, seeded):
    """Return a copy of 'cur' with the data from 'seeded' merged in.

    'seeded' carries meta-data as YAML text; user-data (and, when
    present, vendor-data) replace the current values outright.
    """
    merged = cur.copy()
    merged['meta-data'] = util.mergemanydict(
        [cur['meta-data'], util.load_yaml(seeded['meta-data'])])
    merged['user-data'] = seeded['user-data']
    if 'vendor-data' in seeded:
        merged['vendor-data'] = seeded['vendor-data']
    return merged
class DataSourceNoCloudNet(DataSourceNoCloud):
    """Network-stage variant of DataSourceNoCloud.

    Runs in 'net' mode, reads the 'nocloud-net' seed directory, and
    accepts remote (http/https/ftp) 'seedfrom' URLs.
    """

    def __init__(self, sys_cfg, distro, paths):
        DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
        self.dsmode = "net"
        self.cmdline_id = "ds=nocloud-net"
        self.seed_dir = os.path.join(paths.seed_dir, 'nocloud-net')
        self.supported_seed_starts = ("http://", "https://", "ftp://")
# Used to match classes to dependencies.
# Each entry pairs a datasource class with the dependencies it needs
# before it can run (filesystem only vs. filesystem + network).
datasources = [
    (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
    (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
    # Filters the module-level 'datasources' table by dependency set.
    return sources.list_from_depends(depends, datasources)
| gpl-3.0 |
calancha/DIRAC | Core/Utilities/CountryMapping.py | 11 | 1071 | # $HeadURL$
""" The CountryMapping module performs the necessary CS gymnastics to resolve country codes """
__RCSID__ = "$Id$"
from DIRAC import gConfig, S_OK, S_ERROR
def getCountryMapping( country ):
    """Follow the /Resources/Countries/<code>/AssignedTo chain in the CS
    and return the final country code (S_OK), detecting cycles (S_ERROR).
    """
    visited = [country]
    while True:
        target = gConfig.getValue( '/Resources/Countries/%s/AssignedTo' % country, country )
        if target == country:
            # Fixed point reached: no further assignment.
            break
        if target in visited:
            return S_ERROR( 'Circular mapping detected for %s' % country )
        country = target
        visited.append( target )
    return S_OK( target )
def getCountryMappingTier1( country ):
    """Resolve the country code and return its configured Tier1 site
    (S_OK), or S_ERROR when none is assigned."""
    res = getCountryMapping( country )
    if not res['OK']:
        return res
    mapped = res['Value']
    tier1 = gConfig.getValue( '/Resources/Countries/%s/Tier1' % mapped, '' )
    if tier1:
        return S_OK( tier1 )
    return S_ERROR( "No Tier1 assigned to %s" % mapped )
| gpl-3.0 |
Juniper/contrail-dev-generateDS | ServiceGenerator.py | 4 | 131390 | import os
import time
import logging
import textwrap
def CamelCase(input):
    """Convert a '-' or '_' separated identifier to CamelCase.

    e.g. 'virtual-network' -> 'VirtualNetwork'
    """
    return ''.join(part.capitalize()
                   for part in input.replace('_', '-').split('-'))
def escape_string(instring):
    """Backslash-escape backslashes and single quotes so the result can
    be embedded inside a single-quoted string literal."""
    return instring.replace('\\', '\\\\').replace("'", "\\'")
class ServiceApiGenerator(object):
def __init__(self, parser_generator, root):
self._PGenr = parser_generator
self._genStandAlone = True
self._xsd_root = root
def setLanguage(self, lang):
    """Select the output-language generator: 'py' or 'c++'.

    NOTE(review): any other value silently leaves self._LangGenr
    unset, so later uses would raise AttributeError -- consider
    raising ValueError for unknown languages.
    """
    if (lang == 'py'):
        self._LangGenr = PyGenerator(self._PGenr)
    elif (lang == 'c++'):
        self._LangGenr = CppGenerator(self._PGenr)
def Generate(self, gen_filepath_pfx):
    # Convenience wrapper: emit "<prefix>.py" as an importable module
    # (genStandAlone=False skips the standalone header and main()).
    self.generate(self._xsd_root, None, gen_filepath_pfx + ".py", genStandAlone = False)
def generate(self, root, infile, outfileName, genStandAlone = True):
    """Generate the Python API module for the parsed schema.

    root          -- root element of the XSD parse tree
    infile        -- unused here (kept for interface compatibility)
    outfileName   -- path of the module to write
    genStandAlone -- when True, also emit header/main() boilerplate so
                     the generated file can run standalone
    """
    self._genStandAlone = genStandAlone
    # Create an output file.
    # Note that even if the user does not request an output file,
    # we still need to go through the process of generating classes
    # because it produces data structures needed during generation of
    # subclasses.
    outfile = None
    if outfileName:
        outfile = self._PGenr.makeFile(outfileName)
    if not outfile:
        # NOTE(review): os.tmpfile() exists only on Python 2.
        outfile = os.tmpfile()
    wrt = outfile.write
    processed = []
    if genStandAlone:
        self._LangGenr.generateHeader(wrt, self._PGenr.prefix)
    #generateSimpleTypes(outfile, prefix, SimpleTypeDict)
    self._PGenr.DelayedElements = []
    self._PGenr.DelayedElements_subclass = []
    elements = root.getChildren()
    outfile.write('"""\n')
    outfile.write("This module defines the classes for types defined in :doc:`services.xsd`\n")
    outfile.write('"""\n')
    outfile.write("import json\n")
    #outfile.write("from generatedssuper import *\n")
    self._generateFromTree(wrt, self._PGenr.prefix, elements, processed)
    while 1:
        if len(self._PGenr.DelayedElements) <= 0:
            break
        element = self._PGenr.DelayedElements.pop()
        name = element.getCleanName()
        if name not in processed:
            processed.append(name)
            # NOTE(review): 'prefix' is undefined in this method
            # (probably meant self._PGenr.prefix); this branch would
            # raise NameError if DelayedElements were ever non-empty.
            self._generateClasses(wrt, prefix, element, 1)
    #
    # Generate the elements that were postponed because we had not
    # yet generated their base class.
    while 1:
        if len(self._PGenr.PostponedExtensions) <= 0:
            break
        element = self._PGenr.PostponedExtensions.pop()
        parentName, parent = self._PGenr.getParentName(element)
        if parentName:
            if (parentName in self._PGenr.AlreadyGenerated or
                parentName in self._PGenr.SimpleTypeDict.keys()):
                # NOTE(review): same undefined 'prefix' as above.
                self._generateClasses(wrt, prefix, element, 1)
            else:
                self._PGenr.PostponedExtensions.insert(0, element)
    #
    # Disable the generation of SAX handler/parser.
    # It failed when we stopped putting simple types into ElementDict.
    # When there are duplicate names, the SAX parser probably does
    # not work anyway.
    #NN self._generateMain(outfile, self._PGenr.prefix, root)
    if genStandAlone:
        self._LangGenr.generateMain(outfile, self._PGenr.prefix, root)
    outfile.close()
    if self._PGenr.subclassFilename:
        # NOTE(review): 'behaviorFilename', 'prefix' and 'superModule'
        # are undefined here; this path would raise NameError if
        # subclassFilename were ever set.
        self._generateSubclasses(root, self._PGenr.subclassFilename, behaviorFilename,
            prefix, superModule)
    # Generate __all__. When using the parser as a module it is useful
    # to isolate important classes from internal ones. This way one
    # can do a reasonably safe "from parser import *"
    if outfileName:
        exportableClassList = ['"%s"' % self._PGenr.mapName(self._PGenr.cleanupName(CamelCase(name)))
            for name in self._PGenr.AlreadyGenerated]
        exportableClassList.sort()
        exportableClassNames = ',\n    '.join(exportableClassList)
        exportLine = "\n__all__ = [\n    %s\n    ]\n" % exportableClassNames
        outfile = open(outfileName, "a")
        outfile.write(exportLine)
        outfile.close()
def _generateMain(self, outfile, prefix, root):
    """Write the standalone main() boilerplate (TEMPLATE_MAIN) filled
    in with the root element's names and the output module name."""
    name = self._PGenr.RootElement or root.getChildren()[0].getName()
    elType = self._PGenr.cleanupName(root.getChildren()[0].getType())
    if self._PGenr.RootElement:
        rootElement = self._PGenr.RootElement
    else:
        rootElement = elType
    params = {
        'prefix': prefix,
        'cap_name': self._PGenr.cleanupName(self._PGenr.make_gs_name(name)),
        'name': name,
        'cleanname': self._PGenr.cleanupName(name),
        'module_name': os.path.splitext(os.path.basename(outfile.name))[0],
        'root': rootElement,
        'namespacedef': self._PGenr.Namespacedef,
    }
    s1 = self._PGenr.TEMPLATE_MAIN % params
    outfile.write(s1)
def _generateFromTree(self, wrt, prefix, elements, processed):
for element in elements:
name = element.getCleanName()
if 1: # if name not in processed:
processed.append(name)
self._generateClasses(wrt, prefix, element, 0)
children = element.getChildren()
if children:
self._generateFromTree(wrt, prefix, element.getChildren(), processed)
def _generateClasses(self, wrt, prefix, element, delayed):
    """Emit one generated class for 'element', or postpone/skip it.

    Postpones elements whose base type has not been generated yet
    (via self._PGenr.PostponedExtensions) and skips anything already
    recorded in self._PGenr.AlreadyGenerated.
    """
    logging.debug("Generating class for: %s" % element)
    parentName, base = self._PGenr.getParentName(element)
    logging.debug("Element base: %s" % base)
    if not element.isExplicitDefine():
        logging.debug("Not an explicit define, returning.")
        return
    # If this element is an extension (has a base) and the base has
    # not been generated, then postpone it.
    if parentName:
        if (parentName not in self._PGenr.AlreadyGenerated and
            parentName not in self._PGenr.SimpleTypeDict.keys()):
            self._PGenr.PostponedExtensions.append(element)
            return
    if element.getName() in self._PGenr.AlreadyGenerated:
        return
    self._PGenr.AlreadyGenerated.append(element.getName())
    if element.getMixedExtensionError():
        # NOTE(review): 'err_msg' is not defined/imported in this module.
        err_msg('*** Element %s extension chain contains mixed and non-mixed content. Not generated.\n' % (
            element.getName(), ))
        return
    self._PGenr.ElementsForSubclasses.append(element)
    name = element.getCleanName()
    self._LangGenr.generateClassDefLine(wrt, parentName, prefix, name)
    # If this element has documentation, generate a doc-string.
    if element.documentation:
        self._LangGenr.generateElemDoc(wrt, element)
    if self._PGenr.UserMethodsModule or self._PGenr.MemberSpecs:
        self._LangGenr.generateMemberSpec(wrt, element)
    #LG wrt('    subclass = None\n')
    parentName, parent = self._PGenr.getParentName(element)
    superclass_name = 'None'
    if parentName and parentName in self._PGenr.AlreadyGenerated:
        superclass_name = self._PGenr.mapName(self._PGenr.cleanupName(parentName))
    self._LangGenr.generateSubSuperInit(wrt, superclass_name)
    self._LangGenr.generateClassVars(wrt, element.getName())
    s4 = self._LangGenr.generateCtor(wrt, element)
    self._LangGenr.generateFactory(wrt, prefix, name)
    self._generateGettersAndSetters(wrt, element)
    if self._PGenr.Targetnamespace in self._PGenr.NamespacesDict:
        namespace = self._PGenr.NamespacesDict[self._PGenr.Targetnamespace]
    else:
        namespace = ''
    #self._generateExportFn(wrt, prefix, element, namespace)
    #self._generateExportLiteralFn(wrt, prefix, element)
    self._generateExportDictFn(wrt, prefix, element)
    self._generateExportDict2Fn(wrt, prefix, element)
    #self._generateBuildFn(wrt, prefix, element, delayed)
    #self._generateUserMethods(wrt, element)
    self._LangGenr.generateEnd(wrt, name, s4)
# end _generateClasses
def _generateGettersAndSetters(self, wrt, element):
    """Emit accessor methods for every child element and XML attribute
    of 'element', plus a validator method for each simpleType-based
    member (emitted at most once, tracked in generatedSimpleTypes)."""
    generatedSimpleTypes = []
    childCount = self._PGenr.countChildren(element, 0)
    for child in element.getChildren():
        if child.getType() == self._PGenr.AnyTypeIdentifier:
            # xs:anyType members get generic accessors.
            self._LangGenr.generateGetterAnyType(wrt)
            self._LangGenr.generateSetterAnyType(wrt)
            if child.getMaxOccurs() > 1:
                self._LangGenr.generateAdderAnyType(wrt)
                self._LangGenr.generateInserterAnyType(wrt)
        else:
            name = self._PGenr.cleanupName(child.getCleanName())
            unmappedName = self._PGenr.cleanupName(child.getName())
            capName = self._PGenr.make_gs_name(unmappedName)
            getMaxOccurs = child.getMaxOccurs()
            childType = child.getType()
            self._LangGenr.generateGetter(wrt, capName, name, childType)
            self._LangGenr.generateSetter(wrt, capName, name, childType)
            if child.getMaxOccurs() > 1:
                # Repeated members also get add/insert helpers.
                self._LangGenr.generateAdder(wrt, capName, name)
                self._LangGenr.generateInserter(wrt, capName, name)
            if self._PGenr.GenerateProperties:
                self._LangGenr.generateProperty(wrt, unmappedName, capName, name)
            #
            # If this child is defined in a simpleType, then generate
            # a validator method.
            typeName = None
            name = self._PGenr.cleanupName(child.getName())
            mappedName = self._PGenr.mapName(name)
            childType = child.getType()
            childType1 = child.getSimpleType()
            if not child.isComplex() and childType1 and childType1 in self._PGenr.SimpleTypeDict:
                childType = self._PGenr.SimpleTypeDict[childType1].getBase()
            elif mappedName in self._PGenr.ElementDict:
                childType = self._PGenr.ElementDict[mappedName].getType()
            typeName = child.getSimpleType()
            if (typeName and
                typeName in self._PGenr.SimpleTypeDict and
                typeName not in generatedSimpleTypes):
                generatedSimpleTypes.append(typeName)
                self._LangGenr.generateValidator(wrt, typeName)
    attrDefs = element.getAttributeDefs()
    for key in attrDefs:
        attrDef = attrDefs[key]
        name = self._PGenr.cleanupName(attrDef.getName().replace(':', '_'))
        mappedName = self._PGenr.mapName(name)
        gsName = self._PGenr.make_gs_name(name)
        self._LangGenr.generateGetter(wrt, gsName, mappedName)
        self._LangGenr.generateSetter(wrt, gsName, mappedName)
        if self._PGenr.GenerateProperties:
            self._LangGenr.generateProperty(wrt, name, gsName, gsName)
        typeName = attrDef.getType()
        if (typeName and
            typeName in self._PGenr.SimpleTypeDict and
            typeName not in generatedSimpleTypes):
            generatedSimpleTypes.append(typeName)
            self._LangGenr.generateValidator(wrt, typeName)
    #LG TODO put in lang specific parts for these if needed
    if element.getSimpleContent() or element.isMixed():
        wrt('    def get%s_(self): return self.valueOf_\n' % (
            self._PGenr.make_gs_name('valueOf'), ))
        wrt('    def set%s_(self, valueOf_): self.valueOf_ = valueOf_\n' % (
            self._PGenr.make_gs_name('valueOf'), ))
    if element.getAnyAttribute():
        wrt('    def get%s_(self): return self.anyAttributes_\n' % (
            self._PGenr.make_gs_name('anyAttributes'), ))
        wrt('    def set%s_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_\n' % (
            self._PGenr.make_gs_name('anyAttributes'), ))
    if element.getExtended():
        wrt('    def get%s_(self): return self.extensiontype_\n' % (
            self._PGenr.make_gs_name('extensiontype'), ))
        wrt('    def set%s_(self, extensiontype_): self.extensiontype_ = extensiontype_\n' % (
            self._PGenr.make_gs_name('extensiontype'), ))
def _generateSubclasses(root, subclassFilename, behaviorFilename,
        prefix, superModule='xxx'):
    """Generate a companion module of subclasses (one per element).

    NOTE(review): this method appears dead/broken as written:
      * it is declared without 'self' although the body uses
        'self._PGenr' and the caller invokes it as a bound method;
      * 'makeFile', 'err_msg', 'generateSubclass', 'sys', 'NoDates',
        'NoVersion', 'VERSION', 'ExternalEncoding', 'Namespacedef',
        'TEMPLATE_SUBCLASS_HEADER' and 'TEMPLATE_SUBCLASS_FOOTER' are
        not defined or imported in this module.
    """
    name = root.getChildren()[0].getName()
    subclassFile = makeFile(subclassFilename)
    # NOTE(review): .write is taken before the truthiness check below.
    wrt = subclassFile.write
    if subclassFile:
        # Read in the XMLBehavior file.
        xmlbehavior = None
        behaviors = None
        baseUrl = None
        if behaviorFilename:
            try:
                # Add the currect working directory to the path so that
                # we use the user/developers local copy.
                sys.path.insert(0, '.')
                import xmlbehavior_sub as xmlbehavior
            except ImportError:
                err_msg('*** You have requested generation of extended methods.\n')
                err_msg('*** But, no xmlbehavior module is available.\n')
                err_msg('*** Generation of extended behavior methods is omitted.\n')
            if xmlbehavior:
                behaviors = xmlbehavior.parse(behaviorFilename)
                behaviors.make_class_dictionary(self._PGenr.cleanupName)
                baseUrl = behaviors.getBase_impl_url()
        wrt = subclassFile.write
        tstamp = (not NoDates and time.ctime()) or ''
        if NoVersion:
            version = ''
        else:
            version = ' version %s' % VERSION
        wrt(TEMPLATE_SUBCLASS_HEADER % (tstamp, version,
            superModule, ExternalEncoding, ))
        for element in self._PGenr.ElementsForSubclasses:
            generateSubclass(wrt, element, prefix, xmlbehavior, behaviors, baseUrl)
    name = root.getChildren()[0].getName()
    elType = self._PGenr.cleanupName(root.getChildren()[0].getType())
    if self._PGenr.RootElement:
        rootElement = self._PGenr.RootElement
    else:
        rootElement = elType
    params = {
        'cap_name': self._PGenr.make_gs_name(self._PGenr.cleanupName(name)),
        'name': name,
        'cleanname': self._PGenr.cleanupName(name),
        'module_name': os.path.splitext(os.path.basename(subclassFilename))[0],
        'root': rootElement,
        'namespacedef': Namespacedef,
        'super': superModule,
    }
    wrt(TEMPLATE_SUBCLASS_FOOTER % params)
    subclassFile.close()
def _generateExportFn(self, wrt, prefix, element, namespace):
self._LangGenr.generateExport(wrt, namespace, element)
self._LangGenr.generateExportAttributesFn(wrt, namespace, element)
self._LangGenr.generateExportChildrenFn(wrt, namespace, element)
    def _generateExportLiteralFn(self, wrt, prefix, element):
        # Emit exportLiteral(), exportLiteralAttributes() and
        # exportLiteralChildren() for the generated class: methods that dump
        # an instance as Python-literal constructor source.
        base = element.getBase()
        wrt("    def exportLiteral(self, outfile, level, name_='%s'):\n" % element.getName())
        wrt("        level += 1\n")
        wrt("        self.exportLiteralAttributes(outfile, level, [], name_)\n")
        wrt("        if self.hasContent_():\n")
        wrt("            self.exportLiteralChildren(outfile, level, name_)\n")
        childCount = self._PGenr.countChildren(element, 0)
        if element.getSimpleContent() or element.isMixed():
            wrt("        showIndent(outfile, level)\n")
            wrt("        outfile.write('valueOf_ = \"\"\"%s\"\"\",\\n' % (self.valueOf_,))\n")
        wrt("    def exportLiteralAttributes(self, outfile, level, already_processed, name_):\n")
        count = 0
        attrDefs = element.getAttributeDefs()
        # One write-block per attribute, with the output format selected by
        # the attribute's (possibly simpleType-resolved) schema type.
        for key in attrDefs:
            attrDef = attrDefs[key]
            count += 1
            name = attrDef.getName()
            cleanName = self._PGenr.cleanupName(name)
            capName = self._PGenr.make_gs_name(cleanName)
            mappedName = mapName(cleanName)
            data_type = attrDef.getData_type()
            attrType = attrDef.getType()
            # Resolve through up to two levels of simpleType indirection.
            if attrType in self._PGenr.SimpleTypeDict:
                attrType = self._PGenr.SimpleTypeDict[attrType].getBase()
            if attrType in self._PGenr.SimpleTypeDict:
                attrType = self._PGenr.SimpleTypeDict[attrType].getBase()
            wrt("        if self.%s is not None and '%s' not in already_processed:\n" % (
                mappedName, mappedName, ))
            wrt("            already_processed.append('%s')\n" % (
                mappedName, ))
            wrt("            showIndent(outfile, level)\n")
            if attrType in self._PGenr.StringType or \
                attrType in self._PGenr.IDTypes or \
                attrType == self._PGenr.TokenType or \
                attrType == self._PGenr.DateTimeType or \
                attrType == self._PGenr.TimeType or \
                attrType == self._PGenr.DateType or \
                attrType == self._PGenr.NCNameType:
                wrt("            outfile.write('%s = \"%%s\",\\n' %% (self.%s,))\n" % \
                    (mappedName, mappedName,))
            elif attrType in self._PGenr.IntegerType or \
                attrType == self._PGenr.PositiveIntegerType or \
                attrType == self._PGenr.NonPositiveIntegerType or \
                attrType == self._PGenr.NegativeIntegerType or \
                attrType == self._PGenr.NonNegativeIntegerType:
                wrt("            outfile.write('%s = %%d,\\n' %% (self.%s,))\n" % \
                    (mappedName, mappedName,))
            elif attrType == self._PGenr.BooleanType:
                wrt("            outfile.write('%s = %%s,\\n' %% (self.%s,))\n" % \
                    (mappedName, mappedName,))
            elif attrType == self._PGenr.FloatType or \
                attrType == self._PGenr.DecimalType:
                wrt("            outfile.write('%s = %%f,\\n' %% (self.%s,))\n" % \
                    (mappedName, mappedName,))
            elif attrType == self._PGenr.DoubleType:
                wrt("            outfile.write('%s = %%e,\\n' %% (self.%s,))\n" % \
                    (mappedName, mappedName,))
            else:
                wrt("            outfile.write('%s = %%s,\\n' %% (self.%s,))\n" % \
                    (mappedName, mappedName,))
        if element.getAnyAttribute():
            count += 1
            wrt('        for name, value in self.anyAttributes_.items():\n')
            wrt('            showIndent(outfile, level)\n')
            wrt("            outfile.write('%s = \"%s\",\\n' % (name, value,))\n")
        parentName, parent = self._PGenr.getParentName(element)
        if parentName:
            count += 1
            elName = element.getCleanName()
            wrt("        super(%s, self).exportLiteralAttributes(outfile, level, already_processed, name_)\n" % \
                (elName, ))
        if count == 0:
            wrt("            pass\n")
        wrt("    def exportLiteralChildren(self, outfile, level, name_):\n")
        parentName, parent = self._PGenr.getParentName(element)
        if parentName:
            elName = element.getCleanName()
            wrt("        super(%s, self).exportLiteralChildren(outfile, level, name_)\n" % \
                (elName, ))
        for child in element.getChildren():
            name = child.getName()
            name = self._PGenr.cleanupName(name)
            mappedName = self._PGenr.mapName(name)
            if element.isMixed():
                wrt("        showIndent(outfile, level)\n")
                wrt("        outfile.write('content_ = [\\n')\n")
                wrt('        for item_ in self.content_:\n')
                wrt('            item_.exportLiteral(outfile, level, name_)\n')
                wrt("        showIndent(outfile, level)\n")
                wrt("        outfile.write('],\\n')\n")
            else:
                # fix_abstract
                type_element = None
                abstract_child = False
                type_name = child.getAttrs().get('type')
                if type_name:
                    type_element = self._PGenr.ElementDict.get(type_name)
                if type_element and type_element.isAbstract():
                    abstract_child = True
                if abstract_child:
                    pass
                else:
                    type_name = name
                if child.getMaxOccurs() > 1:
                    if child.getType() == self._PGenr.AnyTypeIdentifier:
                        wrt("        showIndent(outfile, level)\n")
                        wrt("        outfile.write('anytypeobjs_=[\\n')\n")
                        wrt("        level += 1\n")
                        wrt("        for anytypeobjs_ in self.anytypeobjs_:\n")
                        wrt("            anytypeobjs_.exportLiteral(outfile, level)\n")
                        wrt("        level -= 1\n")
                        wrt("        showIndent(outfile, level)\n")
                        wrt("        outfile.write('],\\n')\n")
                    else:
                        wrt("        showIndent(outfile, level)\n")
                        wrt("        outfile.write('%s=[\\n')\n" % (mappedName, ))
                        wrt("        level += 1\n")
                        wrt("        for %s_ in self.%s:\n" % (name, mappedName))
                        self._generateExportLiteralFn_2(wrt, child, name, '        ')
                        wrt("        level -= 1\n")
                        wrt("        showIndent(outfile, level)\n")
                        wrt("        outfile.write('],\\n')\n")
                else:
                    self._generateExportLiteralFn_1(wrt, child, type_name, '')
        if childCount == 0 or element.isMixed():
            wrt("            pass\n")
def _generateExportLiteralFn_1(self, wrt, child, name, fill):
cleanName = self._PGenr.cleanupName(name)
mappedName = self._PGenr.mapName(cleanName)
childType = child.getType()
if childType == self._PGenr.AnyTypeIdentifier:
wrt('%s if self.anytypeobjs_ is not None:\n' % (fill, ))
wrt('%s showIndent(outfile, level)\n' % fill)
wrt("%s outfile.write('anytypeobjs_=model_.anytypeobjs_(\\n')\n" % \
(fill, ))
wrt("%s self.anytypeobjs_.exportLiteral(outfile, level)\n" % (
fill, ))
wrt('%s showIndent(outfile, level)\n' % fill)
wrt("%s outfile.write('),\\n')\n" % (fill, ))
else:
wrt('%s if self.%s is not None:\n' % (fill, mappedName, ))
if childType in self._PGenr.StringType or \
childType in self._PGenr.IDTypes or \
childType == self._PGenr.TokenType or \
childType == self._PGenr.DateTimeType or \
childType == self._PGenr.TimeType or \
childType == self._PGenr.DateType:
# wrt('%s if self.%s is not None:\n' % (fill, mappedName, ))
wrt('%s showIndent(outfile, level)\n' % fill)
if (child.getSimpleType() in self._PGenr.SimpleTypeDict and
self._PGenr.SimpleTypeDict[child.getSimpleType()].isListType()):
wrt("%s if self.%s:\n" % (fill, mappedName, ))
wrt("%s outfile.write('%s=%%s,\\n' %% quote_python(' '.join(self.%s)).encode(ExternalEncoding)) \n" % \
(fill, mappedName, mappedName, ))
wrt("%s else:\n" % (fill, ))
wrt("%s outfile.write('%s=None,\\n')\n" % \
(fill, mappedName, ))
else:
wrt("%s outfile.write('%s=%%s,\\n' %% quote_python(self.%s).encode(ExternalEncoding))\n" % \
(fill, mappedName, mappedName, ))
elif childType in self._PGenr.IntegerType or \
childType == self._PGenr.PositiveIntegerType or \
childType == self._PGenr.NonPositiveIntegerType or \
childType == self._PGenr.NegativeIntegerType or \
childType == self._PGenr.NonNegativeIntegerType:
# wrt('%s if self.%s is not None:\n' % (fill, mappedName, ))
wrt('%s showIndent(outfile, level)\n' % fill)
wrt("%s outfile.write('%s=%%d,\\n' %% self.%s)\n" % \
(fill, mappedName, mappedName, ))
elif childType == self._PGenr.BooleanType:
# wrt('%s if self.%s is not None:\n' % (fill, mappedName, ))
wrt('%s showIndent(outfile, level)\n' % fill)
wrt("%s outfile.write('%s=%%s,\\n' %% self.%s)\n" % \
(fill, mappedName, mappedName, ))
elif childType == self._PGenr.FloatType or \
childType == self._PGenr.DecimalType:
# wrt('%s if self.%s is not None:\n' % (fill, mappedName, ))
wrt('%s showIndent(outfile, level)\n' % fill)
wrt("%s outfile.write('%s=%%f,\\n' %% self.%s)\n" % \
(fill, mappedName, mappedName, ))
elif childType == self._PGenr.DoubleType:
# wrt('%s if self.%s is not None:\n' % (fill, mappedName, ))
wrt('%s showIndent(outfile, level)\n' % fill)
wrt("%s outfile.write('%s=%%e,\\n' %% self.%s)\n" % \
(fill, name, mappedName, ))
else:
# wrt('%s if self.%s is not None:\n' % (fill, mappedName, ))
wrt('%s showIndent(outfile, level)\n' % fill)
wrt("%s outfile.write('%s=model_.%s(\\n')\n" % \
(fill, mappedName, self._PGenr.mapName(self._PGenr.cleanupName(child.getType()))))
if name == child.getType():
s1 = "%s self.%s.exportLiteral(outfile, level)\n" % \
(fill, mappedName)
else:
s1 = "%s self.%s.exportLiteral(outfile, level, name_='%s')\n" % \
(fill, mappedName, name)
wrt(s1)
wrt('%s showIndent(outfile, level)\n' % fill)
wrt("%s outfile.write('),\\n')\n" % (fill, ))
def _generateExportLiteralFn_2(self, wrt, child, name, fill):
cleanName = self._PGenr.cleanupName(name)
mappedName = self._PGenr.mapName(cleanName)
childType = child.getType()
wrt('%s showIndent(outfile, level)\n' % fill)
if childType in self._PGenr.StringType or \
childType == self._PGenr.TokenType or \
childType == self._PGenr.DateTimeType or \
childType == self._PGenr.TimeType or \
childType == self._PGenr.DateType:
wrt("%s outfile.write('%%s,\\n' %% quote_python(%s_).encode(ExternalEncoding))\n" % \
(fill, name))
elif childType in self._PGenr.IntegerType or \
childType == self._PGenr.PositiveIntegerType or \
childType == self._PGenr.NonPositiveIntegerType or \
childType == self._PGenr.NegativeIntegerType or \
childType == self._PGenr.NonNegativeIntegerType:
wrt("%s outfile.write('%%d,\\n' %% %s)\n" % (fill, name))
elif childType == self._PGenr.BooleanType:
wrt("%s outfile.write('%%s,\\n' %% %s)\n" % (fill, name))
elif childType == self._PGenr.FloatType or \
childType == self._PGenr.DecimalType:
wrt("%s outfile.write('%%f,\\n' %% %s_)\n" % (fill, name))
elif childType == self._PGenr.DoubleType:
wrt("%s outfile.write('%%e,\\n' %% %s)\n" % (fill, name))
else:
name1 = self._PGenr.mapName(self._PGenr.cleanupName(child.getType()))
wrt("%s outfile.write('model_.%s(\\n')\n" % (fill, name1, ))
if name == child.getType():
s1 = "%s %s_.exportLiteral(outfile, level)\n" % (
fill, self._PGenr.cleanupName(child.getType()), )
else:
s1 = "%s %s_.exportLiteral(outfile, level, name_='%s')\n" % \
(fill, name, child.getType(), )
wrt(s1)
wrt('%s showIndent(outfile, level)\n' % fill)
wrt("%s outfile.write('),\\n')\n" % (fill, ))
def _generateExportDictFn(self, wrt, prefix, element):
self._LangGenr.generateExportDict(wrt, element)
def _generateExportDict2Fn(self, wrt, prefix, element):
self._LangGenr.generateExportDict2(wrt, element)
    def generateSubclass(self, wrt, element, prefix, xmlbehavior, behaviors, baseUrl):
        # Write one thin subclass of the supermod class for *element*,
        # including ctor forwarding and optional XMLBehavior methods.
        # Relies on module-level state: AlreadyGenerated_subclass,
        # SubclassSuffix, buildCtorArgs_multilevel, buildCtorParams,
        # generateClassBehaviors.
        #
        # NOTE(review): the zero-argument delegation call below appears
        # redundant with the inline code that follows — confirm whether the
        # backend call or the inline emission is the intended path.
        self._LangGenr.generateSubclass()
        if not element.isComplex():
            return
        if element.getName() in AlreadyGenerated_subclass:
            return
        AlreadyGenerated_subclass.append(element.getName())
        name = element.getCleanName()
        wrt('class %s%s%s(supermod.%s):\n' % (prefix, name, SubclassSuffix, name))
        childCount = self._PGenr.countChildren(element, 0)
        s1 = buildCtorArgs_multilevel(element, childCount)
        wrt('    def __init__(self%s):\n' % s1)
        args = buildCtorParams(element, element, childCount)
        s1 = ''.join(args)
        # CPython limits call sites to 255 explicit arguments; pack into a
        # tuple when the ctor has more.
        if len(args) > 254:
            wrt('        arglist_ = (%s)\n' % (s1, ))
            wrt('        super(%s%s%s, self).__init__(*arglist_)\n' % (prefix, name, SubclassSuffix, ))
        else:
            #wrt('        supermod.%s%s.__init__(%s)\n' % (prefix, name, s1))
            wrt('        super(%s%s%s, self).__init__(%s)\n' % (prefix, name, SubclassSuffix, s1, ))
        if xmlbehavior and behaviors:
            wrt('\n')
            wrt('    #\n')
            wrt('    # XMLBehaviors\n')
            wrt('    #\n')
            # Get a list of behaviors for this class/subclass.
            classDictionary = behaviors.get_class_dictionary()
            if name in classDictionary:
                classBehaviors = classDictionary[name]
            else:
                classBehaviors = None
            if classBehaviors:
                generateClassBehaviors(wrt, classBehaviors, baseUrl)
        wrt('supermod.%s.subclass = %s%s\n' % (name, name, SubclassSuffix))
        wrt('# end class %s%s%s\n' % (prefix, name, SubclassSuffix))
        wrt('\n\n')
def _generateBuildFn(self, wrt, prefix, element, delayed):
self._LangGenr.generateBuild(wrt, element)
self._LangGenr.generateBuildAttributesFn(wrt, element)
self._LangGenr.generateBuildChildren(wrt, element, prefix, delayed)
def _generateUserMethods(self, wrt, element):
if not self._PGenr.UserMethodsModule:
return
specs = self._PGenr.UserMethodsModule.METHOD_SPECS
name = self._PGenr.cleanupName(element.getCleanName())
values_dict = {'class_name': name, }
for spec in specs:
if spec.match_name(name):
source = spec.get_interpolated_source(values_dict)
wrt(source)
#
# Generators for Language specific parts
#
class PyGenerator(object):
    def __init__(self, parser_generator):
        # Shared parser/generator state (type tables, naming helpers,
        # templates) consulted by every emit method below.
        self._PGenr = parser_generator
def generateHeader(self, wrt, prefix):
tstamp = (not self._PGenr.NoDates and time.ctime()) or ''
if self._PGenr.NoVersion:
version = ''
else:
version = ' version %s' % self._PGenr.Version
s1 = self._PGenr.TEMPLATE_HEADER % (tstamp, version, self._PGenr.ExternalEncoding, )
wrt(s1)
def generateClassDefLine(self, wrt, parentName, prefix, name):
if parentName:
s1 = 'class %s%s(%s):\n' % (prefix, CamelCase(name), parentName,)
else:
#s1 = 'class %s%s(GeneratedsSuper):\n' % (prefix, CamelCase(name))
s1 = 'class %s%s(object):\n' % (prefix, CamelCase(name))
wrt(s1)
wrt(' """\n')
wrt(' %s%s class definition from :doc:`services.xsd`\n' %(prefix, name))
wrt(' """\n')
def generateClassVars(self, wrt, name):
wrt(" service_type = '%s'\n" % (CamelCase(name)))
wrt(" token = None\n")
    def generateSubclass(self):
        # NOTE(review): this method takes no parameters, yet the body
        # references element, wrt, prefix, xmlbehavior, behaviors and
        # baseUrl, none of which are defined locally — calling it as
        # written (see the zero-arg delegation in the outer generator's
        # generateSubclass) would raise NameError unless these exist as
        # module globals.  It appears to be a duplicated copy of the outer
        # generator's generateSubclass body; confirm intent before use.
        if not element.isComplex():
            return
        if element.getName() in AlreadyGenerated_subclass:
            return
        AlreadyGenerated_subclass.append(element.getName())
        name = element.getCleanName()
        wrt('class %s%s%s(supermod.%s):\n' % (prefix, name, SubclassSuffix, name))
        childCount = self._PGenr.countChildren(element, 0)
        s1 = buildCtorArgs_multilevel(element, childCount)
        wrt('    def __init__(self%s):\n' % s1)
        args = buildCtorParams(element, element, childCount)
        s1 = ''.join(args)
        # CPython limits call sites to 255 explicit arguments; pack into a
        # tuple when the ctor has more.
        if len(args) > 254:
            wrt('        arglist_ = (%s)\n' % (s1, ))
            wrt('        super(%s%s%s, self).__init__(*arglist_)\n' % (prefix, name, SubclassSuffix, ))
        else:
            #wrt('        supermod.%s%s.__init__(%s)\n' % (prefix, name, s1))
            wrt('        super(%s%s%s, self).__init__(%s)\n' % (prefix, name, SubclassSuffix, s1, ))
        if xmlbehavior and behaviors:
            wrt('\n')
            wrt('    #\n')
            wrt('    # XMLBehaviors\n')
            wrt('    #\n')
            # Get a list of behaviors for this class/subclass.
            classDictionary = behaviors.get_class_dictionary()
            if name in classDictionary:
                classBehaviors = classDictionary[name]
            else:
                classBehaviors = None
            if classBehaviors:
                generateClassBehaviors(wrt, classBehaviors, baseUrl)
        wrt('supermod.%s.subclass = %s%s\n' % (name, name, SubclassSuffix))
        wrt('# end class %s%s%s\n' % (prefix, name, SubclassSuffix))
        wrt('\n\n')
def generateFactory(self, wrt, prefix, name):
wrt(' def factory(*args_, **kwargs_):\n')
wrt(' if %s%s.subclass:\n' % (prefix, CamelCase(name)))
wrt(' return %s%s.subclass(*args_, **kwargs_)\n' % (prefix, CamelCase(name)))
wrt(' else:\n')
wrt(' return %s%s(*args_, **kwargs_)\n' % (prefix, CamelCase(name)))
wrt(' factory = staticmethod(factory)\n')
def generateElemDoc(self, wrt, element):
s2 = ' '.join(element.documentation.strip().split())
s2 = s2.encode('utf-8')
s2 = textwrap.fill(s2, width=68, subsequent_indent=' ')
if s2[0] == '"' or s2[-1] == '"':
s2 = ' """ %s """\n' % (s2, )
else:
s2 = ' """%s"""\n' % (s2, )
wrt(s2)
#
# Generate a class variable whose value is a list of tuples, one
# tuple for each member data item of the class.
    # Each tuple has 3 elements: (1) member name, (2) member data type,
# (3) container/list or not (maxoccurs > 1).
    def generateMemberSpec(wrt, element):
        # Emit the member_data_items_ class variable: one MemberSpec_ per
        # attribute and child of *element* — (name, type, is-container) —
        # as either a dict or a list depending on the MemberSpecs option.
        #
        # NOTE(review): defined without 'self' yet uses self._PGenr below,
        # and reads the module-level MemberSpecs option — confirm how this
        # is invoked; as a bound method call it would misbind 'wrt'.
        generateDict = MemberSpecs and MemberSpecs == 'dict'
        if generateDict:
            content = ['    member_data_items_ = {']
        else:
            content = ['    member_data_items_ = [']
        add = content.append
        for attrName, attrDef in element.getAttributeDefs().items():
            item1 = attrName
            item2 = attrDef.getType()
            item3 = 0
            if generateDict:
                item = "        '%s': MemberSpec_('%s', '%s', %d)," % (
                    item1, item1, item2, item3, )
            else:
                item = "        MemberSpec_('%s', '%s', %d)," % (
                    item1, item2, item3, )
            add(item)
        for child in element.getChildren():
            name = self._PGenr.cleanupName(child.getCleanName())
            item1 = name
            simplebase = child.getSimpleBase()
            if simplebase:
                if len(simplebase) == 1:
                    item2 = "'%s'" % (simplebase[0], )
                else:
                    item2 = simplebase
            else:
                element1 = self._PGenr.ElementDict.get(name)
                if element1:
                    item2 = "'%s'" % element1.getType()
                else:
                    item2 = "'%s'" % (child.getType(), )
            if child.getMaxOccurs() > 1:
                item3 = 1
            else:
                item3 = 0
            if generateDict:
                item = "        '%s': MemberSpec_('%s', %s, %d)," % (
                    item1, item1, item2, item3, )
            else:
                #item = "        ('%s', '%s', %d)," % (item1, item2, item3, )
                item = "        MemberSpec_('%s', %s, %d)," % (
                    item1, item2, item3, )
            add(item)
        simplebase = element.getSimpleBase()
        childCount = self._PGenr.countChildren(element, 0)
        # Simple-content / mixed elements carry their text in valueOf_.
        if element.getSimpleContent() or element.isMixed():
            if len(simplebase) == 1:
                simplebase = "'%s'" % (simplebase[0], )
            if generateDict:
                item = "        'valueOf_': MemberSpec_('valueOf_', %s, 0)," % (
                    simplebase, )
            else:
                item = "        MemberSpec_('valueOf_', %s, 0)," % (
                    simplebase, )
            add(item)
        elif element.isMixed():
            if generateDict:
                item = "        'valueOf_': MemberSpec_('valueOf_', '%s', 0)," % (
                    'xs:string', )
            else:
                item = "        MemberSpec_('valueOf_', '%s', 0)," % (
                    'xs:string', )
            add(item)
        if generateDict:
            add('        }')
        else:
            add('        ]')
        wrt('\n'.join(content))
        wrt('\n')
def generateSubSuperInit(self, wrt, superclass_name):
wrt(' subclass = None\n')
wrt(' superclass = %s\n' % (superclass_name, ))
    def generateCtor(self, wrt, element):
        # Emit the generated class's __init__: super() forwarding, one
        # assignment per attribute (cast via _cast), and one per child
        # member, with dict-to-object coercion for complex children.
        elName = element.getCleanName()
        childCount = self._PGenr.countChildren(element, 0)
        s2 = self.buildCtorArgs_multilevel(element, childCount)
        wrt('    def __init__(self%s, **kwargs):\n' % s2)
        base = element.getBase()
        parentName, parent = self._PGenr.getParentName(element)
        if parentName:
            if parentName in self._PGenr.AlreadyGenerated:
                args = self.buildCtorParams(element, parent, childCount)
                s2 = ''.join(args)
                # CPython limits call sites to 255 explicit arguments.
                if len(args) > 254:
                    wrt('        arglist_ = (%s)\n' % (s2, ))
                    wrt('        super(%s, self).__init__(*arglist_)\n' % (elName, ))
                else:
                    wrt('        super(%s, self).__init__(%s)\n' % (elName, s2, ))
        attrDefs = element.getAttributeDefs()
        for key in attrDefs:
            attrDef = attrDefs[key]
            mappedName = self._PGenr.cleanupName(attrDef.getName())
            mappedName = mapName(mappedName)
            logging.debug("Constructor attribute: %s" % mappedName)
            pythonType = SchemaToPythonTypeMap.get(attrDef.getType())
            attrVal = "_cast(%s, %s)" % (pythonType, mappedName)
            wrt('        self.%s = %s\n' % (mappedName, attrVal))
            member = 1
        # Generate member initializers in ctor.
        # wrt("        self.service_type = '%s'\n" % (elName))
        member = 0
        nestedElements = 0
        for child in element.getChildren():
            name = self._PGenr.cleanupName(child.getCleanName())
            logging.debug("Constructor child: %s" % name)
            logging.debug("Dump: %s" % child.__dict__)
            if child.getType() == self._PGenr.AnyTypeIdentifier:
                if child.getMaxOccurs() > 1:
                    wrt('        if anytypeobjs_ is None:\n')
                    wrt('            self.anytypeobjs_ = []\n')
                    wrt('        else:\n')
                    wrt('            self.anytypeobjs_ = anytypeobjs_\n')
                else:
                    wrt('        self.anytypeobjs_ = anytypeobjs_\n')
            else:
                if child.getMaxOccurs() > 1:
                    child_type = child.getType()
                    wrt('        if (%s is None) or (%s == []):\n' % (name, name))
                    wrt('            self.%s = []\n' % (name, ))
                    wrt('        else:\n')
                    # Complex children also accept a list of dicts and
                    # coerce each into the child type.
                    if (child.isComplex()):
                        wrt('            if isinstance(%s[0], dict):\n' %(name))
                        wrt('                objs = [%s(**elem) for elem in %s]\n' \
                                 %(child_type, name))
                        wrt('                self.%s = objs\n' % (name))
                        wrt('            else:\n')
                        wrt('                self.%s = %s\n' % (name, name))
                    else:
                        wrt('            self.%s = %s\n' % (name, name))
                else:
                    typeObj = self._PGenr.ElementDict.get(child.getType())
                    # Simple-content child with a schema default: build the
                    # default instance when no value was supplied.
                    if (child.getDefault() and
                        typeObj is not None and
                        typeObj.getSimpleContent()):
                        wrt('        if %s is None:\n' % (name, ))
                        wrt("            self.%s = globals()['%s']('%s')\n" % (name,
                            child.getType(), child.getDefault(), ))
                        wrt('        else:\n')
                        wrt('            self.%s = %s\n' % (name, name))
                    else:
                        child_type = child.getType()
                        if (child.isComplex()):
                            wrt('        if isinstance(%s, dict):\n' %(name))
                            wrt('            obj = %s(**%s)\n' %(child_type, name))
                            wrt('            self.%s = obj\n' % (name))
                            wrt('        else:\n')
                            wrt('            self.%s = %s\n' % (name, name))
                        else:
                            wrt('        self.%s = %s\n' % (name, name))
            member = 1
            nestedElements = 1
        eltype = element.getType()
        if (element.getSimpleContent() or
            element.isMixed() or
            eltype in self._PGenr.SimpleTypeDict or
            self._PGenr.CurrentNamespacePrefix + eltype in self._PGenr.OtherSimpleTypes
            ):
            wrt('        self.valueOf_ = valueOf_\n')
            member = 1
        if element.getAnyAttribute():
            wrt('        self.anyAttributes_ = {}\n')
            member = 1
        if element.getExtended():
            wrt('        self.extensiontype_ = extensiontype_\n')
            member = 1
        if not member:
            wrt('        pass\n')
        if element.isMixed():
            wrt(MixedCtorInitializers)
    # end generateCtor
def buildCtorArgs_multilevel(self, element, childCount):
content = []
addedArgs = {}
add = content.append
self.buildCtorArgs_multilevel_aux(addedArgs, add, element)
eltype = element.getType()
if (element.getSimpleContent() or
element.isMixed() or
eltype in self._PGenr.SimpleTypeDict or
self._PGenr.CurrentNamespacePrefix + eltype in self._PGenr.OtherSimpleTypes
):
print "SimpleContent()"
add(", valueOf_=None")
if element.isMixed():
print "Mixed"
add(', mixedclass_=None')
add(', content_=None')
if element.getExtended():
print "Extended"
add(', extensiontype_=None')
s1 = ''.join(content)
return s1
def buildCtorArgs_multilevel_aux(self, addedArgs, add, element):
parentName, parentObj = self._PGenr.getParentName(element)
if parentName:
self.buildCtorArgs_multilevel_aux(addedArgs, add, parentObj)
self.buildCtorArgs_aux(addedArgs, add, element)
    def buildCtorArgs_aux(self, addedArgs, add, element):
        # Append one ", name=default" ctor-argument fragment per attribute
        # and per child of *element*, deduplicating via addedArgs.
        #
        # NOTE(review): the attribute branches below test bare names
        # (IntegerType, BooleanType, ...) while the child branches use
        # self._PGenr.* — presumably both resolve to the same type
        # constants (module globals vs. generator attributes); confirm.
        attrDefs = element.getAttributeDefs()
        for key in attrDefs:
            attrDef = attrDefs[key]
            name = attrDef.getName()
            default = attrDef.getDefault()
            mappedName = name.replace(':', '_')
            mappedName = self._PGenr.cleanupName(mapName(mappedName))
            if mappedName in addedArgs:
                continue
            addedArgs[mappedName] = 1
            try:
                atype = attrDef.getData_type()
            except KeyError:
                atype = self._PGenr.StringType
            if atype in self._PGenr.StringType or \
                atype == self._PGenr.TokenType or \
                atype == self._PGenr.DateTimeType or \
                atype == self._PGenr.TimeType or \
                atype == self._PGenr.DateType:
                if default is None:
                    add(", %s=None" % mappedName)
                else:
                    default1 = escape_string(default)
                    add(", %s='%s'" % (mappedName, default1))
            elif atype in IntegerType:
                if default is None:
                    add(', %s=None' % mappedName)
                else:
                    add(', %s=%s' % (mappedName, default))
            elif atype == PositiveIntegerType:
                if default is None:
                    add(', %s=None' % mappedName)
                else:
                    add(', %s=%s' % (mappedName, default))
            elif atype == NonPositiveIntegerType:
                if default is None:
                    add(', %s=None' % mappedName)
                else:
                    add(', %s=%s' % (mappedName, default))
            elif atype == NegativeIntegerType:
                if default is None:
                    add(', %s=None' % mappedName)
                else:
                    add(', %s=%s' % (mappedName, default))
            elif atype == NonNegativeIntegerType:
                if default is None:
                    add(', %s=None' % mappedName)
                else:
                    add(', %s=%s' % (mappedName, default))
            elif atype == BooleanType:
                if default is None:
                    add(', %s=None' % mappedName)
                else:
                    # Schema booleans: 'false'/'0' map to False, all else True.
                    if default in ('false', '0'):
                        add(', %s=%s' % (mappedName, "False"))
                    else:
                        add(', %s=%s' % (mappedName, "True"))
            elif atype == FloatType or atype == DoubleType or atype == DecimalType:
                if default is None:
                    add(', %s=None' % mappedName)
                else:
                    add(', %s=%s' % (mappedName, default))
            else:
                if default is None:
                    add(', %s=None' % mappedName)
                else:
                    add(", %s='%s'" % (mappedName, default, ))
        nestedElements = 0
        for child in element.getChildren():
            cleanName = child.getCleanName()
            if cleanName in addedArgs:
                continue
            addedArgs[cleanName] = 1
            default = child.getDefault()
            nestedElements = 1
            if child.getType() == self._PGenr.AnyTypeIdentifier:
                add(', anytypeobjs_=None')
            elif child.getMaxOccurs() > 1:
                add(', %s=None' % cleanName)
            else:
                childType = child.getType()
                if childType in self._PGenr.StringType or \
                    childType == self._PGenr.TokenType or \
                    childType == self._PGenr.DateTimeType or \
                    childType == self._PGenr.TimeType or \
                    childType == self._PGenr.DateType:
                    if default is None:
                        add(", %s=None" % cleanName)
                    else:
                        default1 = escape_string(default)
                        add(", %s='%s'" % (cleanName, default1, ))
                elif (childType in self._PGenr.IntegerType or
                    childType == self._PGenr.PositiveIntegerType or
                    childType == self._PGenr.NonPositiveIntegerType or
                    childType == self._PGenr.NegativeIntegerType or
                    childType == self._PGenr.NonNegativeIntegerType
                    ):
                    if default is None:
                        add(', %s=None' % cleanName)
                    else:
                        add(', %s=%s' % (cleanName, default, ))
##                 elif childType in IntegerType:
##                     if default is None:
##                         add(', %s=-1' % cleanName)
##                     else:
##                         add(', %s=%s' % (cleanName, default, ))
##                 elif childType == PositiveIntegerType:
##                     if default is None:
##                         add(', %s=1' % cleanName)
##                     else:
##                         add(', %s=%s' % (cleanName, default, ))
##                 elif childType == NonPositiveIntegerType:
##                     if default is None:
##                         add(', %s=0' % cleanName)
##                     else:
##                         add(', %s=%s' % (cleanName, default, ))
##                 elif childType == NegativeIntegerType:
##                     if default is None:
##                         add(', %s=-1' % cleanName)
##                     else:
##                         add(', %s=%s' % (cleanName, default, ))
##                 elif childType == NonNegativeIntegerType:
##                     if default is None:
##                         add(', %s=0' % cleanName)
##                     else:
##                         add(', %s=%s' % (cleanName, default, ))
                elif childType == self._PGenr.BooleanType:
                    if default is None:
                        add(', %s=None' % cleanName)
                    else:
                        if default in ('false', '0'):
                            add(', %s=%s' % (cleanName, "False", ))
                        else:
                            add(', %s=%s' % (cleanName, "True", ))
                elif childType == self._PGenr.FloatType or \
                    childType == self._PGenr.DoubleType or \
                    childType == self._PGenr.DecimalType:
                    if default is None:
                        add(', %s=None' % cleanName)
                    else:
                        add(', %s=%s' % (cleanName, default, ))
                else:
                    add(', %s=None' % cleanName)
    # end buildCtorArgs_aux
def generateEnd(self, wrt, name, s4):
wrt('# end class %s\n' % CamelCase(name))
wrt('\n\n')
def generateGetterAnyType(self, wrt):
wrt(' def get_anytypeobjs_(self): return self.anytypeobjs_\n')
def generateSetterAnyType(self, wrt):
wrt(' def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_\n')
def generateAdderAnyType(self, wrt):
wrt(' def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)\n')
def generateInserterAnyType(self, wrt):
wrt(' def insert_anytypeobjs_(self, index, value): self._anytypeobjs_[index] = value\n')
def generateGetter(self, wrt, capName, name, childType):
wrt(' def get%s(self): return self.%s\n' % (capName, name))
def generateSetter(self, wrt, capName, name, childType):
wrt(' def set%s(self, %s): self.%s = %s\n' %
(capName, name, name, name))
def generateAdder(self, wrt, capName, name):
wrt(' def add%s(self, value): self.%s.append(value)\n' %
(capName, name))
def generateInserter(self, wrt, capName, name):
wrt(' def insert%s(self, index, value): self.%s[index] = value\n' %
(capName, name))
def generateProperty(self, wrt, unmappedName, capName, name):
wrt(' %sProp = property(get%s, set%s)\n' %
(unmappedName, capName, capName))
def generateValidator(self, wrt, typeName):
wrt(' def validate_%s(self, value):\n' % (typeName, ))
if typeName in self._PGenr.SimpleTypeDict:
stObj = self._PGenr.SimpleTypeDict[typeName]
wrt(' # Validate type %s, a restriction on %s.\n' % (
typeName, stObj.getBase(), ))
else:
wrt(' # validate type %s\n' % (typeName, ))
wrt(self._getValidatorBody(typeName))
#
# Attempt to retrieve the body (implementation) of a validator
# from a directory containing one file for each simpleType.
# The name of the file should be the same as the name of the
    # simpleType with an optional ".py" extension.
def _getValidatorBody(self, stName):
retrieved = 0
if self._PGenr.ValidatorBodiesBasePath:
found = 0
path = '%s%s%s.py' % (self._PGenr.ValidatorBodiesBasePath, os.sep, stName, )
if os.path.exists(path):
found = 1
else:
path = '%s%s%s' % (self._PGenr.ValidatorBodiesBasePath, os.sep, stName, )
if os.path.exists(path):
found = 1
if found:
infile = open(path, 'r')
lines = infile.readlines()
infile.close()
lines1 = []
for line in lines:
if not line.startswith('##'):
lines1.append(line)
s1 = ''.join(lines1)
retrieved = 1
if not retrieved:
s1 = ' pass\n'
return s1
    def generateExport(self, wrt, namespace, element):
        # Emit the export() method of the generated class: writes the XML
        # open tag, attributes, children and close tag (self-closing when
        # the instance has no content).
        childCount = self._PGenr.countChildren(element, 0)
        name = element.getName()
        base = element.getBase()
        wrt("    def export(self, outfile, level=1, namespace_='%s', name_='%s', namespacedef_='', pretty_print=True):\n" % \
            (namespace, name, ))
        wrt('        if pretty_print:\n')
        wrt("            eol_ = '\\n'\n")
        wrt('        else:\n')
        wrt("            eol_ = ''\n")
        wrt('        showIndent(outfile, level, pretty_print)\n')
        wrt("        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))\n")
        wrt("        already_processed = []\n")
        wrt("        self.exportAttributes(outfile, level, already_processed, namespace_, name_='%s')\n" % \
            (name, ))
        # fix_abstract
        if base and base in self._PGenr.ElementDict:
            base_element = self._PGenr.ElementDict[base]
            # fix_derived
            if base_element.isAbstract():
                pass
        if childCount == 0 and element.isMixed():
            wrt("        outfile.write('>')\n")
            wrt("        self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)\n")
            wrt("        outfile.write('</%s%s>%s' % (namespace_, name_, eol_))\n")
        else:
            wrt("        if self.hasContent_():\n")
            # Added to keep value on the same line as the tag no children.
            if element.getSimpleContent():
                wrt("            outfile.write('>')\n")
                if not element.isMixed():
                    wrt("            outfile.write(str(self.valueOf_).encode(ExternalEncoding))\n")
            else:
                wrt("            outfile.write('>%s' % (eol_, ))\n")
            wrt("            self.exportChildren(outfile, level + 1, namespace_, name_, pretty_print=pretty_print)\n")
            # Put a condition on the indent to require children.
            if childCount != 0:
                wrt('            showIndent(outfile, level, pretty_print)\n')
            wrt("            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))\n")
            wrt("        else:\n")
            wrt("            outfile.write('/>%s' % (eol_, ))\n")
    def generateExportAttributesFn(self, wrt, namespace, element):
        # Emit the exportAttributes() method: writes each XML attribute,
        # including pass-through anyAttributes_ with namespace handling,
        # then chains to the superclass when there is one.
        name = element.getName()
        wrt("    def exportAttributes(self, outfile, level, already_processed, namespace_='%s', name_='%s'):\n" % \
            (namespace, name, ))
        hasAttributes = 0
        if element.getAnyAttribute():
            # Emit runtime code that re-qualifies attributes carrying a
            # namespace URI, special-casing xsi and the XML namespace.
            wrt("        unique_counter = 0\n")
            wrt('        for name, value in self.anyAttributes_.items():\n')
            wrt("            xsinamespaceprefix = 'xsi'\n")
            wrt("            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'\n")
            wrt("            xsinamespace2 = '{%s}' % (xsinamespace1, )\n")
            wrt("            if name.startswith(xsinamespace2):\n")
            wrt("                name1 = name[len(xsinamespace2):]\n")
            wrt("                name2 = '%s:%s' % (xsinamespaceprefix, name1, )\n")
            wrt("                if name2 not in already_processed:\n")
            wrt("                    already_processed.append(name2)\n")
            wrt("                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))\n")
            wrt("            else:\n")
            wrt("                mo = re_.match(Namespace_extract_pat_, name)\n")
            wrt("                if mo is not None:\n")
            wrt("                    namespace, name = mo.group(1, 2)\n")
            wrt("                    if name not in already_processed:\n")
            wrt("                        already_processed.append(name)\n")
            wrt("                        if namespace == 'http://www.w3.org/XML/1998/namespace':\n")
            wrt("                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))\n")
            wrt("                        else:\n")
            wrt("                            unique_counter += 1\n")
            wrt("                            outfile.write(' xmlns:yyy%d=\"%s\"' % (unique_counter, namespace, ))\n")
            wrt("                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))\n")
            wrt("                else:\n")
            wrt("                    if name not in already_processed:\n")
            wrt("                        already_processed.append(name)\n")
            wrt("                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))\n")
        parentName, parent = self._PGenr.getParentName(element)
        if parentName:
            hasAttributes += 1
            elName = element.getCleanName()
            wrt("        super(%s, self).exportAttributes(outfile, level, already_processed, namespace_, name_='%s')\n" % \
                (elName, name, ))
        hasAttributes += self.generateExportAttributes(wrt, element, hasAttributes)
        if hasAttributes == 0:
            wrt("        pass\n")
    def generateExportAttributes(self, wrt, element, hasAttributes):
        # Emit one attribute-writing statement per declared attribute,
        # choosing the gds_format_* helper by schema type; returns the
        # updated hasAttributes count.
        #
        # NOTE(review): the boolean/float/double branches test bare names
        # (BooleanType, FloatType, ...) while earlier branches use
        # self._PGenr.* — presumably both resolve to the same constants;
        # confirm.
        if len(element.getAttributeDefs()) > 0:
            hasAttributes += 1
            attrDefs = element.getAttributeDefs()
            for key in attrDefs.keys():
                attrDef = attrDefs[key]
                name = attrDef.getName()
                cleanName = mapName(self._PGenr.cleanupName(name))
                capName = self._PGenr.make_gs_name(cleanName)
                if True:            # attrDef.getUse() == 'optional':
                    wrt("        if self.%s is not None and '%s' not in already_processed:\n" % (
                        cleanName, cleanName, ))
                    wrt("            already_processed.append('%s')\n" % (
                        cleanName, ))
                    indent = "    "
                else:
                    indent = ""
                if (attrDef.getType() in self._PGenr.StringType or
                    attrDef.getType() in self._PGenr.IDTypes or
                    attrDef.getType() == self._PGenr.TokenType or
                    attrDef.getType() == self._PGenr.DateTimeType or
                    attrDef.getType() == self._PGenr.TimeType or
                    attrDef.getType() == self._PGenr.DateType):
                    s1 = '''%s        outfile.write(' %s=%%s' %% (self.gds_format_string(quote_attrib(self.%s).encode(ExternalEncoding), input_name='%s'), ))\n''' % \
                        (indent, name, cleanName, name, )
                elif attrDef.getType() in self._PGenr.IntegerType or \
                    attrDef.getType() == self._PGenr.PositiveIntegerType or \
                    attrDef.getType() == self._PGenr.NonPositiveIntegerType or \
                    attrDef.getType() == self._PGenr.NegativeIntegerType or \
                    attrDef.getType() == self._PGenr.NonNegativeIntegerType:
                    s1 = '''%s        outfile.write(' %s="%%s"' %% self.gds_format_integer(self.%s, input_name='%s'))\n''' % (
                        indent, name, cleanName, name, )
                elif attrDef.getType() == BooleanType:
                    s1 = '''%s        outfile.write(' %s="%%s"' %% self.gds_format_boolean(self.gds_str_lower(str(self.%s)), input_name='%s'))\n''' % (
                        indent, name, cleanName, name, )
                elif attrDef.getType() == FloatType or \
                    attrDef.getType() == DecimalType:
                    s1 = '''%s        outfile.write(' %s="%%s"' %% self.gds_format_float(self.%s, input_name='%s'))\n''' % (
                        indent, name, cleanName, name)
                elif attrDef.getType() == DoubleType:
                    s1 = '''%s        outfile.write(' %s="%%s"' %% self.gds_format_double(self.%s, input_name='%s'))\n''' % (
                        indent, name, cleanName, name)
                else:
                    s1 = '''%s        outfile.write(' %s=%%s' %% (quote_attrib(self.%s), ))\n''' % (
                        indent, name, cleanName, )
                wrt(s1)
        if element.getExtended():
            # Extended types also emit their xsi:type attribute.
            wrt("        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:\n")
            wrt("            already_processed.append('xsi:type')\n")
            wrt("            outfile.write(' xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"')\n")
            wrt('''            outfile.write(' xsi:type="%s"' % self.extensiontype_)\n''')
        return hasAttributes
    # end generateExportAttributes
def generateExportChildrenFn(self, wrt, namespace, element):
childCount = self._PGenr.countChildren(element, 0)
name = element.getName()
wrt(" def exportChildren(self, outfile, level, namespace_='%s', name_='%s', fromsubclass_=False, pretty_print=True):\n" % \
(namespace, name, ))
hasChildren = 0
# Generate call to exportChildren in the superclass only if it is
# an extension, but *not* if it is a restriction.
parentName, parent = self._PGenr.getParentName(element)
if parentName and not element.getRestrictionBaseObj():
hasChildren += 1
elName = element.getCleanName()
wrt(" super(%s, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)\n" % (elName, ))
hasChildren += self._generateExportChildren(wrt, element, hasChildren, namespace)
if childCount == 0: # and not element.isMixed():
wrt(" pass\n")
if True or hasChildren > 0 or element.isMixed():
self._generateHascontentMethod(wrt, element)
def _generateHascontentMethod(self, wrt, element):
childCount = self._PGenr.countChildren(element, 0)
wrt(' def hasContent_(self):\n')
wrt(' if (\n')
firstTime = True
for child in element.getChildren():
if child.getType() == self._PGenr.AnyTypeIdentifier:
name = 'anytypeobjs_'
else:
name = self._PGenr.mapName(self._PGenr.cleanupName(child.getName()))
if not firstTime:
wrt(' or\n')
firstTime = False
if child.getMaxOccurs() > 1:
wrt(' self.%s' % (name, ))
else:
wrt(' self.%s is not None' % (name, ))
if element.getSimpleContent() or element.isMixed():
if not firstTime:
wrt(' or\n')
firstTime = False
wrt(' self.valueOf_')
parentName, parent = self._PGenr.getParentName(element)
if parentName:
elName = element.getCleanName()
if not firstTime:
wrt(' or\n')
firstTime = False
wrt(' super(%s, self).hasContent_()' % (elName, ))
wrt('\n ):\n')
wrt(' return True\n')
wrt(' else:\n')
wrt(' return False\n')
    def _generateExportChildren(self, wrt, element, hasChildren, namespace):
        """Emit the body of exportChildren() for *element*; return the
        updated ``hasChildren`` count.

        Mixed-content elements replay their recorded content_ items;
        otherwise each child is dispatched to _generateExportFn_1/_2/_3
        depending on cardinality/optionality, with xs:anyType children
        handled after the loop.
        """
        fill = '        '
        if len(element.getChildren()) > 0:
            hasChildren += 1
            if element.isMixed():
                # Mixed content: export the ordered content_ container.
                wrt('%sif not fromsubclass_:\n' % (fill, ))
                wrt("%s    for item_ in self.content_:\n" % (fill, ))
                wrt("%s        item_.export(outfile, level, item_.name, namespace_, pretty_print=pretty_print)\n" % (
                    fill, ))
            else:
                wrt('%sif pretty_print:\n' % (fill, ))
                wrt("%s    eol_ = '\\n'\n" % (fill, ))
                wrt('%selse:\n' % (fill, ))
                wrt("%s    eol_ = ''\n" % (fill, ))
                any_type_child = None
                for child in element.getChildren():
                    unmappedName = child.getName()
                    name = self._PGenr.mapName(self._PGenr.cleanupName(child.getName()))
                    # fix_abstract: children whose declared type is an
                    # abstract element need an export via their getter.
                    type_element = None
                    abstract_child = False
                    type_name = child.getAttrs().get('type')
                    if type_name:
                        type_element = self._PGenr.ElementDict.get(type_name)
                    if type_element and type_element.isAbstract():
                        abstract_child = True
                    if child.getType() == self._PGenr.AnyTypeIdentifier:
                        # Deferred: anyType children are emitted after the loop.
                        any_type_child = child
                    else:
                        if abstract_child and child.getMaxOccurs() > 1:
                            wrt("%sfor %s_ in self.get%s():\n" % (fill,
                                name, self._PGenr.make_gs_name(name),))
                            wrt("%s    %s_.export(outfile, level, namespace_, name_='%s', pretty_print=pretty_print)\n" % (
                                fill, name, name, ))
                        elif abstract_child:
                            wrt("%sif self.%s is not None:\n" % (fill, name, ))
                            wrt("%s    self.%s.export(outfile, level, namespace_, name_='%s', pretty_print=pretty_print)\n" % (
                                fill, name, name, ))
                        elif child.getMaxOccurs() > 1:
                            self._generateExportFn_2(wrt, child, unmappedName, namespace, '    ')
                        else:
                            if (child.getOptional()):
                                self._generateExportFn_3(wrt, child, unmappedName, namespace, '')
                            else:
                                self._generateExportFn_1(wrt, child, unmappedName, namespace, '')
                if any_type_child is not None:
                    if any_type_child.getMaxOccurs() > 1:
                        wrt('        for obj_ in self.anytypeobjs_:\n')
                        wrt("            obj_.export(outfile, level, namespace_, pretty_print=pretty_print)\n")
                    else:
                        wrt('        if self.anytypeobjs_ is not None:\n')
                        wrt("            self.anytypeobjs_.export(outfile, level, namespace_, pretty_print=pretty_print)\n")
        return hasChildren
    def _generateExportFn_1(self, wrt, child, name, namespace, fill):
        """Emit export code for a required, single-occurrence child.

        Picks a gds_format_* formatter by the child's XSD type; complex
        children delegate to their own export() method.  *fill* is extra
        indentation prefixed to each emitted line.
        """
        cleanName = self._PGenr.cleanupName(name)
        mappedName = self._PGenr.mapName(cleanName)
        child_type = child.getType()
        if child_type in self._PGenr.StringType or \
            child_type == self._PGenr.TokenType or \
            child_type == self._PGenr.DateTimeType or \
            child_type == self._PGenr.TimeType or \
            child_type == self._PGenr.DateType:
            wrt('%s        if self.%s is not None:\n' % (fill, mappedName, ))
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            # fixlist: list-valued simple types are space-joined before quoting.
            if (child.getSimpleType() in self._PGenr.SimpleTypeDict and
                self._PGenr.SimpleTypeDict[child.getSimpleType()].isListType()):
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_string(quote_xml(' '.join(self.%s)).encode(ExternalEncoding), input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_string(quote_xml(self.%s).encode(ExternalEncoding), input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            wrt(s1)
        elif child_type in self._PGenr.IntegerType or \
            child_type == self._PGenr.PositiveIntegerType or \
            child_type == self._PGenr.NonPositiveIntegerType or \
            child_type == self._PGenr.NegativeIntegerType or \
            child_type == self._PGenr.NonNegativeIntegerType:
            wrt('%s        if self.%s is not None:\n' % (fill, mappedName, ))
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_integer_list(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_integer(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            wrt(s1)
        elif child_type == self._PGenr.BooleanType:
            wrt('%s        if self.%s is not None:\n' % (fill, mappedName, ))
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_boolean_list(self.gds_str_lower(str(self.%s)), input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.%s)), input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            wrt(s1)
        elif child_type == self._PGenr.FloatType or \
            child_type == self._PGenr.DecimalType:
            wrt('%s        if self.%s is not None:\n' % (fill, mappedName, ))
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_float_list(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_float(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            wrt(s1)
        elif child_type == self._PGenr.DoubleType:
            wrt('%s        if self.%s is not None:\n' % (fill, mappedName, ))
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_double_list(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_double(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            wrt(s1)
        else:
            # Complex type: delegate to the child's own export().
            wrt("%s        if self.%s is not None:\n" % (fill, mappedName))
            # name_type_problem
            if False:   # name == child.getType():
                s1 = "%s            self.%s.export(outfile, level, namespace_, pretty_print=pretty_print)\n" % \
                    (fill, mappedName)
            else:
                s1 = "%s            self.%s.export(outfile, level, namespace_, name_='%s', pretty_print=pretty_print)\n" % \
                    (fill, mappedName, name)
            wrt(s1)
        # end _generateExportFn_1
    def _generateExportFn_2(self, wrt, child, name, namespace, fill):
        """Emit export code for a repeated child (maxOccurs > 1).

        Generates a `for ... in self.<attr>:` loop and, per XSD type, the
        matching gds_format_* call; complex items are exported via their
        own export() (with a dict-to-instance coercion first).
        """
        cleanName = self._PGenr.cleanupName(name)
        mappedName = self._PGenr.mapName(cleanName)
        child_type = child.getType()
        # fix_simpletype
        wrt("%s        for %s_ in self.%s:\n" % (fill, cleanName, mappedName, ))
        if child_type in self._PGenr.StringType or \
            child_type == self._PGenr.TokenType or \
            child_type == self._PGenr.DateTimeType or \
            child_type == self._PGenr.TimeType or \
            child_type == self._PGenr.DateType:
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            wrt("%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_string(quote_xml(%s_).encode(ExternalEncoding), input_name='%s'), namespace_, eol_))\n" %
                (fill, name, name, cleanName, name,))
        elif child_type in self._PGenr.IntegerType or \
            child_type == self._PGenr.PositiveIntegerType or \
            child_type == self._PGenr.NonPositiveIntegerType or \
            child_type == self._PGenr.NegativeIntegerType or \
            child_type == self._PGenr.NonNegativeIntegerType:
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_integer_list(%s_, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, cleanName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_integer(%s_, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, cleanName, name, )
            wrt(s1)
        elif child_type == self._PGenr.BooleanType:
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_boolean_list(self.gds_str_lower(str(%s_)), input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, cleanName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_boolean(self.gds_str_lower(str(%s_)), input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, cleanName, name, )
            wrt(s1)
        elif child_type == self._PGenr.FloatType or \
            child_type == self._PGenr.DecimalType:
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_float_list(%s_, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, cleanName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_float(%s_, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, cleanName, name, )
            wrt(s1)
        elif child_type == self._PGenr.DoubleType:
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_double_list(%s_, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, cleanName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_double(%s_, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, cleanName, name, )
            wrt(s1)
        else:
            # name_type_problem
            if False:   # name == child.getType():
                s1 = "%s            %s_.export(outfile, level, namespace_, pretty_print=pretty_print)\n" % (fill, cleanName)
            else:
                # Allow plain dicts in the list: coerce them into the
                # child's class before exporting.
                wrt("%s            if isinstance(%s_, dict):\n" %(fill, cleanName))
                wrt("%s                %s_ = %s(**%s_)\n" %(fill, cleanName, child_type, cleanName))
                s1 = "%s            %s_.export(outfile, level, namespace_, name_='%s', pretty_print=pretty_print)\n" % \
                    (fill, cleanName, name)
            wrt(s1)
        # end generateExportFn_2
    def _generateExportFn_3(self, wrt, child, name, namespace, fill):
        """Emit export code for an optional, single-occurrence child.

        Same emission strategy as _generateExportFn_1 (guarded by an
        `is not None` check, formatter chosen by XSD type); kept separate
        so the caller can distinguish optional children.
        """
        cleanName = self._PGenr.cleanupName(name)
        mappedName = self._PGenr.mapName(cleanName)
        child_type = child.getType()
        # fix_simpletype
        if child_type in self._PGenr.StringType or \
            child_type == self._PGenr.TokenType or \
            child_type == self._PGenr.DateTimeType or \
            child_type == self._PGenr.TimeType or \
            child_type == self._PGenr.DateType:
            wrt('%s        if self.%s is not None:\n' % (fill, mappedName, ))
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            # fixlist: list-valued simple types are space-joined before quoting.
            if (child.getSimpleType() in self._PGenr.SimpleTypeDict and
                self._PGenr.SimpleTypeDict[child.getSimpleType()].isListType()):
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_string(quote_xml(' '.join(self.%s)).encode(ExternalEncoding), input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_string(quote_xml(self.%s).encode(ExternalEncoding), input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            wrt(s1)
        elif child_type in self._PGenr.IntegerType or \
            child_type == self._PGenr.PositiveIntegerType or \
            child_type == self._PGenr.NonPositiveIntegerType or \
            child_type == self._PGenr.NegativeIntegerType or \
            child_type == self._PGenr.NonNegativeIntegerType:
            wrt('%s        if self.%s is not None:\n' % (fill, mappedName, ))
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_integer_list(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_integer(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            wrt(s1)
        elif child_type == self._PGenr.BooleanType:
            wrt('%s        if self.%s is not None:\n' % (fill, mappedName, ))
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_boolean_list(self.gds_str_lower(str(self.%s)), input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_boolean(self.gds_str_lower(str(self.%s)), input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name )
            wrt(s1)
        elif child_type == self._PGenr.FloatType or \
            child_type == self._PGenr.DecimalType:
            wrt('%s        if self.%s is not None:\n' % (fill, mappedName, ))
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_float_list(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_float(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            wrt(s1)
        elif child_type == self._PGenr.DoubleType:
            wrt('%s        if self.%s is not None:\n' % (fill, mappedName, ))
            wrt('%s            showIndent(outfile, level, pretty_print)\n' % fill)
            if child.isListType():
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_double_list(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            else:
                s1 = "%s            outfile.write('<%%s%s>%%s</%%s%s>%%s' %% (namespace_, self.gds_format_double(self.%s, input_name='%s'), namespace_, eol_))\n" % \
                    (fill, name, name, mappedName, name, )
            wrt(s1)
        else:
            # Complex type: delegate to the child's own export().
            wrt("%s        if self.%s is not None:\n" % (fill, mappedName))
            # name_type_problem
            if False:   # name == child.getType():
                s1 = "%s            self.%s.export(outfile, level, namespace_, pretty_print=pretty_print)\n" % \
                    (fill, mappedName)
            else:
                s1 = "%s            self.%s.export(outfile, level, namespace_, name_='%s', pretty_print=pretty_print)\n" % \
                    (fill, mappedName, name)
            wrt(s1)
        # end generateExportFn_3
def generateExportDict(self, wrt, element):
name = element.getName()
base = element.getBase()
wrt(" def exportDict(self, name_='%s'):\n" % (CamelCase(name), ))
wrt(' obj_json = json.dumps(self, default=lambda o: dict((k, v) for k, v in o.__dict__.iteritems()))\n')
wrt(' obj_dict = json.loads(obj_json)\n')
wrt(" return {name_ : obj_dict}\n")
def generateExportDict2(self, wrt, element):
name = element.getName()
childCount = self._PGenr.countChildren(element, 0)
wrt(" def exportDict2(self, name_='%s'):\n" % (CamelCase(name), ))
wrt(' obj_dict = {\n')
for child in element.getChildren():
if child.getType() == self._PGenr.AnyTypeIdentifier:
name = 'anytypeobjs_'
else:
name = self._PGenr.mapName(self._PGenr.cleanupName(child.getName()))
wrt(' "%s" : self.%s,\n' %(child.getName(), name))
wrt(' }\n')
wrt(" return (name_, obj_dict)\n")
def generateBuild(self, wrt, element):
base = element.getBase()
wrt(' def build(self, node):\n')
wrt(' self.buildAttributes(node, node.attrib, [])\n')
childCount = self._PGenr.countChildren(element, 0)
if element.isMixed() or element.getSimpleContent():
wrt(" self.valueOf_ = get_all_text_(node)\n")
if element.isMixed():
wrt(" if node.text is not None:\n")
wrt(" obj_ = self.mixedclass_(MixedContainer.CategoryText,\n")
wrt(" MixedContainer.TypeNone, '', node.text)\n")
wrt(" self.content_.append(obj_)\n")
wrt(' for child in node:\n')
wrt(" nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]\n")
wrt(" self.buildChildren(child, node, nodeName_)\n")
def generateBuildAttributesFn(self, wrt, element):
wrt(' def buildAttributes(self, node, attrs, already_processed):\n')
hasAttributes = 0
hasAttributes = self._generateBuildAttributes(wrt, element, hasAttributes)
parentName, parent = self._PGenr.getParentName(element)
if parentName:
hasAttributes += 1
elName = element.getCleanName()
wrt(' super(%s, self).buildAttributes(node, attrs, already_processed)\n' % (
elName, ))
if hasAttributes == 0:
wrt(' pass\n')
def _generateBuildAttributes(self, wrt, element, hasAttributes):
attrDefs = element.getAttributeDefs()
for key in attrDefs:
attrDef = attrDefs[key]
hasAttributes += 1
name = attrDef.getName()
cleanName = self._PGenr.cleanupName(name)
mappedName = self._PGenr.mapName(cleanName)
atype = attrDef.getType()
if atype in self._PGenr.SimpleTypeDict:
atype = self._PGenr.SimpleTypeDict[atype].getBase()
self._LangGenr.generateBuildAttributeForType(wrt, element, atype, name, mappedName)
hasAttributes += self._generateBuildAttributeForAny(wrt, element)
hasAttributes += self._generateBuildAttributeForExt(wrt, element)
return hasAttributes
def _generateBuildAttributeForType(self, wrt, element, atype, name, mappedName):
if atype in self._PGenr.IntegerType or \
atype == self._PGenr.PositiveIntegerType or \
atype == self._PGenr.NonPositiveIntegerType or \
atype == self._PGenr.NegativeIntegerType or \
atype == self._PGenr.NonNegativeIntegerType:
wrt(" value = find_attr_value_('%s', node)\n" % (name, ))
wrt(" if value is not None and '%s' not in already_processed:\n" % (
name, ))
wrt(" already_processed.append('%s')\n" % (name, ))
wrt(' try:\n')
wrt(" self.%s = int(value)\n" % (mappedName, ))
wrt(' except ValueError, exp:\n')
wrt(" raise_parse_error(node, 'Bad integer attribute: %s' % exp)\n")
if atype == self._PGenr.PositiveIntegerType:
wrt(' if self.%s <= 0:\n' % mappedName)
wrt(" raise_parse_error(node, 'Invalid PositiveInteger')\n")
elif atype == self._PGenr.NonPositiveIntegerType:
wrt(' if self.%s > 0:\n' % mappedName)
wrt(" raise_parse_error(node, 'Invalid NonPositiveInteger')\n")
elif atype == self._PGenr.NegativeIntegerType:
wrt(' if self.%s >= 0:\n' % mappedName)
wrt(" raise_parse_error(node, 'Invalid NegativeInteger')\n")
elif atype == self._PGenr.NonNegativeIntegerType:
wrt(' if self.%s < 0:\n' % mappedName)
wrt(" raise_parse_error(node, 'Invalid NonNegativeInteger')\n")
elif atype == self._PGenr.BooleanType:
wrt(" value = find_attr_value_('%s', node)\n" % (name, ))
wrt(" if value is not None and '%s' not in already_processed:\n" % (
name, ))
wrt(" already_processed.append('%s')\n" % (name, ))
wrt(" if value in ('true', '1'):\n")
wrt(" self.%s = True\n" % mappedName)
wrt(" elif value in ('false', '0'):\n")
wrt(" self.%s = False\n" % mappedName)
wrt(' else:\n')
wrt(" raise_parse_error(node, 'Bad boolean attribute')\n")
elif atype == self._PGenr.FloatType or atype == self._PGenr.DoubleType or atype == self._PGenr.DecimalType:
wrt(" value = find_attr_value_('%s', node)\n" % (name, ))
wrt(" if value is not None and '%s' not in already_processed:\n" % (
name, ))
wrt(" already_processed.append('%s')\n" % (name, ))
wrt(' try:\n')
wrt(" self.%s = float(value)\n" % \
(mappedName, ))
wrt(' except ValueError, exp:\n')
wrt(" raise ValueError('Bad float/double attribute (%s): %%s' %% exp)\n" % \
(name, ))
elif atype == self._PGenr.TokenType:
wrt(" value = find_attr_value_('%s', node)\n" % (name, ))
wrt(" if value is not None and '%s' not in already_processed:\n" % (
name, ))
wrt(" already_processed.append('%s')\n" % (name, ))
wrt(" self.%s = value\n" % (mappedName, ))
wrt(" self.%s = ' '.join(self.%s.split())\n" % \
(mappedName, mappedName, ))
else:
# Assume attr['type'] in StringType or attr['type'] == DateTimeType:
wrt(" value = find_attr_value_('%s', node)\n" % (name, ))
wrt(" if value is not None and '%s' not in already_processed:\n" % (
name, ))
wrt(" already_processed.append('%s')\n" % (name, ))
wrt(" self.%s = value\n" % (mappedName, ))
typeName = attrDef.getType()
if typeName and typeName in self._PGenr.SimpleTypeDict:
wrt(" self.validate_%s(self.%s) # validate type %s\n" % (
typeName, mappedName, typeName, ))
def _generateBuildAttributeForAny(self, wrt, element):
hasAttributes = 0
if element.getAnyAttribute():
hasAttributes += 1
wrt(' self.anyAttributes_ = {}\n')
wrt(' for name, value in attrs.items():\n')
wrt(" if name not in already_processed:\n")
wrt(' self.anyAttributes_[name] = value\n')
return hasAttributes
def _generateBuildAttributeForExt(self, wrt, element):
hasAttributes = 0
if element.getExtended():
hasAttributes += 1
wrt(" value = find_attr_value_('xsi:type', node)\n")
wrt(" if value is not None and 'xsi:type' not in already_processed:\n")
wrt(" already_processed.append('xsi:type')\n")
wrt(" self.extensiontype_ = value\n")
return hasAttributes
def generateBuildChildren(self, wrt, element, prefix, delayed):
wrt(' def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):\n')
keyword = 'if'
hasChildren = 0
if element.isMixed():
hasChildren = self._generateBuildMixed(wrt, prefix, element, keyword,
delayed, hasChildren)
else: # not element.isMixed()
hasChildren = self._generateBuildStandard(wrt, prefix, element, keyword,
delayed, hasChildren)
# Generate call to buildChildren in the superclass only if it is
# an extension, but *not* if it is a restriction.
base = element.getBase()
if base and not element.getSimpleContent():
elName = element.getCleanName()
wrt(" super(%s, self).buildChildren(child_, node, nodeName_, True)\n" % (elName, ))
eltype = element.getType()
if hasChildren == 0:
wrt(" pass\n")
def _generateBuildStandard(self, wrt, prefix, element, keyword, delayed, hasChildren):
any_type_child = None
for child in element.getChildren():
if child.getType() == self._PGenr.AnyTypeIdentifier:
any_type_child = child
else:
self._generateBuildStandard_1(wrt, prefix, child, child,
element, keyword, delayed)
hasChildren += 1
keyword = 'elif'
# Does this element have a substitutionGroup?
# If so generate a clause for each element in the substitutionGroup.
childName = child.getName()
if childName in self._PGenr.SubstitutionGroups:
for memberName in transitiveClosure(self._PGenr.SubstitutionGroups, childName):
memberName = self._PGenr.cleanupName(memberName)
if memberName in self._PGenr.ElementDict:
member = self._PGenr.ElementDict[memberName]
self._generateBuildStandard_1(wrt, prefix, member, child,
element, keyword, delayed)
hasChildren += self._generateBuildAnyType(wrt, element, any_type_child)
return hasChildren
def _generateBuildStandard_1(self, wrt, prefix, child, headChild,
element, keyword, delayed):
origName = child.getName()
name = self._PGenr.cleanupName(child.getName())
mappedName = self._PGenr.mapName(name)
headName = self._PGenr.cleanupName(headChild.getName())
attrCount = len(child.getAttributeDefs())
childType = child.getType()
base = child.getBase()
self._generateBuildStandard_1_ForType(wrt, prefix, child, headChild, keyword, delayed)
#
# If this child is defined in a simpleType, then generate
# a validator method.
self._generateBuildValidator(wrt, child)
def _generateBuildMixed(wrt, prefix, element, keyword, delayed, hasChildren):
for child in element.getChildren():
self._generateBuildMixed_1(wrt, prefix, child, child, keyword, delayed)
hasChildren += 1
keyword = 'elif'
# Does this element have a substitutionGroup?
# If so generate a clause for each element in the substitutionGroup.
if child.getName() in self._PGenr.SubstitutionGroups:
for memberName in self._PGenr.SubstitutionGroups[child.getName()]:
if memberName in self._PGenr.ElementDict:
member = self._PGenr.ElementDict[memberName]
self._generateBuildMixed_1(wrt, prefix, member, child,
keyword, delayed)
wrt(" if not fromsubclass_ and child_.tail is not None:\n")
wrt(" obj_ = self.mixedclass_(MixedContainer.CategoryText,\n")
wrt(" MixedContainer.TypeNone, '', child_.tail)\n")
wrt(" self.content_.append(obj_)\n")
## base = element.getBase()
## if base and base in ElementDict:
## parent = ElementDict[base]
## hasChildren = generateBuildMixed(wrt, prefix, parent, keyword, delayed, hasChildren)
return hasChildren
def _generateBuildMixed_1(wrt, prefix, child, headChild, keyword, delayed):
nestedElements = 1
origName = child.getName()
name = child.getCleanName()
headName = self._PGenr.cleanupName(headChild.getName())
childType = child.getType()
mappedName = self._PGenr.mapName(name)
base = child.getBase()
if childType in self._PGenr.StringType or \
childType == self._PGenr.TokenType or \
childType == self._PGenr.DateTimeType or \
childType == self._PGenr.TimeType or \
childType == self._PGenr.DateType:
wrt(" %s nodeName_ == '%s' and child_.text is not None:\n" % (
keyword, origName, ))
wrt(" valuestr_ = child_.text\n")
if childType == TokenType:
wrt(' valuestr_ = re_.sub(String_cleanup_pat_, " ", valuestr_).strip()\n')
wrt(" obj_ = self.mixedclass_(MixedContainer.CategorySimple,\n")
wrt(" MixedContainer.TypeString, '%s', valuestr_)\n" % \
origName)
wrt(" self.content_.append(obj_)\n")
elif childType in self._PGenr.IntegerType or \
childType == self._PGenr.PositiveIntegerType or \
childType == self._PGenr.NonPositiveIntegerType or \
childType == self._PGenr.NegativeIntegerType or \
childType == self._PGenr.NonNegativeIntegerType:
wrt(" %s nodeName_ == '%s' and child_.text is not None:\n" % (
keyword, origName, ))
wrt(" sval_ = child_.text\n")
wrt(" try:\n")
wrt(" ival_ = int(sval_)\n")
wrt(" except (TypeError, ValueError), exp:\n")
wrt(" raise_parse_error(child_, 'requires integer: %s' % exp)\n")
if childType == self._PGenr.PositiveIntegerType:
wrt(" if ival_ <= 0:\n")
wrt(" raise_parse_error(child_, 'Invalid positiveInteger')\n")
if childType == self._PGenr.NonPositiveIntegerType:
wrt(" if ival_ > 0:\n")
wrt(" raise_parse_error(child_, 'Invalid nonPositiveInteger)\n")
if childType == self._PGenr.NegativeIntegerType:
wrt(" if ival_ >= 0:\n")
wrt(" raise_parse_error(child_, 'Invalid negativeInteger')\n")
if childType == self._PGenr.NonNegativeIntegerType:
wrt(" if ival_ < 0:\n")
wrt(" raise_parse_error(child_, 'Invalid nonNegativeInteger')\n")
wrt(" obj_ = self.mixedclass_(MixedContainer.CategorySimple,\n")
wrt(" MixedContainer.TypeInteger, '%s', ival_)\n" % (
origName, ))
wrt(" self.content_.append(obj_)\n")
elif childType == self._PGenr.BooleanType:
wrt(" %s nodeName_ == '%s' and child_.text is not None:\n" % (
keyword, origName, ))
wrt(" sval_ = child_.text\n")
wrt(" if sval_ in ('true', '1'):\n")
wrt(" ival_ = True\n")
wrt(" elif sval_ in ('false', '0'):\n")
wrt(" ival_ = False\n")
wrt(" else:\n")
wrt(" raise_parse_error(child_, 'requires boolean')\n")
wrt(" obj_ = self.mixedclass_(MixedContainer.CategorySimple,\n")
wrt(" MixedContainer.TypeInteger, '%s', ival_)\n" % \
origName)
wrt(" self.content_.append(obj_)\n")
elif childType == self._PGenr.FloatType or \
childType == self._PGenr.DoubleType or \
childType == self._PGenr.DecimalType:
wrt(" %s nodeName_ == '%s' and child_.text is not None:\n" % (
keyword, origName, ))
wrt(" sval_ = child_.text\n")
wrt(" try:\n")
wrt(" fval_ = float(sval_)\n")
wrt(" except (TypeError, ValueError), exp:\n")
wrt(" raise_parse_error(child_, 'requires float or double: %s' % exp)\n")
wrt(" obj_ = self.mixedclass_(MixedContainer.CategorySimple,\n")
wrt(" MixedContainer.TypeFloat, '%s', fval_)\n" % \
origName)
wrt(" self.content_.append(obj_)\n")
else:
# Perhaps it's a complexType that is defined right here.
# Generate (later) a class for the nested types.
type_element = None
abstract_child = False
type_name = child.getAttrs().get('type')
if type_name:
type_element = self._PGenr.ElementDict.get(type_name)
if type_element and type_element.isAbstract():
abstract_child = True
if not delayed and not child in self._PGenr.DelayedElements:
self._PGenr.DelayedElements.append(child)
self._PGenr.DelayedElements_subclass.append(child)
wrt(" %s nodeName_ == '%s':\n" % (keyword, origName, ))
if abstract_child:
wrt(TEMPLATE_ABSTRACT_CHILD % (mappedName, ))
else:
type_obj = self._PGenr.ElementDict.get(childType)
if type_obj is not None and type_obj.getExtended():
wrt(" class_obj_ = self.get_class_obj_(child_, %s%s)\n" % (
prefix, self._PGenr.cleanupName(self._PGenr.mapName(childType)), ))
wrt(" class_obj_ = %s%s.factory()\n")
else:
wrt(" obj_ = %s%s.factory()\n" % (
prefix, self._PGenr.cleanupName(self._PGenr.mapName(childType))))
wrt(" obj_.build(child_)\n")
wrt(" obj_ = self.mixedclass_(MixedContainer.CategoryComplex,\n")
wrt(" MixedContainer.TypeNone, '%s', obj_)\n" % \
origName)
wrt(" self.content_.append(obj_)\n")
# Generate code to sort mixed content in their class
# containers
s1 = " if hasattr(self, 'add_%s'):\n" % (origName, )
s1 +=" self.add_%s(obj_.value)\n" % (origName, )
s1 +=" elif hasattr(self, 'set_%s'):\n" % (origName, )
s1 +=" self.set_%s(obj_.value)\n" % (origName, )
wrt(s1)
    def _generateBuildStandard_1_ForType(self, wrt, prefix, child, headChild, keyword, delayed):
        """Emit one dispatch branch of a generated ``buildChildren_`` method.

        Writes (via *wrt*) the Python source that handles one child element
        *child* inside the generated class's child-dispatch chain.

        :param wrt: callable that receives each line of generated source
        :param prefix: class-name prefix used for generated factory calls
        :param child: the child element being dispatched on
        :param headChild: head element of the substitution group (decides
            whether the value is appended or set)
        :param keyword: dispatch keyword to emit -- ``'if'`` or ``'elif'``
        :param delayed: when falsy, nested complex types are queued on
            DelayedElements for later class generation
        """
        origName = child.getName()
        name = self._PGenr.cleanupName(child.getName())
        mappedName = self._PGenr.mapName(name)
        headName = self._PGenr.cleanupName(headChild.getName())
        childType = child.getType()
        attrCount = len(child.getAttributeDefs())
        # Branch 1: attribute-less string-like or xs:list children -- the
        # generated code just takes child_.text (with per-type validation).
        if (attrCount == 0 and
            ((childType in self._PGenr.StringType or
              childType == self._PGenr.TokenType or
              childType == self._PGenr.DateTimeType or
              childType == self._PGenr.TimeType or
              childType == self._PGenr.DateType or
              child.isListType()
            ))
            ):
            wrt("        %s nodeName_ == '%s':\n" % (keyword, origName, ))
            wrt("            %s_ = child_.text\n" % name)
            if childType == self._PGenr.TokenType:
                # xs:token collapses internal whitespace in the generated code.
                wrt('            %s_ = re_.sub(String_cleanup_pat_, " ", %s_).strip()\n' %(name, name))
            if child.isListType():
                if childType in self._PGenr.IntegerType or \
                    childType == self._PGenr.PositiveIntegerType or \
                    childType == self._PGenr.NonPositiveIntegerType or \
                    childType == self._PGenr.NegativeIntegerType or \
                    childType == self._PGenr.NonNegativeIntegerType:
                    wrt("            %s_ = self.gds_validate_integer_list(%s_, node, '%s')\n" % (
                        name, name, name, ))
                elif childType == self._PGenr.BooleanType:
                    wrt("            %s_ = self.gds_validate_boolean_list(%s_, node, '%s')\n" % (
                        name, name, name, ))
                elif childType == self._PGenr.FloatType or \
                    childType == self._PGenr.DecimalType:
                    wrt("            %s_ = self.gds_validate_float_list(%s_, node, '%s')\n" % (
                        name, name, name, ))
                elif childType == self._PGenr.DoubleType:
                    wrt("            %s_ = self.gds_validate_double_list(%s_, node, '%s')\n" % (
                        name, name, name, ))
            else:
                wrt("            %s_ = self.gds_validate_string(%s_, node, '%s')\n" % (
                    name, name, name, ))
            if child.getMaxOccurs() > 1:
                wrt("            self.%s.append(%s_)\n" % (mappedName, name, ))
            else:
                wrt("            self.%s = %s_\n" % (mappedName, name, ))
        # Branch 2: integer flavours -- generated code parses int(sval_) and
        # range-checks the positive/negative variants.
        # NOTE: the emitted 'except ..., exp' syntax targets Python 2 output.
        elif childType in self._PGenr.IntegerType or \
                childType == self._PGenr.PositiveIntegerType or \
                childType == self._PGenr.NonPositiveIntegerType or \
                childType == self._PGenr.NegativeIntegerType or \
                childType == self._PGenr.NonNegativeIntegerType:
            wrt("        %s nodeName_ == '%s':\n" % (keyword, origName, ))
            wrt("            sval_ = child_.text\n")
            wrt("            try:\n")
            wrt("                ival_ = int(sval_)\n")
            wrt("            except (TypeError, ValueError), exp:\n")
            wrt("                raise_parse_error(child_, 'requires integer: %s' % exp)\n")
            if childType == self._PGenr.PositiveIntegerType:
                wrt("            if ival_ <= 0:\n")
                wrt("                raise_parse_error(child_, 'requires positiveInteger')\n")
            elif childType == self._PGenr.NonPositiveIntegerType:
                wrt("            if ival_ > 0:\n")
                wrt("                raise_parse_error(child_, 'requires nonPositiveInteger')\n")
            elif childType == self._PGenr.NegativeIntegerType:
                wrt("            if ival_ >= 0:\n")
                wrt("                raise_parse_error(child_, 'requires negativeInteger')\n")
            elif childType == self._PGenr.NonNegativeIntegerType:
                wrt("            if ival_ < 0:\n")
                wrt("                raise_parse_error(child_, 'requires nonNegativeInteger')\n")
            wrt("            ival_ = self.gds_validate_integer(ival_, node, '%s')\n" % (
                name, ))
            if child.getMaxOccurs() > 1:
                wrt("            self.%s.append(ival_)\n" % (mappedName, ))
            else:
                wrt("            self.%s = ival_\n" % (mappedName, ))
        # Branch 3: xs:boolean -- accepts 'true'/'1'/'false'/'0' only.
        elif childType == self._PGenr.BooleanType:
            wrt("        %s nodeName_ == '%s':\n" % (keyword, origName, ))
            wrt("            sval_ = child_.text\n")
            wrt("            if sval_ in ('true', '1'):\n")
            wrt("                ival_ = True\n")
            wrt("            elif sval_ in ('false', '0'):\n")
            wrt("                ival_ = False\n")
            wrt("            else:\n")
            wrt("                raise_parse_error(child_, 'requires boolean')\n")
            wrt("            ival_ = self.gds_validate_boolean(ival_, node, '%s')\n" % (
                name, ))
            if child.getMaxOccurs() > 1:
                wrt("            self.%s.append(ival_)\n" % (mappedName, ))
            else:
                wrt("            self.%s = ival_\n" % (mappedName, ))
        # Branch 4: float/double/decimal -- all parsed via float() in the
        # generated code.
        elif childType == self._PGenr.FloatType or \
                childType == self._PGenr.DoubleType or \
                childType == self._PGenr.DecimalType:
            wrt("        %s nodeName_ == '%s':\n" % (keyword, origName, ))
            wrt("            sval_ = child_.text\n")
            wrt("            try:\n")
            wrt("                fval_ = float(sval_)\n")
            wrt("            except (TypeError, ValueError), exp:\n")
            wrt("                raise_parse_error(child_, 'requires float or double: %s' % exp)\n")
            wrt("            fval_ = self.gds_validate_float(fval_, node, '%s')\n" % (
                name, ))
            if child.getMaxOccurs() > 1:
                wrt("            self.%s.append(fval_)\n" % (mappedName, ))
            else:
                wrt("            self.%s = fval_\n" % (mappedName, ))
        else:
            # Perhaps it's a complexType that is defined right here.
            # Generate (later) a class for the nested types.
            # fix_abstract
            type_element = None
            abstract_child = False
            type_name = child.getAttrs().get('type')
            if type_name:
                type_element = self._PGenr.ElementDict.get(type_name)
            if type_element and type_element.isAbstract():
                abstract_child = True
            if not delayed and not child in self._PGenr.DelayedElements:
                # Queue the nested type so its class gets generated later.
                self._PGenr.DelayedElements.append(child)
                self._PGenr.DelayedElements_subclass.append(child)
            wrt("        %s nodeName_ == '%s':\n" % (keyword, origName, ))
            # Is this a simple type?
            base = child.getBase()
            if child.getSimpleType():
                wrt("            obj_ = None\n")
            else:
                # name_type_problem
                # fix_abstract
                # Resolve the concrete type name: explicit type attribute,
                # then the global element dict, then the raw child type.
                if type_element:
                    type_name = type_element.getType()
                elif origName in self._PGenr.ElementDict:
                    type_name = self._PGenr.ElementDict[origName].getType()
                else:
                    type_name = childType
                type_name = self._PGenr.cleanupName(self._PGenr.mapName(type_name))
                if abstract_child:
                    wrt(TEMPLATE_ABSTRACT_CHILD % (mappedName, ))
                else:
                    type_obj = self._PGenr.ElementDict.get(type_name)
                    if type_obj is not None and type_obj.getExtended():
                        wrt("            class_obj_ = self.get_class_obj_(child_, %s%s)\n" % (
                            prefix, type_name, ))
                        wrt("            obj_ = class_obj_.factory()\n")
                    else:
                        wrt("            obj_ = %s%s.factory()\n" % (
                            prefix, type_name, ))
                wrt("            obj_.build(child_)\n")
            # Substitution-group members are stored under the group head's
            # attribute name rather than their own.
            if headChild.getMaxOccurs() > 1:
                substitutionGroup = child.getAttrs().get('substitutionGroup')
                if substitutionGroup is not None:
                    name = substitutionGroup
                else:
                    name = mappedName
                s1 = "            self.%s.append(obj_)\n" % (name, )
            else:
                substitutionGroup = child.getAttrs().get('substitutionGroup')
                if substitutionGroup is not None:
                    name = substitutionGroup
                else:
                    name = headName
                s1 = "            self.set%s(obj_)\n" % (self._PGenr.make_gs_name(name), )
            wrt(s1)
def _generateBuildValidator(self, wrt, child):
typeName = None
childType = child.getType()
if child.getSimpleType():
#typeName = child.getSimpleType()
typeName = self._PGenr.cleanupName(child.getName())
elif (childType in self._PGenr.ElementDict and
self._PGenr.ElementDict[childType].getSimpleType()):
typeName = self._PGenr.ElementDict[childType].getType()
# fixlist
mappedName = self._PGenr.mapName(child.getName())
cleanupName = self._PGenr.cleanupName(mappedName)
if (child.getSimpleType() in self._PGenr.SimpleTypeDict and
self._PGenr.SimpleTypeDict[child.getSimpleType()].isListType()):
wrt(" self.%s = self.%s.split()\n" % (
cleanupName, cleanupName, ))
typeName = child.getSimpleType()
if typeName and typeName in self._PGenr.SimpleTypeDict:
wrt(" self.validate_%s(self.%s) # validate type %s\n" % (
typeName, cleanupName, typeName, ))
def _generateBuildAnyType(self, wrt, element, any_type_child):
hasChildren = 0
if any_type_child is not None:
type_name = element.getType()
if any_type_child.getMaxOccurs() > 1:
if keyword == 'if':
fill = ''
else:
fill = ' '
wrt(" else:\n")
wrt(" %sobj_ = self.gds_build_any(child_, '%s')\n" % (
fill, type_name, ))
wrt(" %sif obj_ is not None:\n" % (fill, ))
wrt(' %sself.add_anytypeobjs_(obj_)\n' % (fill, ))
else:
if keyword == 'if':
fill = ''
else:
fill = ' '
wrt(" else:\n")
wrt(" %sobj_ = self.gds_build_any(child_, '%s')\n" % (
fill, type_name, ))
wrt(" %sif obj_ is not None:\n" % (fill, ))
wrt(' %sself.set_anytypeobjs_(obj_)\n' % (fill, ))
hasChildren += 1
return hasChildren
def generateMain(self, outfile, prefix, root):
name = self._PGenr.RootElement or root.getChildren()[0].getName()
elType = self._PGenr.cleanupName(root.getChildren()[0].getType())
if self._PGenr.RootElement:
rootElement = self._PGenr.RootElement
else:
rootElement = elType
params = {
'prefix': prefix,
'cap_name': self._PGenr.cleanupName(self._PGenr.make_gs_name(name)),
'name': name,
'cleanname': self._PGenr.cleanupName(name),
'module_name': os.path.splitext(os.path.basename(outfile.name))[0],
'root': rootElement,
'namespacedef': self._PGenr.Namespacedef,
}
s1 = self._PGenr.TEMPLATE_MAIN % params
outfile.write(s1)
class CppGenerator(object):
    """Code generator that emits C++ class declarations (parsing via
    pugixml) from the parsed XML schema.

    Mirrors the interface of the Python-emitting generator class above;
    several hooks (header, docs, exports, factory, main) are intentionally
    no-ops for C++ output.
    """
    def __init__(self, parser_generator):
        # Shared generator state: schema dicts, type maps, name helpers.
        self._PGenr = parser_generator
    def generateHeader(self, wrt, prefix):
        # No per-file C++ header boilerplate is emitted.
        pass
    def generateClassDefLine(self, wrt, parentName, prefix, name):
        """Write ``class <prefix><name>: public <base> {`` and remember the
        prefix and resolved parent-class name for later generation steps."""
        self._Prefix = prefix
        if parentName:
            s1 = 'class %s%s: public %s {\n' % (prefix, name, parentName,)
            self._ParentName = parentName
        else:
            # Schema types without an explicit base derive from the support
            # superclass shared by all generated classes.
            s1 = 'class %s%s: public GeneratedsSuper {\n' % (prefix, name)
            self._ParentName = 'GeneratedsSuper'
        wrt(s1)
    def generateElemDoc(self, wrt, element):
        # Element documentation is not emitted for C++.
        pass
    def generateSubSuperInit(self, wrt, superclass_name):
        # Constructor chaining is expressed inline in C++; nothing to emit.
        pass
    def generateCtor(self, wrt, element):
        """Emit the public constructor for *element*'s class.

        Returns the accumulated member-declaration text (``s4``) so the
        caller can later place it in the ``private:`` section (see
        generateEnd()).
        """
        elName = element.getCleanName()
        childCount = self._PGenr.countChildren(element, 0)
        (s2, s3, s4) = self.buildCtorArgs_multilevel(element, childCount)
        wrt('public:\n')
        wrt('    %s%s(%s):\n' % (self._Prefix, elName, s2))
        wrt('        %s' % (s3))
        wrt('    {\n')
        wrt('    }\n')
        return (s4)
    def buildCtorArgs_multilevel(self, element, childCount):
        """Collect constructor pieces for *element* and all its ancestors.

        :return: 3-tuple of strings -- the argument list (s2), the
            initializer-list entries (s3), the member declarations (s4)
        """
        content = []      # ctor argument list
        content_s = []    # ctor initializer-list entries
        content_d = []    # private member declarations
        addedArgs = {}
        add = content.append
        add_s = content_s.append
        add_d = content_d.append
        self.buildCtorArgs_multilevel_aux(addedArgs, add, add_s, add_d, element)
        s2 = ''.join(content)
        s3 = ''.join(content_s)   # Ctor Assign
        s4 = ''.join(content_d)   # Ctor Declare
        return (s2, s3, s4)
    def buildCtorArgs_multilevel_aux(self, addedArgs, add, add_s, add_d, element):
        # Recurse to the root ancestor first so inherited members precede
        # this level's members in the argument list.
        parentName, parentObj = self._PGenr.getParentName(element)
        if parentName:
            self.buildCtorArgs_multilevel_aux(addedArgs, add, add_s, add_d, parentObj)
        self.buildCtorArgs_aux(addedArgs, add, add_s, add_d, element)
    def buildCtorArgs_aux(self, addedArgs, add, add_s, add_d, element):
        """Append ctor args/initializers/declarations for one element level.

        NOTE(review): the attribute loop below resolves ``mapName``,
        ``StringType``, ``SchemaToCppTypeMap`` etc. as bare (module-level)
        names while the child loop goes through ``self._PGenr`` --
        presumably both point at the same tables; confirm before refactoring.
        """
        # --- schema attributes -> ctor arguments -------------------------
        attrDefs = element.getAttributeDefs()
        for key in attrDefs:
            attrDef = attrDefs[key]
            name = attrDef.getName()
            default = attrDef.getDefault()
            mappedName = name.replace(':', '_')
            mappedName = self._PGenr.cleanupName(mapName(mappedName))
            if mappedName in addedArgs:
                continue
            addedArgs[mappedName] = 1
            try:
                atype = attrDef.getData_type()
            except KeyError:
                atype = StringType
            mappedType = SchemaToCppTypeMap.get(atype)
            if atype in StringType or \
               atype == TokenType or \
               atype == DateTimeType or \
               atype == TimeType or \
               atype == DateType:
                if default is None:
                    add("%s %s, " % (mappedType, mappedName))
                else:
                    default1 = escape_string(default)
                    add("%s %s='%s', " % (mappedType, mappedName, default1))
            elif atype in IntegerType or \
                 atype == PositiveIntegerType or \
                 atype == NonPositiveIntegerType or \
                 atype == NegativeIntegerType or \
                 atype == NonNegativeIntegerType:
                # All integer flavours share the same argument form (the
                # original five identical branches are collapsed here).
                if default is None:
                    add('%s %s, ' % (mappedType, mappedName))
                else:
                    add('%s %s=%s, ' % (mappedType, mappedName, default))
            elif atype == BooleanType:
                if default is None:
                    add('%s %s, ' % (mappedType, mappedName))
                else:
                    if default in ('false', '0'):
                        add('%s %s=%s, ' % (mappedType, mappedName, "false"))
                    else:
                        add('%s %s=%s, ' % (mappedType, mappedName, "true"))
            elif atype == FloatType or atype == DoubleType or atype == DecimalType:
                if default is None:
                    add('%s %s, ' % (mappedType, mappedName))
                else:
                    add('%s %s=%s, ' % (mappedType, mappedName, default))
            else:
                if default is None:
                    add('%s %s, ' % (mappedType, mappedName))
                else:
                    add("%s %s='%s', " % (mappedType, mappedName, default, ))
        # --- child elements -> ctor args + init list + declarations ------
        nestedElements = 0
        firstChild = 0
        for child in element.getChildren():
            cleanName = child.getCleanName()
            firstChild = firstChild + 1
            if cleanName in addedArgs:
                continue
            addedArgs[cleanName] = 1
            default = child.getDefault()
            nestedElements = 1
            if firstChild > 1:
                add(", ")
                add_s(", ")
            # Bug fix: childType/mappedType were previously assigned only in
            # the final else branch, so the maxOccurs>1 branch read an
            # unbound (or stale) mappedType.  Compute them up front.
            childType = child.getType()
            mappedType = self._PGenr.SchemaToCppTypeMap.get(childType)
            if childType == self._PGenr.AnyTypeIdentifier:
                add('anytypeobjs_=NULL, ')
            elif child.getMaxOccurs() > 1:
                add(", %s %s" % (mappedType, cleanName))
            else:
                if childType in self._PGenr.StringType or \
                   childType == self._PGenr.TokenType or \
                   childType == self._PGenr.DateTimeType or \
                   childType == self._PGenr.TimeType or \
                   childType == self._PGenr.DateType:
                    if default is None:
                        add("%s %s" % (mappedType, cleanName))
                    else:
                        default1 = escape_string(default)
                        add("%s %s='%s'" % (mappedType, cleanName, default1, ))
                    # Bug fix: the defaulted branch wrote ``cleanName_``, an
                    # undefined name, raising NameError at generation time.
                    add_s("%s_(%s)" % (cleanName, cleanName))
                    add_d("    %s %s_;\n" % (mappedType, cleanName))
                elif (childType in self._PGenr.IntegerType or
                      childType == self._PGenr.PositiveIntegerType or
                      childType == self._PGenr.NonPositiveIntegerType or
                      childType == self._PGenr.NegativeIntegerType or
                      childType == self._PGenr.NonNegativeIntegerType
                      ):
                    if default is None:
                        add('%s %s' % (mappedType, cleanName))
                    else:
                        add('%s %s=%s' % (mappedType, cleanName, default, ))
                    add_s('%s_(%s)' % (cleanName, cleanName))
                    add_d('    %s %s_;\n' % (mappedType, cleanName))
                elif childType == self._PGenr.BooleanType:
                    if default is None:
                        add('%s %s' % (mappedType, cleanName))
                    else:
                        # Bug fix (both sub-branches): was ``cleanName_``.
                        if default in ('false', '0'):
                            add('%s %s=%s' % (mappedType, cleanName, "false", ))
                        else:
                            add('%s %s=%s' % (mappedType, cleanName, "true", ))
                    add_s('%s_(%s)' % (cleanName, cleanName))
                    add_d('    %s %s_;\n' % (mappedType, cleanName))
                elif childType == self._PGenr.FloatType or \
                     childType == self._PGenr.DoubleType or \
                     childType == self._PGenr.DecimalType:
                    if default is None:
                        add('%s %s' % (mappedType, cleanName))
                    else:
                        # Bug fix: emitted '<type> <name>;\n' (a declaration
                        # statement) into the argument list; emit a defaulted
                        # argument like the other numeric branches.
                        add('%s %s=%s' % (mappedType, cleanName, default, ))
                    add_s('%s_(%s)' % (cleanName, cleanName))
                    add_d('    %s %s_;\n' % (mappedType, cleanName))
                else:
                    # Complex child: the raw schema type doubles as the C++
                    # type name.
                    add('%s %s' % (childType, cleanName))
                    add_s("%s_(%s)" % (cleanName, cleanName))
                    add_d('    %s %s_;\n' % (childType, cleanName))
        # end buildCtorArgs_aux
    def generateFactory(self, wrt, prefix, name):
        # Factory functions are not generated for C++.
        pass
    def generateGetter(self, wrt, capName, name, childType):
        """Emit an inline getter; falls back to the raw schema type when the
        schema->C++ type map has no entry."""
        mappedType = self._PGenr.SchemaToCppTypeMap.get(childType)
        if mappedType is None:
            mappedType = childType
        wrt('    %s get_%s() { return %s_; }\n' % (mappedType, name, name))
    def generateSetter(self, wrt, capName, name, childType):
        """Emit an inline setter (followed by a blank line)."""
        mappedType = self._PGenr.SchemaToCppTypeMap.get(childType)
        if mappedType is None:
            mappedType = childType
        wrt('    void set_%s(%s %s) { %s_ = %s; }\n\n' %
            (name, mappedType, name, name, name))
    def generateExport(self, wrt, namespace, element):
        # Serialisation (export) code is not generated for C++.
        pass
    def generateExportAttributesFn(self, wrt, namespace, element):
        pass
    def generateExportChildrenFn(self, wrt, namespace, element):
        pass
    def generateBuild2_ReadNode(self, wrt, element):
        """Emit a ReadNode-style build() method.

        NOTE(review): the generated C++ declares ``str_<name>`` but the
        atoi()/assignment lines read the bare ``<name>`` identifier; this
        looks wrong -- confirm the intended generated API before changing.
        """
        wrt('    void build(pugi::xml_document *doc) {\n')
        for child in element.getChildren():
            cleanName = child.getCleanName()
            wrt('        std::string str_%s = doc->ReadNode(%s); \n' % (cleanName, cleanName))
            childType = child.getType()
            mappedType = self._PGenr.SchemaToCppTypeMap.get(childType)
            if mappedType == 'int':
                wrt('        %s_ = atoi(%s); \n\n' %(cleanName, cleanName))
            elif mappedType == 'string':
                wrt('        %s_ = %s; \n\n' %(cleanName, cleanName))
        wrt('    }\n\n')
    def generateBuild(self, wrt, element):
        """Emit a traversal-based build() using a pugixml tree walker that
        forwards every visited node to buildChildren()."""
        wrt('\n\n')
        wrt('    void build(pugi::xml_document *doc) {\n')
        wrt('        struct walker: pugi::xml_tree_walker { \n')
        wrt('            virtual bool for_each(pugi::xml_node& node) { \n')
        wrt('                buildChildren(node);\n')
        wrt('                return true;\n')
        wrt('            }\n')
        wrt('        };\n\n')
        wrt('        struct walker walker_;\n')
        wrt('        doc.traverse(walker_);\n')
        wrt('    }\n')
    def generateBuildAttributesFn(self, wrt, element):
        """Emit buildAttributes(): copies recognised XML attributes into
        members.

        NOTE(review): the generated assignments read ``<name>`` rather than
        ``att.value()``; verify against the intended generated API.
        """
        wrt('\n\n')
        wrt('    void buildAttributes(pugi::xml_node node) {\n')
        wrt('        pugi::xml_attribute att = node.first_attribute(); \n')
        wrt('        while (att != NULL) { \n')
        attrDefs = element.getAttributeDefs()
        for key in attrDefs:
            attrDef = attrDefs[key]
            name = attrDef.getName()
            cleanName = self._PGenr.cleanupName(name)
            atype = attrDef.getType()
            # Bug fix: previously looked up an undefined ``childType`` here
            # (NameError); the attribute's own type is what is wanted.
            mappedType = self._PGenr.SchemaToCppTypeMap.get(atype)
            wrt('            if (strcmp(att.name(), "%s") == 0) { \n' % (cleanName) )
            if mappedType == 'int':
                wrt('                %s_ = atoi(%s); \n' %(cleanName, cleanName))
            elif mappedType == 'string':
                wrt('                %s_ = %s; \n' %(cleanName, cleanName))
            wrt('            }\n')
        wrt('            att = att.next_attribute(); \n')
        wrt('        }\n')
        wrt('    }\n')
    def generateBuildChildren(self, wrt, element, prefix, delayed):
        """Emit buildChildren(): dispatches on node name and fills members.

        NOTE(review): two closing braces are emitted at the end although
        only one block is opened above, and the generated assignments read
        the member's own name rather than the node text -- both preserved
        as-is; confirm intent before changing.
        """
        wrt('\n\n')
        wrt('    void buildChildren(pugi::xml_node node) {\n')
        for child in element.getChildren():
            cleanName = child.getCleanName()
            childType = child.getType()
            mappedType = self._PGenr.SchemaToCppTypeMap.get(childType)
            wrt('        if (strcmp(node.name(), "%s") == 0) { \n' % (cleanName) )
            if mappedType == 'int':
                wrt('            %s_ = atoi(%s); \n' %(cleanName, cleanName))
            elif mappedType == 'string':
                wrt('            %s_ = %s; \n' %(cleanName, cleanName))
            wrt('        }\n')
        wrt('        buildAttributes(node); \n')
        wrt('    }\n')
        wrt('    }\n')
    def generateEnd(self, wrt, name, s4):
        """Close the class body: private member declarations, then the
        closing brace with a trailing comment naming the class."""
        wrt('private:\n')
        wrt('%s' % (s4))
        wrt('} // end of class %s' % (name))
        wrt('\n\n\n')
    def generateMain(self, outfile, prefix, root):
        # No C++ driver/main is generated.
        pass
| mit |
minhtuancn/odoo | addons/account/wizard/account_move_line_reconcile_select.py | 385 | 2362 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_move_line_reconcile_select(osv.osv_memory):
    """Wizard: pick a reconcilable account, then open its journal items
    filtered for manual reconciliation."""
    _name = "account.move.line.reconcile.select"
    _description = "Move line reconcile select"
    # Only accounts flagged as reconcilable may be selected.
    _columns = {
        'account_id': fields.many2one(
            'account.account', 'Account',
            domain=[('reconcile', '=', 1)], required=True),
    }
    def action_open_window(self, cr, uid, ids, context=None):
        """Open the account.move.line list for the selected account.

        :param cr: database cursor
        :param uid: id of the current user (for security checks)
        :param ids: id(s) of this wizard record
        :return: act_window dictionary opening the move-line list filtered
            on the chosen account's unreconciled, posted entries
        """
        record = self.read(cr, uid, ids, context=context)[0]
        domain = ("[('account_id','=',%d),('reconcile_id','=',False),"
                  "('state','<>','draft')]") % record['account_id']
        return {
            'domain': domain,
            'name': _('Reconciliation'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'view_id': False,
            'res_model': 'account.move.line',
            'type': 'ir.actions.act_window',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
caplio/sc02c | tools/perf/scripts/python/syscall-counts-by-pid.py | 944 | 1744 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
# Usage text printed when too many command-line arguments are supplied.
usage = "perf trace -s syscall-counts-by-pid.py [comm]\n";

# Optional filter: when set, only syscalls issued by this command name count.
for_comm = None

if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	for_comm = sys.argv[1]

# Nested autodict: comm -> pid -> syscall id -> count.
syscalls = autodict()
def trace_begin():
	# Called by perf before event processing starts; nothing to set up.
	pass
def trace_end():
	# Called by perf after the last event; emit the accumulated report.
	print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Per-event hook: count one syscall entry under (comm, pid, syscall id).
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[common_comm][common_pid][id] += 1
	except TypeError:
		# First occurrence of this (comm, pid, id): autodict yields an
		# empty dict so '+=' raises TypeError; start the counter at 1.
		syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
	# Report: for each comm/pid, syscall ids sorted by descending count.
	if for_comm is not None:
		print "\nsyscall events for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall events by comm/pid:\n\n",

	print "%-40s  %10s\n" % ("comm [pid]/syscalls", "count"),
	print "%-40s  %10s\n" % ("----------------------------------------", \
				 "----------"),

	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			# Sort by count (descending), then by syscall id.
			for id, val in sorted(syscalls[comm][pid].iteritems(), \
				key = lambda(k, v): (v, k), reverse = True):
				print "  %-38d  %10d\n" % (id, val),
| gpl-2.0 |
aliutkus/commonfate | examples/cfm_decompose.py | 1 | 1202 | import os
import numpy as np
import soundfile as sf
import argparse
from commonfate import decompose
def export(input, input_file, output_path, samplerate):
    """Write each separated component, plus their sum, as WAV files.

    Parameters
    ----------
    input : array-like
        Separated components; axis 0 indexes the component.  (The name
        shadows the builtin but is kept for caller compatibility.)
    input_file : str
        Path of the original audio file; its basename seeds output names.
    output_path : str
        Directory receiving the WAV files (created if missing).
    samplerate : int
        Sample rate passed through to soundfile.
    """
    # exist_ok avoids the race between the old exists() check and makedirs().
    os.makedirs(output_path, exist_ok=True)

    basepath = os.path.join(
        output_path, os.path.splitext(os.path.basename(input_file))[0]
    )

    # Write out all components.
    for i, component in enumerate(input):
        sf.write(
            basepath + "_cpnt-" + str(i) + ".wav",
            component,
            samplerate
        )

    # The sum of all components approximates the original mixture.
    out_sum = np.sum(input, axis=0)
    sf.write(basepath + '_reconstruction.wav', out_sum, samplerate)
if __name__ == '__main__':
    # Command-line entry point: decompose one audio file with the Common
    # Fate Model and write the components under ./output.
    parser = argparse.ArgumentParser(
        description='Source Separation based on Common Fate Model')
    parser.add_argument('input', type=str, help='Input Audio File')
    args = parser.parse_args()
    filename = args.input
    # loading signal
    (audio, fs) = sf.read(filename, always_2d=True)
    # Common-fate decomposition: 2 components estimated over patched
    # common-fate-transform frames (10 multiplicative-update iterations).
    out = decompose.process(
        audio,
        nb_iter=10,
        nb_components=2,
        n_fft=1024,
        n_hop=256,
        cft_patch=(32, 48),
        cft_hop=(16, 24)
    )
    export(out, filename, 'output', fs)
| bsd-3-clause |
frouty/odoogoeen | addons/point_of_sale/wizard/pos_payment_report.py | 55 | 1808 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class pos_payment_report(osv.osv_memory):
    """Transient wizard that launches the point-of-sale payment report."""
    _name = 'pos.payment.report'
    _description = 'Payment Report'
    def print_report(self, cr, uid, ids, context=None):
        """Build the report action for the currently selected POS orders.

        :param cr: database cursor
        :param uid: id of the logged-in user
        :param context: standard context dictionary (may carry active_ids)
        :return: ir.actions.report.xml description for pos.payment.report
        """
        if context is None:
            context = {}
        active_ids = context.get('active_ids', [])
        return {
            'type': 'ir.actions.report.xml',
            'report_name': 'pos.payment.report',
            'datas': {'ids': active_ids},
        }
pos_payment_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ntonjeta/iidea-Docker | examples/sobel/src/boost_1_63_0/tools/build/test/build_file.py | 44 | 5117 | #!/usr/bin/python
# Copyright (C) 2006. Vladimir Prus
# Copyright (C) 2008. Jurko Gospodnetic
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests that we explicitly request a file (not target) to be built by
# specifying its name on the command line.
import BoostBuild
###############################################################################
#
# test_building_file_from_specific_project()
# ------------------------------------------
#
###############################################################################
def test_building_file_from_specific_project():
    """Requesting "sub <obj>" must build the object only in project sub,
    even though the root project defines targets with the same names."""
    t = BoostBuild.Tester(use_test_config=False)
    t.write("jamroot.jam", """\
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
build-project sub ;
""")
    t.write("hello.cpp", "int main() {}\n")
    t.write("sub/jamfile.jam", """
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
exe sub : hello.cpp ;
""")
    t.write("sub/hello.cpp", "int main() {}\n")
    t.run_build_system(["sub", t.adjust_suffix("hello.obj")])
    # The 'sub' target sharing its name with the project must not recurse.
    t.expect_output_lines("*depends on itself*", False)
    t.expect_addition("sub/bin/$toolset/debug/hello.obj")
    t.expect_nothing_more()
    t.cleanup()
###############################################################################
#
# test_building_file_from_specific_target()
# -----------------------------------------
#
###############################################################################
def test_building_file_from_specific_target():
    """Requesting "hello1 <obj>" must build only hello1's object file."""
    t = BoostBuild.Tester(use_test_config=False)
    t.write("jamroot.jam", """\
exe hello1 : hello1.cpp ;
exe hello2 : hello2.cpp ;
exe hello3 : hello3.cpp ;
""")
    t.write("hello1.cpp", "int main() {}\n")
    t.write("hello2.cpp", "int main() {}\n")
    t.write("hello3.cpp", "int main() {}\n")
    t.run_build_system(["hello1", t.adjust_suffix("hello1.obj")])
    t.expect_addition("bin/$toolset/debug/hello1.obj")
    t.expect_nothing_more()
    t.cleanup()
###############################################################################
#
# test_building_missing_file_from_specific_target()
# -------------------------------------------------
#
###############################################################################
def test_building_missing_file_from_specific_target():
    """Requesting a file that the named target cannot produce must fail
    with "don't know how to make"."""
    t = BoostBuild.Tester(use_test_config=False)
    t.write("jamroot.jam", """\
exe hello1 : hello1.cpp ;
exe hello2 : hello2.cpp ;
exe hello3 : hello3.cpp ;
""")
    t.write("hello1.cpp", "int main() {}\n")
    t.write("hello2.cpp", "int main() {}\n")
    t.write("hello3.cpp", "int main() {}\n")
    obj = t.adjust_suffix("hello2.obj")
    # hello2.obj is not produced by target hello1 -> expect a build error.
    t.run_build_system(["hello1", obj], status=1)
    t.expect_output_lines("don't know how to make*" + obj)
    t.expect_nothing_more()
    t.cleanup()
###############################################################################
#
# test_building_multiple_files_with_different_names()
# ---------------------------------------------------
#
###############################################################################
def test_building_multiple_files_with_different_names():
    """Several object files named on the command line must all be built
    (and nothing else)."""
    t = BoostBuild.Tester(use_test_config=False)
    t.write("jamroot.jam", """\
exe hello1 : hello1.cpp ;
exe hello2 : hello2.cpp ;
exe hello3 : hello3.cpp ;
""")
    t.write("hello1.cpp", "int main() {}\n")
    t.write("hello2.cpp", "int main() {}\n")
    t.write("hello3.cpp", "int main() {}\n")
    t.run_build_system([t.adjust_suffix("hello1.obj"), t.adjust_suffix(
        "hello2.obj")])
    t.expect_addition("bin/$toolset/debug/hello1.obj")
    t.expect_addition("bin/$toolset/debug/hello2.obj")
    t.expect_nothing_more()
    t.cleanup()
###############################################################################
#
# test_building_multiple_files_with_the_same_name()
# -------------------------------------------------
#
###############################################################################
def test_building_multiple_files_with_the_same_name():
    """A bare object-file name must build every matching file across all
    projects (root and sub) without self-dependency errors."""
    t = BoostBuild.Tester(use_test_config=False)
    t.write("jamroot.jam", """\
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
build-project sub ;
""")
    t.write("hello.cpp", "int main() {}\n")
    t.write("sub/jamfile.jam", """
exe hello : hello.cpp ;
exe hello2 : hello.cpp ;
exe sub : hello.cpp ;
""")
    t.write("sub/hello.cpp", "int main() {}\n")
    t.run_build_system([t.adjust_suffix("hello.obj")])
    t.expect_output_lines("*depends on itself*", False)
    t.expect_addition("bin/$toolset/debug/hello.obj")
    t.expect_addition("sub/bin/$toolset/debug/hello.obj")
    t.expect_nothing_more()
    t.cleanup()
###############################################################################
#
# main()
# ------
#
###############################################################################
# Run every scenario when the module is executed by the test driver.
test_building_file_from_specific_project()
test_building_file_from_specific_target()
test_building_missing_file_from_specific_target()
test_building_multiple_files_with_different_names()
test_building_multiple_files_with_the_same_name()
| agpl-3.0 |
flwh/KK_mt6589_iq451 | prebuilts/python/darwin-x86/2.7.5/lib/python2.7/test/test_richcmp.py | 129 | 11466 | # Tests for rich comparisons
import unittest
from test import test_support
import operator
class Number:
    # Wraps a value and exposes only the six rich-comparison methods,
    # delegating each to the wrapped value.  __cmp__ is a tripwire: rich
    # comparisons must take precedence, so it should never be invoked.
    def __init__(self, x):
        self.x = x
    def __lt__(self, other):
        return self.x < other
    def __le__(self, other):
        return self.x <= other
    def __eq__(self, other):
        return self.x == other
    def __ne__(self, other):
        return self.x != other
    def __gt__(self, other):
        return self.x > other
    def __ge__(self, other):
        return self.x >= other
    def __cmp__(self, other):
        # Reaching three-way comparison means rich comparison was bypassed.
        raise test_support.TestFailed, "Number.__cmp__() should not be called"
    def __repr__(self):
        return "Number(%r)" % (self.x, )
class Vector:
    # Sequence whose rich comparisons are itemwise: each operator returns a
    # new Vector of per-element booleans (numpy-style), so the result is
    # deliberately not usable in a Boolean context (__nonzero__ raises).
    def __init__(self, data):
        self.data = data
    def __len__(self):
        return len(self.data)
    def __getitem__(self, i):
        return self.data[i]
    def __setitem__(self, i, v):
        self.data[i] = v
    __hash__ = None # Vectors cannot be hashed
    def __nonzero__(self):
        raise TypeError, "Vectors cannot be used in Boolean contexts"
    def __cmp__(self, other):
        # Tripwire: rich comparisons must be preferred over three-way cmp.
        raise test_support.TestFailed, "Vector.__cmp__() should not be called"
    def __repr__(self):
        return "Vector(%r)" % (self.data, )
    def __lt__(self, other):
        return Vector([a < b for a, b in zip(self.data, self.__cast(other))])
    def __le__(self, other):
        return Vector([a <= b for a, b in zip(self.data, self.__cast(other))])
    def __eq__(self, other):
        return Vector([a == b for a, b in zip(self.data, self.__cast(other))])
    def __ne__(self, other):
        return Vector([a != b for a, b in zip(self.data, self.__cast(other))])
    def __gt__(self, other):
        return Vector([a > b for a, b in zip(self.data, self.__cast(other))])
    def __ge__(self, other):
        return Vector([a >= b for a, b in zip(self.data, self.__cast(other))])
    def __cast(self, other):
        # Accept a plain sequence or another Vector of the same length.
        if isinstance(other, Vector):
            other = other.data
        if len(self.data) != len(other):
            raise ValueError, "Cannot compare vectors of different length"
        return other
# For each comparison, three equivalent ways of invoking it: an inline
# expression, the operator-module function, and its dunder-named alias.
# The tests iterate over all three to ensure they dispatch identically.
opmap = {
    "lt": (lambda a,b: a< b, operator.lt, operator.__lt__),
    "le": (lambda a,b: a<=b, operator.le, operator.__le__),
    "eq": (lambda a,b: a==b, operator.eq, operator.__eq__),
    "ne": (lambda a,b: a!=b, operator.ne, operator.__ne__),
    "gt": (lambda a,b: a> b, operator.gt, operator.__gt__),
    "ge": (lambda a,b: a>=b, operator.ge, operator.__ge__)
}
class VectorTest(unittest.TestCase):
    """Elementwise (vector-valued) rich comparison results."""
    def checkfail(self, error, opname, *args):
        """Assert that every spelling of the operator raises `error`."""
        for op in opmap[opname]:
            self.assertRaises(error, op, *args)
    def checkequal(self, opname, a, b, expres):
        """Assert every spelling of the operator yields `expres` elementwise."""
        for op in opmap[opname]:
            realres = op(a, b)
            # can't use assertEqual(realres, expres) here
            self.assertEqual(len(realres), len(expres))
            for i in xrange(len(realres)):
                # results are bool, so we can use "is" here
                self.assertTrue(realres[i] is expres[i])
    def test_mixed(self):
        # check that comparisons involving Vector objects
        # which return rich results (i.e. Vectors with itemwise
        # comparison results) work
        a = Vector(range(2))
        b = Vector(range(3))
        # all comparisons should fail for different length
        for opname in opmap:
            self.checkfail(ValueError, opname, a, b)
        a = range(5)
        b = 5 * [2]
        # try mixed arguments (but not (a, b) as that won't return a bool vector)
        args = [(a, Vector(b)), (Vector(a), b), (Vector(a), Vector(b))]
        for (a, b) in args:
            self.checkequal("lt", a, b, [True, True, False, False, False])
            self.checkequal("le", a, b, [True, True, True, False, False])
            self.checkequal("eq", a, b, [False, False, True, False, False])
            self.checkequal("ne", a, b, [True, True, False, True, True ])
            self.checkequal("gt", a, b, [False, False, False, True, True ])
            self.checkequal("ge", a, b, [False, False, True, True, True ])
            for ops in opmap.itervalues():
                for op in ops:
                    # calls __nonzero__, which should fail
                    self.assertRaises(TypeError, bool, op(a, b))
class NumberTest(unittest.TestCase):
    """Scalar rich comparisons on Number wrappers vs. plain ints."""
    def test_basic(self):
        # Check that comparisons involving Number objects
        # give the same results as comparing the
        # corresponding ints
        for a in xrange(3):
            for b in xrange(3):
                for typea in (int, Number):
                    for typeb in (int, Number):
                        if typea==typeb==int:
                            continue # the combination int, int is useless
                        ta = typea(a)
                        tb = typeb(b)
                        for ops in opmap.itervalues():
                            for op in ops:
                                realoutcome = op(a, b)
                                testoutcome = op(ta, tb)
                                self.assertEqual(realoutcome, testoutcome)
    def checkvalue(self, opname, a, b, expres):
        """Assert every spelling of `opname` returns exactly `expres` for all
        int/Number argument combinations (unwrapping a Number result)."""
        for typea in (int, Number):
            for typeb in (int, Number):
                ta = typea(a)
                tb = typeb(b)
                for op in opmap[opname]:
                    realres = op(ta, tb)
                    realres = getattr(realres, "x", realres)
                    self.assertTrue(realres is expres)
    def test_values(self):
        # check all operators and all comparison results
        self.checkvalue("lt", 0, 0, False)
        self.checkvalue("le", 0, 0, True )
        self.checkvalue("eq", 0, 0, True )
        self.checkvalue("ne", 0, 0, False)
        self.checkvalue("gt", 0, 0, False)
        self.checkvalue("ge", 0, 0, True )
        self.checkvalue("lt", 0, 1, True )
        self.checkvalue("le", 0, 1, True )
        self.checkvalue("eq", 0, 1, False)
        self.checkvalue("ne", 0, 1, True )
        self.checkvalue("gt", 0, 1, False)
        self.checkvalue("ge", 0, 1, False)
        self.checkvalue("lt", 1, 0, False)
        self.checkvalue("le", 1, 0, False)
        self.checkvalue("eq", 1, 0, False)
        self.checkvalue("ne", 1, 0, True )
        self.checkvalue("gt", 1, 0, True )
        self.checkvalue("ge", 1, 0, True )
class MiscTest(unittest.TestCase):
    """Assorted dispatch rules: precedence over __cmp__, `not` propagation,
    and graceful failure on recursive structures."""
    def test_misbehavin(self):
        # Rich comparisons must take precedence over __cmp__; the __le__,
        # __ge__, __ne__ and __cmp__ traps must fire only where expected.
        class Misb:
            def __lt__(self_, other): return 0
            def __gt__(self_, other): return 0
            def __eq__(self_, other): return 0
            def __le__(self_, other): self.fail("This shouldn't happen")
            def __ge__(self_, other): self.fail("This shouldn't happen")
            def __ne__(self_, other): self.fail("This shouldn't happen")
            def __cmp__(self_, other): raise RuntimeError, "expected"
        a = Misb()
        b = Misb()
        self.assertEqual(a<b, 0)
        self.assertEqual(a==b, 0)
        self.assertEqual(a>b, 0)
        self.assertRaises(RuntimeError, cmp, a, b)
    def test_not(self):
        # Check that exceptions in __nonzero__ are properly
        # propagated by the not operator
        import operator
        class Exc(Exception):
            pass
        class Bad:
            def __nonzero__(self):
                raise Exc
        def do(bad):
            not bad
        for func in (do, operator.not_):
            self.assertRaises(Exc, func, Bad())
    def test_recursion(self):
        # Check that comparison for recursive objects fails gracefully
        from UserList import UserList
        a = UserList()
        b = UserList()
        a.append(b)
        b.append(a)
        self.assertRaises(RuntimeError, operator.eq, a, b)
        self.assertRaises(RuntimeError, operator.ne, a, b)
        self.assertRaises(RuntimeError, operator.lt, a, b)
        self.assertRaises(RuntimeError, operator.le, a, b)
        self.assertRaises(RuntimeError, operator.gt, a, b)
        self.assertRaises(RuntimeError, operator.ge, a, b)
        b.append(17)
        # Even recursive lists of different lengths are different,
        # but they cannot be ordered
        self.assertTrue(not (a == b))
        self.assertTrue(a != b)
        self.assertRaises(RuntimeError, operator.lt, a, b)
        self.assertRaises(RuntimeError, operator.le, a, b)
        self.assertRaises(RuntimeError, operator.gt, a, b)
        self.assertRaises(RuntimeError, operator.ge, a, b)
        a.append(17)
        self.assertRaises(RuntimeError, operator.eq, a, b)
        self.assertRaises(RuntimeError, operator.ne, a, b)
        a.insert(0, 11)
        b.insert(0, 12)
        self.assertTrue(not (a == b))
        self.assertTrue(a != b)
        self.assertTrue(a < b)
class DictTest(unittest.TestCase):
    """Dict equality with values that only support __eq__/__ne__/__hash__."""
    def test_dicts(self):
        # Verify that __eq__ and __ne__ work for dicts even if the keys and
        # values don't support anything other than __eq__ and __ne__ (and
        # __hash__). Complex numbers are a fine example of that.
        import random
        imag1a = {}
        for i in range(50):
            imag1a[random.randrange(100)*1j] = random.randrange(100)*1j
        items = imag1a.items()
        random.shuffle(items)
        imag1b = {}
        for k, v in items:
            imag1b[k] = v
        # imag2 differs from imag1b in exactly one value.
        imag2 = imag1b.copy()
        imag2[k] = v + 1.0
        self.assertTrue(imag1a == imag1a)
        self.assertTrue(imag1a == imag1b)
        self.assertTrue(imag2 == imag2)
        self.assertTrue(imag1a != imag2)
        # Ordering comparisons on unorderable values must raise TypeError.
        for opname in ("lt", "le", "gt", "ge"):
            for op in opmap[opname]:
                self.assertRaises(TypeError, op, imag1a, imag2)
class ListTest(unittest.TestCase):
    """Rich comparisons on built-in lists (list_richcompare in listobject.c)."""
    def test_coverage(self):
        # exercise all comparisons for lists
        x = [42]
        self.assertIs(x<x, False)
        self.assertIs(x<=x, True)
        self.assertIs(x==x, True)
        self.assertIs(x!=x, False)
        self.assertIs(x>x, False)
        self.assertIs(x>=x, True)
        y = [42, 42]
        self.assertIs(x<y, True)
        self.assertIs(x<=y, True)
        self.assertIs(x==y, False)
        self.assertIs(x!=y, True)
        self.assertIs(x>y, False)
        self.assertIs(x>=y, False)
    def test_badentry(self):
        # make sure that exceptions for item comparison are properly
        # propagated in list comparisons
        class Exc(Exception):
            pass
        class Bad:
            def __eq__(self, other):
                raise Exc
        x = [Bad()]
        y = [Bad()]
        for op in opmap["eq"]:
            self.assertRaises(Exc, op, x, y)
    def test_goodentry(self):
        # This test exercises the final call to PyObject_RichCompare()
        # in Objects/listobject.c::list_richcompare()
        class Good:
            def __lt__(self, other):
                return True
        x = [Good()]
        y = [Good()]
        for op in opmap["lt"]:
            self.assertIs(op(x, y), True)
def test_main():
    """Run all test cases; DictTest runs with the py3k dict-ordering
    DeprecationWarning silenced so the suite stays quiet under -3."""
    test_support.run_unittest(VectorTest, NumberTest, MiscTest, ListTest)
    with test_support.check_py3k_warnings(("dict inequality comparisons "
                                           "not supported in 3.x",
                                           DeprecationWarning)):
        test_support.run_unittest(DictTest)
if __name__ == "__main__":
    test_main()
| gpl-2.0 |
arquetype/thus | src/jobs/job_5.py | 1 | 3412 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# job_cleanup_drivers
#
# Copyright 2014 KaOS (http://kaosx.us)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Clean up unused drivers """
from jobs.helpers import *
import logging
import os
import shutil
import subprocess
def job_cleanup_drivers(self):
    """Remove video and input drivers the detected hardware does not need.

    Reads the driver list produced during hardware detection
    (/tmp/used_drivers) and the live Xorg log, then uninstalls the unneeded
    xf86 driver packages from the target system via pacman in a chroot.
    Every pacman removal is best-effort: a package that is not installed
    must not abort the installation, so errors are swallowed.
    """
    msg_job_start('job_cleanup_drivers')
    ###########################################################################
    # CLEANUP XORG DRIVERS
    ###########################################################################
    msg('cleaning up video drivers')
    # Remove a stale pacman lock left by an interrupted transaction,
    # otherwise every pacman call below would fail immediately.
    db_lock = os.path.join(self.dest_dir, "var/lib/pacman/db.lck")
    if os.path.exists(db_lock):
        with misc.raised_privileges():
            os.remove(db_lock)
        logging.debug(_("%s deleted"), db_lock)
    if os.path.exists("/tmp/used_drivers"):
        with open("/tmp/used_drivers", "r") as searchfile:
            for line in searchfile:
                if "intel" in line:
                    print(line)
                else:
                    # Best-effort: ignore pacman errors (e.g. not installed).
                    try:
                        self.chroot(['pacman', '-Rns', '--noconfirm', 'xf86-video-vmware'])
                    except Exception:
                        pass
                if "nouveau" in line:
                    print(line)
                else:
                    try:
                        self.chroot(['pacman', '-Rns', '--noconfirm', 'xf86-video-nouveau', 'xf86-video-vmware'])
                    except Exception:
                        pass
                if "ati" in line or "radeon" in line:
                    print(line)
                else:
                    try:
                        self.chroot(['pacman', '-Rns', '--noconfirm', 'xf86-video-ati', 'xf86-video-vmware'])
                    except Exception:
                        pass
        # NOTE: the 'with' block closes the file; the explicit close() that
        # used to follow here was redundant and has been removed.
    else:
        # No detection data at all: drop the fallback drivers unconditionally.
        try:
            self.chroot(['pacman', '-Rns', '--noconfirm', 'xf86-video-ati', 'xf86-video-vmware'])
        except Exception:
            pass
    msg('video driver removal complete')
    ###########################################################################
    # CLEANUP INPUT DRIVERS
    ###########################################################################
    msg('cleaning up input drivers')
    with open("/var/log/Xorg.0.log", "r") as f:
        has_synaptics, has_wacom = False, False
        for line in f:
            if not has_synaptics and "synaptics" in line:
                has_synaptics = True
            if not has_wacom and "wacom" in line:
                has_wacom = True
        if not has_synaptics:
            try:
                self.chroot(['pacman', '-Rncs', '--noconfirm', 'xf86-input-synaptics'])
            except Exception:
                pass
        if not has_wacom:
            try:
                self.chroot(['pacman', '-Rncs', '--noconfirm', 'xf86-input-wacom'])
            except Exception:
                pass
    msg('input driver removal complete')
    # Bug fix: msg_job_done() was previously emitted twice (once before the
    # final status message and once after); report completion exactly once.
    msg_job_done('job_cleanup_drivers')
| gpl-3.0 |
ahamilton55/ansible | test/units/modules/network/iosxr/test_iosxr_facts.py | 62 | 3226 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
# Python 2 compatibility: make every class defined in this module new-style.
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from .iosxr_module import TestIosxrModule, load_fixture, set_module_args
from ansible.modules.network.iosxr import iosxr_facts
class TestIosxrFacts(TestIosxrModule):
    """Tests for the iosxr_facts module.

    run_commands is patched so that every device command is answered from a
    fixture file named after the command (spaces -> '_', '/' -> '7').
    """
    module = iosxr_facts

    def setUp(self):
        # NOTE(review): the base class setUp/tearDown are not invoked here;
        # confirm TestIosxrModule does not rely on them being called.
        self.mock_run_commands = patch(
            'ansible.modules.network.iosxr.iosxr_facts.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):

        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for item in commands:
                try:
                    # A command may be a JSON-encoded dict carrying a
                    # 'command' key, or a plain string.
                    obj = json.loads(item)
                    command = obj['command']
                except ValueError:
                    command = item
                # Map the command string to its fixture file name.
                filename = str(command).replace(' ', '_')
                filename = filename.replace('/', '7')
                output.append(load_fixture(filename))
            return output

        self.run_commands.side_effect = load_from_file

    def test_iosxr_facts_gather_subset_default(self):
        set_module_args(dict())
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual('iosxr01', ansible_facts['ansible_net_hostname'])
        self.assertEqual(['disk0:', 'flash0:'], ansible_facts['ansible_net_filesystems'])
        self.assertIn('GigabitEthernet0/0/0/0', ansible_facts['ansible_net_interfaces'].keys())
        self.assertEqual('3095', ansible_facts['ansible_net_memtotal_mb'])
        self.assertEqual('1499', ansible_facts['ansible_net_memfree_mb'])

    def test_iosxr_facts_gather_subset_config(self):
        set_module_args({'gather_subset': 'config'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('config', ansible_facts['ansible_net_gather_subset'])
        self.assertEqual('iosxr01', ansible_facts['ansible_net_hostname'])
        self.assertIn('ansible_net_config', ansible_facts)
EUDAT-B2SHARE/invenio-old | modules/webstyle/lib/goto_plugins/goto_plugin_latest_record.py | 8 | 2360 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Demonstrative PURL implementing a redirection to the very last record
(of a collection)
"""
from invenio.config import CFG_SITE_NAME, CFG_SITE_RECORD
from invenio.search_engine import perform_request_search
from invenio.bibdocfile import BibRecDocs, InvenioBibDocFileError
def goto(cc=CFG_SITE_NAME, p='', f='', sf='', so='d', docname='', format=''):
    """
    Redirect the user to the latest record in the given collection,
    optionally within the specified pattern and field. If docname
    and format are specified, redirect the user to the corresponding
    docname and format. If docname is not specified, but there is
    only a single bibdoc attached to the record, redirect to that one.

    Returns the target URL, or None if no record matches the query.
    """
    recids = perform_request_search(cc=cc, p=p, f=f, sf=sf, so=so)
    if not recids:
        # No matching record: no redirection target.
        return None
    ## We shall take the last recid. This is the last one
    recid = recids[-1]
    url = '/%s/%s' % (CFG_SITE_RECORD, recid)
    if format:
        bibrecdocs = BibRecDocs(recid)
        if not docname:
            if len(bibrecdocs.get_bibdoc_names()) == 1:
                docname = bibrecdocs.get_bibdoc_names()[0]
            else:
                # Ambiguous: several documents attached and no docname given,
                # so fall back to the plain record URL.
                return url
        try:
            # Reuse the BibRecDocs instance built above instead of
            # constructing a second one for the same record (old code did
            # BibRecDocs(recid).get_bibdoc(docname) redundantly).
            bibdoc = bibrecdocs.get_bibdoc(docname)
        except InvenioBibDocFileError:
            return url
        try:
            bibdocfile = bibdoc.get_file(format=format)
            return bibdocfile.get_url()
        except InvenioBibDocFileError:
            return url
    return url
| gpl-2.0 |
xinhunbie/NS3- | src/mesh/doc/source/conf.py | 87 | 7452 | # -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE: pngmath renders inline LaTeX math as PNG images (this extension was
# removed in Sphinx 1.4 in favour of sphinx.ext.imgmath).
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'mesh'
# General information about the project.
project = u'ns-3'
copyright = u'ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3-dev'
# The full version, including alpha/beta/rc tags.
release = 'ns-3-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# (Empty: nothing is excluded.)
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. 'default' is Sphinx's classic built-in theme.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
#htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  # Former per-audience builds, kept for reference:
  # ('mesh-testing', 'mesh-doc-testing.tex', u'Mesh Wi-Fi Testing Documentation', u'ns-3 project', 'manual'),
  # ('mesh-design', 'mesh-doc-design.tex', u'Mesh Wi-Fi Design Documentation', u'ns-3 project', 'manual'),
  # ('mesh-user', 'mesh-doc-user.tex', u'Mesh Wi-Fi User Documentation', u'ns-3 project', 'manual'),
  ('mesh', 'mesh-module-doc.tex', u'The ns-3 Mesh Wi-Fi Module Documentation', u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# Add page breaks in the PDF: level 1 breaks at top-level sections, level 2
# at subsections, and so on.
# NOTE(review): pdf_break_level is an rst2pdf option, not a core Sphinx/LaTeX
# setting — it only takes effect when building with the rst2pdf builder.
pdf_break_level = 4
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Single man page in section 1 (user commands), built from the master doc.
man_pages = [
    ('index', 'ns-3-model-library', u'ns-3 Model Library',
     [u'ns-3 project'], 1)
]
| gpl-2.0 |
demonchild2112/travis-test | grr/core/grr_response_core/lib/parsers/linux_release_parser_test.py | 2 | 6232 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Unit test for the linux distribution parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
from absl import app
from grr_response_core.lib.parsers import linux_release_parser
from grr_response_core.lib.rdfvalues import anomaly as rdf_anomaly
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
from grr.test_lib import test_lib
class LinuxReleaseParserTest(test_lib.GRRBaseTest):
"""Test parsing of linux distribution collection."""
def setUp(self):
super(LinuxReleaseParserTest, self).setUp()
self.parser_test_dir = os.path.join(self.base_path, "parser_test")
def testMalformedLsbReleaseFile(self):
path = os.path.join(self.parser_test_dir, "lsb-release-bad")
with io.open(path, "r") as f:
data = f.read()
parser = linux_release_parser.LsbReleaseParseHandler(data)
complete, result = parser.Parse()
self.assertFalse(complete)
self.assertTupleEqual((None, 0, 0), result)
def testGoodLsbReleaseFile(self):
path = os.path.join(self.parser_test_dir, "lsb-release")
with io.open(path, "r") as f:
data = f.read()
parser = linux_release_parser.LsbReleaseParseHandler(data)
complete, result = parser.Parse()
self.assertTrue(complete)
self.assertTupleEqual(("Ubuntu", 14, 4), result)
def testFallbackLsbReleaseFile(self):
path = os.path.join(self.parser_test_dir, "lsb-release-notubuntu")
with io.open(path, "r") as f:
data = f.read()
parser = linux_release_parser.LsbReleaseParseHandler(data)
complete, result = parser.Parse()
self.assertFalse(complete)
self.assertTupleEqual(("NotUbuntu", 0, 0), result)
def testReleaseFileRedHatish(self):
path = os.path.join(self.parser_test_dir, "oracle-release")
with io.open(path, "r") as f:
data = f.read()
parser = linux_release_parser.ReleaseFileParseHandler("OracleLinux")
parser(data)
complete, result = parser.Parse()
self.assertTrue(complete)
self.assertTupleEqual(("OracleLinux", 6, 5), result)
def testMalformedReleaseFileRedHatish(self):
path = os.path.join(self.parser_test_dir, "oracle-release-bad")
with io.open(path, "r") as f:
data = f.read()
parser = linux_release_parser.ReleaseFileParseHandler("OracleLinux")
parser(data)
complete, result = parser.Parse()
self.assertFalse(complete)
self.assertTupleEqual(("OracleLinux", 0, 0), result)
def _CreateTestData(self, testdata):
"""Create 'stats' and 'file_objects' lists for passing to ParseMultiple."""
pathspecs = []
files = []
for filepath, localfile in testdata:
files.append(open(localfile, "rb"))
p = rdf_paths.PathSpec(path=filepath)
pathspecs.append(p)
return pathspecs, files
def testEndToEndUbuntu(self):
parser = linux_release_parser.LinuxReleaseParser()
testdata = [
("/etc/lsb-release", os.path.join(self.parser_test_dir, "lsb-release")),
]
pathspecs, files = self._CreateTestData(testdata)
result = list(parser.ParseFiles(None, pathspecs, files)).pop()
self.assertIsInstance(result, rdf_protodict.Dict)
self.assertEqual("Ubuntu", result["os_release"])
self.assertEqual(14, result["os_major_version"])
self.assertEqual(4, result["os_minor_version"])
def testEndToEndOracleLinux(self):
parser = linux_release_parser.LinuxReleaseParser()
testdata = [
("/etc/lsb-release",
os.path.join(self.parser_test_dir, "lsb-release-notubuntu")),
("/etc/oracle-release",
os.path.join(self.parser_test_dir, "oracle-release")),
]
pathspecs, files = self._CreateTestData(testdata)
result = list(parser.ParseFiles(None, pathspecs, files)).pop()
self.assertIsInstance(result, rdf_protodict.Dict)
self.assertEqual("OracleLinux", result["os_release"])
self.assertEqual(6, result["os_major_version"])
self.assertEqual(5, result["os_minor_version"])
def testEndToEndAmazon(self):
parser = linux_release_parser.LinuxReleaseParser()
test_data = [
("/etc/system-release",
os.path.join(self.parser_test_dir, "amazon-system-release")),
]
pathspecs, file_objects = self._CreateTestData(test_data)
actual_result = list(parser.ParseFiles(None, pathspecs, file_objects))
expected_result = [
rdf_protodict.Dict({
"os_release": "AmazonLinuxAMI",
"os_major_version": 2018,
"os_minor_version": 3,
})
]
self.assertCountEqual(actual_result, expected_result)
def testEndToEndCoreOS(self):
parser = linux_release_parser.LinuxReleaseParser()
test_data = [
("/etc/os-release",
os.path.join(self.parser_test_dir, "coreos-os-release")),
]
pathspecs, file_objects = self._CreateTestData(test_data)
actual_result = list(parser.ParseFiles(None, pathspecs, file_objects))
expected_result = [
rdf_protodict.Dict({
"os_release": "Container Linux by CoreOS",
"os_major_version": 2023,
"os_minor_version": 4,
})
]
self.assertCountEqual(actual_result, expected_result)
def testEndToEndGoogleCOS(self):
parser = linux_release_parser.LinuxReleaseParser()
test_data = [
("/etc/os-release",
os.path.join(self.parser_test_dir, "google-cos-os-release")),
]
pathspecs, file_objects = self._CreateTestData(test_data)
actual_result = list(parser.ParseFiles(None, pathspecs, file_objects))
expected_result = [
rdf_protodict.Dict({
"os_release": "Container-Optimized OS",
"os_major_version": 69,
"os_minor_version": 0,
})
]
self.assertCountEqual(actual_result, expected_result)
def testAnomaly(self):
parser = linux_release_parser.LinuxReleaseParser()
result = list(parser.ParseFiles(None, [], []))
self.assertLen(result, 1)
self.assertIsInstance(result[0], rdf_anomaly.Anomaly)
def main(args):
test_lib.main(args)
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
ChristineLaMuse/mozillians | mozillians/phonebook/tests/test_views/test_views_login.py | 12 | 1179 | from django.core.urlresolvers import reverse
from django.test import Client
from mozillians.common.tests import TestCase, requires_login
from mozillians.users.tests import UserFactory
class LoginTests(TestCase):
    """Check what the phonebook login view renders for each account state."""

    def _visit_login(self, client):
        # Follow redirects so the finally-rendered template can be asserted.
        return client.get(reverse('phonebook:login'), follow=True)

    @requires_login()
    def test_login_anonymous(self):
        # The request only triggers the flow; the requires_login decorator
        # performs the actual assertion for anonymous visitors.
        self._visit_login(Client())

    def test_login_unvouched(self):
        account = UserFactory.create(vouched=False)
        with self.login(account) as client:
            response = self._visit_login(client)
            self.assertTemplateUsed(response, 'phonebook/home.html')

    def test_login_vouched(self):
        account = UserFactory.create()
        with self.login(account) as client:
            response = self._visit_login(client)
            self.assertTemplateUsed(response, 'phonebook/home.html')

    def test_login_incomplete_profile(self):
        account = UserFactory.create(userprofile={'full_name': ''})
        with self.login(account) as client:
            response = self._visit_login(client)
            self.assertTemplateUsed(response, 'phonebook/edit_profile.html')
| bsd-3-clause |
asimonet/website | reveal.js/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py | 960 | 45344 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
# gyp's generic path/toolchain variables expressed in terms of Android
# build-system make variables (expanded later by make, except where noted).
generator_default_variables = {
  'OS': 'android',
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.so',
  'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
  'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
  'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
  'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
  'LIB_DIR': '$(obj).$(TOOLSET)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(RULE_SOURCES)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
    # Boolean to declare that this target does not want its name mangled.
    'android_unmangled_name',
    # Map of android build system variables to set.
    'aosp_build_settings',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
# Makefile fragment appended once after all sub-makefiles are included.
ALL_MODULES_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""
# Banner written at the top of every generated .mk file.
header = """\
# This file is generated by gyp; do not edit.
"""
# Map gyp target types to Android module classes.
MODULE_CLASSES = {
    'static_library': 'STATIC_LIBRARIES',
    'shared_library': 'SHARED_LIBRARIES',
    'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
  """Return True if |ext| is a file extension that gyp compiles as C++."""
  language = make.COMPILABLE_EXTENSIONS.get(ext)
  return language == 'cxx'
def Sourceify(path):
  """Return |path| unchanged.

  Other backends rewrite paths into source-directory form here; the Android
  backend does not support options.generator_output, so this is kept as an
  identity function purely for interface parity with the make backend.
  """
  return path
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
# NOTE: populated as a side effect of AndroidMkWriter.Write(), so targets must
# be processed in dependency order for lookups to succeed.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
  """AndroidMkWriter packages up the writing of one target-specific Android.mk.

  Its only real entry point is Write(), and is mostly used for namespacing.
  """

  def __init__(self, android_top_dir):
    # Root of the Android source tree; used by NormalizeIncludePaths() to
    # relativize absolute include paths.
    self.android_top_dir = android_top_dir
  def Write(self, qualified_target, relative_target, base_path, output_filename,
            spec, configs, part_of_all, write_alias_target, sdk_version):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      relative_target: qualified target name relative to the root
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
      write_alias_target: flag indicating whether to create short aliases for
                          this target
      sdk_version: what to emit for LOCAL_SDK_VERSION in output

    Returns:
      The Android module name written for this target.
    """
    gyp.common.EnsureDirExists(output_filename)

    self.fp = open(output_filename, 'w')

    self.fp.write(header)

    self.qualified_target = qualified_target
    self.relative_target = relative_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']

    deps, link_deps = self.ComputeDeps(spec)

    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []

    self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
    self.android_module = self.ComputeAndroidModule(spec)
    (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
    self.output = self.output_binary = self.ComputeOutput(spec)

    # Standard header.
    self.WriteLn('include $(CLEAR_VARS)\n')

    # Module class and name.
    self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
    self.WriteLn('LOCAL_MODULE := ' + self.android_module)
    # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
    # The library module classes fail if the stem is set. ComputeOutputParts
    # makes sure that stem == modulename in these cases.
    if self.android_stem != self.android_module:
      self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
    self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
    if self.toolset == 'host':
      self.WriteLn('LOCAL_IS_HOST_MODULE := true')
      self.WriteLn('LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)')
    elif sdk_version > 0:
      self.WriteLn('LOCAL_MODULE_TARGET_ARCH := '
                   '$(TARGET_$(GYP_VAR_PREFIX)ARCH)')
      self.WriteLn('LOCAL_SDK_VERSION := %s' % sdk_version)

    # Grab output directories; needed for Actions and Rules.
    if self.toolset == 'host':
      self.WriteLn('gyp_intermediate_dir := '
                   '$(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))')
    else:
      self.WriteLn('gyp_intermediate_dir := '
                   '$(call local-intermediates-dir,,$(GYP_VAR_PREFIX))')
    self.WriteLn('gyp_shared_intermediate_dir := '
                 '$(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))')
    self.WriteLn()

    # List files this target depends on so that actions/rules/copies/sources
    # can depend on the list.
    # TODO: doesn't pull in things through transitive link deps; needed?
    target_dependencies = [x[1] for x in deps if x[0] == 'path']
    self.WriteLn('# Make sure our deps are built first.')
    self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
                   local_pathify=True)

    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs)

    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs)

    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs)

    # GYP generated outputs.
    self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)

    # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
    # on both our dependency targets and our generated files.
    self.WriteLn('# Make sure our deps and generated files are built first.')
    self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
                 '$(GYP_GENERATED_OUTPUTS)')
    self.WriteLn()

    # Sources.
    if spec.get('sources', []) or extra_sources:
      self.WriteSources(spec, configs, extra_sources)

    self.WriteTarget(spec, configs, deps, link_deps, part_of_all,
                     write_alias_target)

    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = ('path', self.output_binary)

    # Update global list of link dependencies.
    if self.type == 'static_library':
      target_link_deps[qualified_target] = ('static', self.android_module)
    elif self.type == 'shared_library':
      target_link_deps[qualified_target] = ('shared', self.android_module)
    self.fp.close()
    return self.android_module
  def WriteActions(self, actions, extra_sources, extra_outputs):
    """Write Makefile code for any 'actions' from the gyp input.

    Args:
      actions: the 'actions' list from the target's spec
      extra_sources: a list that will be filled in with newly generated source
                     files, if any
      extra_outputs: a list that will be filled in with any outputs of these
                     actions (used to make other pieces dependent on these
                     actions)

    Raises:
      gyp.common.GypError: if an input or output filename contains a space.
    """
    for action in actions:
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']

      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        if not out.startswith('$'):
          print ('WARNING: Action for target "%s" writes output to local path '
                 '"%s".' % (self.target, out))
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs

      # Prepare the actual command.
      command = gyp.common.EncodePOSIXShellList(action['action'])
      if 'message' in action:
        quiet_cmd = 'Gyp action: %s ($@)' % action['message']
      else:
        quiet_cmd = 'Gyp action: %s ($@)' % name
      if len(dirs) > 0:
        # Create the output directories up front so the action can write there.
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      # gyp actions run relative to their containing gyp file's directory.
      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command

      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir. This replaces the gyp_*
      # variables for the action rule with an absolute version so that the
      # output goes in the right place.
      # Only write the gyp_* rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(abspath $(gyp_intermediate_dir))' % main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(abspath $(gyp_shared_intermediate_dir))' % main_output)

      # Android's envsetup.sh adds a number of directories to the path including
      # the built host binary directory. This causes actions/rules invoked by
      # gyp to sometimes use these instead of system versions, e.g. bison.
      # The built host binaries may not be suitable, and can cause errors.
      # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
      # set by envsetup.
      self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
                   % main_output)

      # Don't allow spaces in input/output filenames, but make an exception for
      # filenames which start with '$(' since it's okay for there to be spaces
      # inside of make function/macro invocations.
      for input in inputs:
        if not input.startswith('$(') and ' ' in input:
          raise gyp.common.GypError(
              'Action input filename "%s" in target %s contains a space' %
              (input, self.target))
      for output in outputs:
        if not output.startswith('$(') and ' ' in output:
          raise gyp.common.GypError(
              'Action output filename "%s" in target %s contains a space' %
              (output, self.target))

      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, ' '.join(map(self.LocalPathify, inputs))))
      self.WriteLn('\t@echo "%s"' % quiet_cmd)
      self.WriteLn('\t$(hide)%s\n' % command)
      for output in outputs[1:]:
        # Make each output depend on the main output, with an empty command
        # to force make to notice that the mtime has changed.
        self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))

      extra_outputs += outputs
      self.WriteLn()

    self.WriteLn()
  def WriteRules(self, rules, extra_sources, extra_outputs):
    """Write Makefile code for any 'rules' from the gyp input.

    Args:
      rules: the 'rules' list from the target's spec
      extra_sources: a list that will be filled in with newly generated source
                     files, if any
      extra_outputs: a list that will be filled in with any outputs of these
                     rules (used to make other pieces dependent on these rules)
    """
    if len(rules) == 0:
      return

    for rule in rules:
      if len(rule.get('rule_sources', [])) == 0:
        continue
      name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                      rule['rule_name']))
      self.WriteLn('\n### Generated for rule "%s":' % name)
      self.WriteLn('# "%s":' % rule)

      inputs = rule.get('inputs')
      # Each rule source produces its own expansion of the rule's outputs
      # and action command.
      for rule_source in rule.get('rule_sources', []):
        (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
        (rule_source_root, rule_source_ext) = \
            os.path.splitext(rule_source_basename)

        outputs = [self.ExpandInputRoot(out, rule_source_root,
                                        rule_source_dirname)
                   for out in rule['outputs']]

        dirs = set()
        for out in outputs:
          if not out.startswith('$'):
            print ('WARNING: Rule for target %s writes output to local path %s'
                   % (self.target, out))
          dir = os.path.dirname(out)
          if dir:
            dirs.add(dir)
        extra_outputs += outputs
        if int(rule.get('process_outputs_as_sources', False)):
          extra_sources.extend(outputs)

        components = []
        for component in rule['action']:
          component = self.ExpandInputRoot(component, rule_source_root,
                                           rule_source_dirname)
          if '$(RULE_SOURCES)' in component:
            component = component.replace('$(RULE_SOURCES)',
                                          rule_source)
          components.append(component)

        command = gyp.common.EncodePOSIXShellList(components)
        cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
        command = cd_action + command
        if dirs:
          command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

        # We set up a rule to build the first output, and then set up
        # a rule for each additional output to depend on the first.
        # NOTE: relies on Python 2 map() returning a list (indexed below).
        outputs = map(self.LocalPathify, outputs)
        main_output = outputs[0]
        self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
        self.WriteLn('%s: gyp_var_prefix := $(GYP_VAR_PREFIX)' % main_output)
        self.WriteLn('%s: gyp_intermediate_dir := '
                     '$(abspath $(gyp_intermediate_dir))' % main_output)
        self.WriteLn('%s: gyp_shared_intermediate_dir := '
                     '$(abspath $(gyp_shared_intermediate_dir))' % main_output)

        # See explanation in WriteActions.
        self.WriteLn('%s: export PATH := '
                     '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)

        main_output_deps = self.LocalPathify(rule_source)
        if inputs:
          main_output_deps += ' '
          main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])

        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                     (main_output, main_output_deps))
        self.WriteLn('\t%s\n' % command)
        for output in outputs[1:]:
          # Make each output depend on the main output, with an empty command
          # to force make to notice that the mtime has changed.
          self.WriteLn('%s: %s ;' % (output, main_output))
        self.WriteLn()

    self.WriteLn()
  def WriteCopies(self, copies, extra_outputs):
    """Write Makefile code for any 'copies' from the gyp input.

    Args:
      copies: the 'copies' list from the target's spec
      extra_outputs: a list that will be filled in with any outputs of this
                     action (used to make other pieces dependent on this action)
    """
    self.WriteLn('### Generated for copy rule.')

    variable = make.StringToMakefileVariable(self.relative_target + '_copies')
    outputs = []
    for copy in copies:
      for path in copy['files']:
        # The Android build system does not allow generation of files into the
        # source tree. The destination should start with a variable, which will
        # typically be $(gyp_intermediate_dir) or
        # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
        # because some of the gyp tests depend on this.
        if not copy['destination'].startswith('$'):
          print ('WARNING: Copy rule for target %s writes output to '
                 'local path %s' % (self.target, copy['destination']))

        # LocalPathify() calls normpath, stripping trailing slashes.
        path = Sourceify(self.LocalPathify(path))
        filename = os.path.split(path)[1]
        output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                          filename)))

        # $(ACP) is the Android build system's acp copy tool; it is an
        # order-only prerequisite so rebuilding acp doesn't re-trigger copies.
        self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                     (output, path))
        self.WriteLn('\t@echo Copying: $@')
        self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
        self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
        self.WriteLn()
        outputs.append(output)
    self.WriteLn('%s = %s' % (variable,
                              ' '.join(map(make.QuoteSpaces, outputs))))
    extra_outputs.append('$(%s)' % variable)
    self.WriteLn()
  def WriteSourceFlags(self, spec, configs):
    """Write out the flags and include paths used to compile source files for
    the current target.

    Per-configuration values are written as MY_*_<config> /
    LOCAL_*_<config> variables and then selected at build time via
    $(GYP_CONFIGURATION).

    Args:
      spec, configs: input from gyp.
    """
    for configname, config in sorted(configs.iteritems()):
      extracted_includes = []

      self.WriteLn('\n# Flags passed to both C and C++ files.')
      cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
          config.get('cflags', []) + config.get('cflags_c', []))
      extracted_includes.extend(includes_from_cflags)
      self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)

      self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
                     prefix='-D', quoter=make.EscapeCppDefine)

      self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
      includes = list(config.get('include_dirs', []))
      includes.extend(extracted_includes)
      includes = map(Sourceify, map(self.LocalPathify, includes))
      includes = self.NormalizeIncludePaths(includes)
      self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)

      self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
      self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)

    self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
                 '$(MY_DEFS_$(GYP_CONFIGURATION))')
    # Undefine ANDROID for host modules
    # TODO: the source code should not use macro ANDROID to tell if it's host
    # or target module.
    if self.toolset == 'host':
      self.WriteLn('# Undefine ANDROID for host modules')
      self.WriteLn('LOCAL_CFLAGS += -UANDROID')
    self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
                 '$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
    self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
    # Android uses separate flags for assembly file invocations, but gyp expects
    # the same CFLAGS to be applied:
    self.WriteLn('LOCAL_ASFLAGS := $(LOCAL_CFLAGS)')
  def WriteSources(self, spec, configs, extra_sources):
    """Write Makefile code for any 'sources' from the gyp input.

    These are source files necessary to build the current target.
    We need to handle shared_intermediate directory source files as
    a special case by copying them to the intermediate directory and
    treating them as a genereated sources. Otherwise the Android build
    rules won't pick them up.

    Args:
      spec, configs: input from gyp.
      extra_sources: Sources generated from Actions or Rules.
    """
    sources = filter(make.Compilable, spec.get('sources', []))
    generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
    extra_sources = filter(make.Compilable, extra_sources)

    # Determine and output the C++ extension used by these sources.
    # We simply find the first C++ file and use that extension.
    all_sources = sources + extra_sources
    local_cpp_extension = '.cpp'
    for source in all_sources:
      (root, ext) = os.path.splitext(source)
      if IsCPPExtension(ext):
        local_cpp_extension = ext
        break
    if local_cpp_extension != '.cpp':
      self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)

    # We need to move any non-generated sources that are coming from the
    # shared intermediate directory out of LOCAL_SRC_FILES and put them
    # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
    # that don't match our local_cpp_extension, since Android will only
    # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
    local_files = []
    for source in sources:
      (root, ext) = os.path.splitext(source)
      if '$(gyp_shared_intermediate_dir)' in source:
        extra_sources.append(source)
      elif '$(gyp_intermediate_dir)' in source:
        extra_sources.append(source)
      elif IsCPPExtension(ext) and ext != local_cpp_extension:
        extra_sources.append(source)
      else:
        local_files.append(os.path.normpath(os.path.join(self.path, source)))

    # For any generated source, if it is coming from the shared intermediate
    # directory then we add a Make rule to copy them to the local intermediate
    # directory first. This is because the Android LOCAL_GENERATED_SOURCES
    # must be in the local module intermediate directory for the compile rules
    # to work properly. If the file has the wrong C++ extension, then we add
    # a rule to copy that to intermediates and use the new version.
    final_generated_sources = []
    # If a source file gets copied, we still need to add the orginal source
    # directory as header search path, for GCC searches headers in the
    # directory that contains the source file by default.
    origin_src_dirs = []
    for source in extra_sources:
      local_file = source
      if not '$(gyp_intermediate_dir)/' in local_file:
        basename = os.path.basename(local_file)
        local_file = '$(gyp_intermediate_dir)/' + basename
      (root, ext) = os.path.splitext(local_file)
      if IsCPPExtension(ext) and ext != local_cpp_extension:
        local_file = root + local_cpp_extension
      if local_file != source:
        # Copy rule from the original location into the module intermediates.
        self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
        self.WriteLn('\tmkdir -p $(@D); cp $< $@')
        origin_src_dirs.append(os.path.dirname(source))
      final_generated_sources.append(local_file)

    # We add back in all of the non-compilable stuff to make sure that the
    # make rules have dependencies on them.
    final_generated_sources.extend(generated_not_sources)
    self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

    origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
    origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
    self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

    self.WriteList(local_files, 'LOCAL_SRC_FILES')

    # Write out the flags used to compile the source; this must be done last
    # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
    self.WriteSourceFlags(spec, configs)
  def ComputeAndroidModule(self, spec):
    """Return the Android module name used for a gyp spec.

    We use the complete qualified target name to avoid collisions between
    duplicate targets in different directories. We also add a suffix to
    distinguish gyp-generated module names.
    """

    if int(spec.get('android_unmangled_name', 0)):
      # Target explicitly opted out of mangling; shared libraries must then
      # already follow the 'lib' naming convention.
      assert self.type != 'shared_library' or self.target.startswith('lib')
      return self.target

    if self.type == 'shared_library':
      # For reasons of convention, the Android build system requires that all
      # shared library modules are named 'libfoo' when generating -l flags.
      prefix = 'lib_'
    else:
      prefix = ''

    if spec['toolset'] == 'host':
      suffix = '_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp'
    else:
      suffix = '_gyp'

    if self.path:
      middle = make.StringToMakefileVariable('%s_%s' % (self.path, self.target))
    else:
      middle = make.StringToMakefileVariable(self.target)

    return ''.join([prefix, middle, suffix])
  def ComputeOutputParts(self, spec):
    """Return the 'output basename' of a gyp spec, split into filename + ext.

    Android libraries must be named the same thing as their module name,
    otherwise the linker can't find them, so product_name and so on must be
    ignored if we are building a library, and the "lib" prepending is
    not done for Android.

    Returns:
      A (target_stem, target_ext) tuple; join them for the full basename.
    """
    assert self.type != 'loadable_module' # TODO: not supported?

    target = spec['target_name']
    target_prefix = ''
    target_ext = ''
    if self.type == 'static_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.a'
    elif self.type == 'shared_library':
      target = self.ComputeAndroidModule(spec)
      target_ext = '.so'
    elif self.type == 'none':
      target_ext = '.stamp'
    elif self.type != 'executable':
      print ("ERROR: What output file should be generated?",
             "type", self.type, "target", target)

    # product_* overrides apply only to non-library targets (see docstring).
    if self.type != 'static_library' and self.type != 'shared_library':
      target_prefix = spec.get('product_prefix', target_prefix)
      target = spec.get('product_name', target)
      product_ext = spec.get('product_extension')
      if product_ext:
        target_ext = '.' + product_ext

    target_stem = target_prefix + target
    return (target_stem, target_ext)
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
return ''.join(self.ComputeOutputParts(spec))
  def ComputeOutput(self, spec):
    """Return the 'output' (full output path) of a gyp spec.

    E.g., the loadable module 'foobar' in directory 'baz' will produce
    '$(obj)/baz/libfoobar.so'
    """
    if self.type == 'executable':
      # We install host executables into shared_intermediate_dir so they can be
      # run by gyp rules that refer to PRODUCT_DIR.
      path = '$(gyp_shared_intermediate_dir)'
    elif self.type == 'shared_library':
      if self.toolset == 'host':
        path = '$($(GYP_HOST_VAR_PREFIX)HOST_OUT_INTERMEDIATE_LIBRARIES)'
      else:
        path = '$($(GYP_VAR_PREFIX)TARGET_OUT_INTERMEDIATE_LIBRARIES)'
    else:
      # Other targets just get built into their intermediate dir.
      if self.toolset == 'host':
        path = ('$(call intermediates-dir-for,%s,%s,true,,'
                '$(GYP_HOST_VAR_PREFIX))' % (self.android_class,
                                             self.android_module))
      else:
        path = ('$(call intermediates-dir-for,%s,%s,,,$(GYP_VAR_PREFIX))'
                % (self.android_class, self.android_module))

    assert spec.get('product_dir') is None # TODO: not supported?
    return os.path.join(path, self.ComputeOutputBasename(spec))
def NormalizeIncludePaths(self, include_paths):
""" Normalize include_paths.
Convert absolute paths to relative to the Android top directory.
Args:
include_paths: A list of unprocessed include paths.
Returns:
A list of normalized include paths.
"""
normalized = []
for path in include_paths:
if path[0] == '/':
path = gyp.common.RelativePath(path, self.android_top_dir)
normalized.append(path)
return normalized
def ExtractIncludesFromCFlags(self, cflags):
"""Extract includes "-I..." out from cflags
Args:
cflags: A list of compiler flags, which may be mixed with "-I.."
Returns:
A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
"""
clean_cflags = []
include_paths = []
for flag in cflags:
if flag.startswith('-I'):
include_paths.append(flag[2:])
else:
clean_cflags.append(flag)
return (clean_cflags, include_paths)
def FilterLibraries(self, libraries):
"""Filter the 'libraries' key to separate things that shouldn't be ldflags.
Library entries that look like filenames should be converted to android
module names instead of being passed to the linker as flags.
Args:
libraries: the value of spec.get('libraries')
Returns:
A tuple (static_lib_modules, dynamic_lib_modules, ldflags)
"""
static_lib_modules = []
dynamic_lib_modules = []
ldflags = []
for libs in libraries:
# Libs can have multiple words.
for lib in libs.split():
# Filter the system libraries, which are added by default by the Android
# build system.
if (lib == '-lc' or lib == '-lstdc++' or lib == '-lm' or
lib.endswith('libgcc.a')):
continue
match = re.search(r'([^/]+)\.a$', lib)
if match:
static_lib_modules.append(match.group(1))
continue
match = re.search(r'([^/]+)\.so$', lib)
if match:
dynamic_lib_modules.append(match.group(1))
continue
if lib.startswith('-l'):
ldflags.append(lib)
return (static_lib_modules, dynamic_lib_modules, ldflags)
  def ComputeDeps(self, spec):
    """Compute the dependencies of a gyp spec.

    Returns a tuple (deps, link_deps), where each is a list of
    filenames that will need to be put in front of make for either
    building (deps) or linking (link_deps).
    """
    deps = []
    link_deps = []
    if 'dependencies' in spec:
      # target_outputs / target_link_deps are module-level maps populated by
      # Write() for previously-processed targets.
      deps.extend([target_outputs[dep] for dep in spec['dependencies']
                   if target_outputs[dep]])
      for dep in spec['dependencies']:
        if dep in target_link_deps:
          link_deps.append(target_link_deps[dep])
      deps.extend(link_deps)
    return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
  def WriteTargetFlags(self, spec, configs, link_deps):
    """Write Makefile code to specify the link flags and library dependencies.

    Args:
      spec, configs: input from gyp.
      link_deps: link dependency list; see ComputeDeps()
    """
    # Libraries (i.e. -lfoo)
    # These must be included even for static libraries as some of them provide
    # implicit include paths through the build system.
    libraries = gyp.common.uniquer(spec.get('libraries', []))
    static_libs, dynamic_libs, ldflags_libs = self.FilterLibraries(libraries)

    if self.type != 'static_library':
      for configname, config in sorted(configs.iteritems()):
        ldflags = list(config.get('ldflags', []))
        self.WriteLn('')
        self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
      self.WriteList(ldflags_libs, 'LOCAL_GYP_LIBS')
      self.WriteLn('LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION)) '
                   '$(LOCAL_GYP_LIBS)')

    # Link dependencies (i.e. other gyp targets this target depends on)
    # These need not be included for static libraries as within the gyp build
    # we do not use the implicit include path mechanism.
    if self.type != 'static_library':
      static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
      shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']
    else:
      static_link_deps = []
      shared_link_deps = []

    # Only write the lists if they are non-empty.
    if static_libs or static_link_deps:
      self.WriteLn('')
      self.WriteList(static_libs + static_link_deps,
                     'LOCAL_STATIC_LIBRARIES')
      self.WriteLn('# Enable grouping to fix circular references')
      self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
    if dynamic_libs or shared_link_deps:
      self.WriteLn('')
      self.WriteList(dynamic_libs + shared_link_deps,
                     'LOCAL_SHARED_LIBRARIES')
  def WriteTarget(self, spec, configs, deps, link_deps, part_of_all,
                  write_alias_target):
    """Write Makefile code to produce the final target of the gyp spec.

    Args:
      spec, configs: input from gyp.
      deps, link_deps: dependency lists; see ComputeDeps()
      part_of_all: flag indicating this target is part of 'all'
      write_alias_target: flag indicating whether to create short aliases for
                          this target
    """
    self.WriteLn('### Rules for final target.')

    if self.type != 'none':
      self.WriteTargetFlags(spec, configs, link_deps)

    settings = spec.get('aosp_build_settings', {})
    if settings:
      self.WriteLn('### Set directly by aosp_build_settings.')
      for k, v in settings.iteritems():
        if isinstance(v, list):
          self.WriteList(v, k)
        else:
          self.WriteLn('%s := %s' % (k, make.QuoteIfNecessary(v)))
      self.WriteLn('')

    # Add to the set of targets which represent the gyp 'all' target. We use the
    # name 'gyp_all_modules' as the Android build system doesn't allow the use
    # of the Make target 'all' and because 'all_modules' is the equivalent of
    # the Make target 'all' on Android.
    if part_of_all and write_alias_target:
      self.WriteLn('# Add target alias to "gyp_all_modules" target.')
      self.WriteLn('.PHONY: gyp_all_modules')
      self.WriteLn('gyp_all_modules: %s' % self.android_module)
      self.WriteLn('')

    # Add an alias from the gyp target name to the Android module name. This
    # simplifies manual builds of the target, and is required by the test
    # framework.
    if self.target != self.android_module and write_alias_target:
      self.WriteLn('# Alias gyp target name.')
      self.WriteLn('.PHONY: %s' % self.target)
      self.WriteLn('%s: %s' % (self.target, self.android_module))
      self.WriteLn('')

    # Add the command to trigger build of the target type depending
    # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
    # NOTE: This has to come last!
    modifier = ''
    if self.toolset == 'host':
      modifier = 'HOST_'
    if self.type == 'static_library':
      self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
    elif self.type == 'shared_library':
      self.WriteLn('LOCAL_PRELINK_MODULE := false')
      self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
    elif self.type == 'executable':
      self.WriteLn('LOCAL_CXX_STL := libc++_static')
      # Executables are for build and test purposes only, so they're installed
      # to a directory that doesn't get included in the system image.
      self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
      self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
    else:
      # 'none' targets: emit a stamp file via base_rules.mk instead of a real
      # build product, so dependents still have something to depend on.
      self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
      if self.toolset == 'target':
        self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)')
      else:
        self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)')
      self.WriteLn()
      self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
      self.WriteLn()
      self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
      self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) touch $@')
      self.WriteLn()
      self.WriteLn('LOCAL_2ND_ARCH_VAR_PREFIX :=')
  def WriteList(self, value_list, variable=None, prefix='',
                quoter=make.QuoteIfNecessary, local_pathify=False):
    """Write a variable definition that is a list of values.

    E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
         foo = blaha blahb
    but in a pretty-printed style.

    Args:
      value_list: values to write; may be empty or None, in which case the
                  variable is still emitted (as an empty assignment).
      variable: name of the make variable to assign.
      prefix: string prepended to each value before quoting.
      quoter: callable applied to each prefixed value.
      local_pathify: if True, each value is passed through LocalPathify().
    """
    values = ''
    if value_list:
      value_list = [quoter(prefix + l) for l in value_list]
      if local_pathify:
        value_list = [self.LocalPathify(l) for l in value_list]
      # One value per continuation line for readable generated makefiles.
      values = ' \\\n\t' + ' \\\n\t'.join(value_list)
    self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteLn(self, text=''):
self.fp.write(text + '\n')
  def LocalPathify(self, path):
    """Convert a subdirectory-relative path into a normalized path which starts
    with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).

    Absolute paths, or paths that contain variables, are just normalized.
    """
    if '$(' in path or os.path.isabs(path):
      # path is not a file in the project tree in this case, but calling
      # normpath is still important for trimming trailing slashes.
      return os.path.normpath(path)
    local_path = os.path.join('$(LOCAL_PATH)', self.path, path)
    local_path = os.path.normpath(local_path)
    # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
    # - i.e. that the resulting path is still inside the project tree. The
    # path may legitimately have ended up containing just $(LOCAL_PATH), though,
    # so we don't look for a slash.
    assert local_path.startswith('$(LOCAL_PATH)'), (
        'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
    return local_path
def ExpandInputRoot(self, template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return os.path.normpath(path)
def PerformBuild(data, configurations, params):
  """Run the Android build over the generated GypAndroid.mk.

  Invoked by gyp when the user asks the generator to also build.
  """
  # The android backend only supports the default configuration.
  options = params['options']
  makefile = os.path.abspath(os.path.join(options.toplevel_dir,
                                          'GypAndroid.mk'))
  env = dict(os.environ)
  # NOTE(review): ONE_SHOT_MAKEFILE presumably restricts the Android build to
  # the modules declared in this single makefile — confirm against the AOSP
  # build system docs for the branch in use.
  env['ONE_SHOT_MAKEFILE'] = makefile
  arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
  print 'Building: %s' % arguments
  subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
  """Entry point for the gyp Android generator: writes the master
  GypAndroid.mk plus one included .mk fragment per target/toolset pair."""
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  write_alias_targets = generator_flags.get('write_alias_targets', True)
  sdk_version = generator_flags.get('aosp_sdk_version', 0)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile.  Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target.  This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  # NOTE(review): srcdir appears unused in this function.
  srcdir = '.'
  makefile_name = 'GypAndroid' + options.suffix + '.mk'
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  assert not options.generator_output, (
      'The Android backend does not support options.generator_output.')
  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')

  root_makefile.write(header)

  # We set LOCAL_PATH just once, here, to the top of the project tree. This
  # allows all the other paths we use to be relative to the Android.mk file,
  # as the Android build system expects.
  root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  android_modules = {}
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)
    relative_build_file = gyp.common.RelativePath(build_file,
                                                  options.toplevel_dir)
    build_files.add(relative_build_file)
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them
      # relative to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    part_of_all = qualified_target in needed_targets
    if limit_to_target_all and not part_of_all:
      continue

    relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
                                                 toolset)
    writer = AndroidMkWriter(android_top_dir)
    android_module = writer.Write(qualified_target, relative_target, base_path,
                                  output_file, spec, configs,
                                  part_of_all=part_of_all,
                                  write_alias_target=write_alias_targets,
                                  sdk_version=sdk_version)
    if android_module in android_modules:
      # Duplicate module names would silently clobber each other in the
      # Android build, so bail out of generation entirely.
      print ('ERROR: Android module names must be unique. The following '
             'targets both generate Android module name %s.\n %s\n %s' %
             (android_module, android_modules[android_module],
              qualified_target))
      return
    android_modules[android_module] = qualified_target

    # Our root_makefile lives at the source root.  Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)
  root_makefile.write('GYP_VAR_PREFIX ?=\n')
  root_makefile.write('GYP_HOST_VAR_PREFIX ?=\n')
  root_makefile.write('GYP_HOST_MULTILIB ?= first\n')

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
  root_makefile.write('\n')

  if write_alias_targets:
    root_makefile.write(ALL_MODULES_FOOTER)

  root_makefile.close()
| cc0-1.0 |
rooshilp/CMPUT410W15-project | testenv/lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/tests/doctest.py | 69 | 99730 | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
try:
basestring
except NameError:
basestring = str,unicode
try:
enumerate
except NameError:
def enumerate(seq):
return zip(range(len(seq)),seq)
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'

# Public API of this module, grouped to mirror the table of contents below.
__all__ = [
    # 0. Option Flags
    'register_optionflag',
    'DONT_ACCEPT_TRUE_FOR_1',
    'DONT_ACCEPT_BLANKLINE',
    'NORMALIZE_WHITESPACE',
    'ELLIPSIS',
    'IGNORE_EXCEPTION_DETAIL',
    'COMPARISON_FLAGS',
    'REPORT_UDIFF',
    'REPORT_CDIFF',
    'REPORT_NDIFF',
    'REPORT_ONLY_FIRST_FAILURE',
    'REPORTING_FLAGS',
    # 1. Utility Functions
    'is_private',
    # 2. Example & DocTest
    'Example',
    'DocTest',
    # 3. Doctest Parser
    'DocTestParser',
    # 4. Doctest Finder
    'DocTestFinder',
    # 5. Doctest Runner
    'DocTestRunner',
    'OutputChecker',
    'DocTestFailure',
    'UnexpectedException',
    'DebugRunner',
    # 6. Test Functions
    'testmod',
    'testfile',
    'run_docstring_examples',
    # 7. Tester
    'Tester',
    # 8. Unittest Support
    'DocTestSuite',
    'DocFileSuite',
    'set_unittest_reportflags',
    # 9. Debugging Support
    'script_from_examples',
    'testsource',
    'debug_src',
    'debug',
]
import __future__

import sys, traceback, inspect, linecache, os, re, types
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO

# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
                        __name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
# Registry mapping option-flag names to their bitmask values.
OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    """Register *name* as a doctest option flag and return its bit value.

    Each registered flag occupies the next unused bit (1, 2, 4, ...), so
    flags can be OR-ed together into a single integer bitmask.
    """
    bit = 1 << len(OPTIONFLAGS_BY_NAME)
    OPTIONFLAGS_BY_NAME[name] = bit
    return bit
# Flags that affect how expected and actual output are compared.
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

# Mask of all comparison-related flags.
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    IGNORE_EXCEPTION_DETAIL)

# Flags that control how failures are reported.
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')

# Mask of all reporting-related flags.
REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
    """prefix, base -> true iff name prefix + "." + base is "private".

    Prefix may be an empty string, and base does not contain a period.
    Prefix is ignored (although functions you write conforming to this
    protocol may make use of it).

    Return true iff base begins with an (at least one) underscore, but
    does not both begin and end with (at least) two underscores.

    >>> is_private("a.b", "my_func")
    False
    >>> is_private("____", "_my_func")
    True
    >>> is_private("someclass", "__init__")
    False
    >>> is_private("sometypo", "__init_")
    True
    >>> is_private("x.y.z", "_")
    True
    >>> is_private("_x.y.z", "__")
    False
    >>> is_private("", "")  # senseless but consistent
    False
    """
    warnings.warn("is_private is deprecated; it wasn't useful; "
                  "examine DocTestFinder.find() lists instead",
                  DeprecationWarning, stacklevel=2)
    starts_with_underscore = base.startswith("_")
    dunder_both_ends = base.startswith("__") and base.endswith("__")
    return starts_with_underscore and not dunder_both_ends
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Worst-case linear-time match of `got` against `want`, where `want`
    may contain ELLIPSIS_MARKER ('...') wildcards.

    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if want.find(ELLIPSIS_MARKER) == -1:
        return want == got

    # Split around the wildcards: the literal pieces must appear in
    # `got`, in order, without overlapping.
    pieces = want.split(ELLIPSIS_MARKER)
    assert len(pieces) >= 2

    lo, hi = 0, len(got)

    # A non-empty first piece must match exactly at the start.
    first = pieces[0]
    if first:
        if not got.startswith(first):
            return False
        lo = len(first)
        del pieces[0]

    # A non-empty last piece must match exactly at the end.
    last = pieces[-1]
    if last:
        if not got.endswith(last):
            return False
        hi -= len(last)
        del pieces[-1]

    if lo > hi:
        # The exact end matches required more characters than we have, as in
        # _ellipsis_match('aa...aa', 'aaa')
        return False

    # For the rest, a leftmost non-overlapping match for each piece is
    # sufficient; if that fails, no overall match is possible.
    for piece in pieces:
        # piece may be '' for consecutive ellipses, or an ellipsis at the
        # start or end of `want`; find('') succeeds without moving `lo`.
        lo = got.find(piece, lo, hi)
        if lo < 0:
            return False
        lo += len(piece)

    return True
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
pdb.Pdb.__init__(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example: a piece of source code together with its
    expected output.  `Example` defines the following attributes:

      - source: A single Python statement, always ending with a newline.
        The constructor adds a newline if needed.
      - want: The expected output from running the source code (either
        from stdout, or a traceback in case of exception).  `want` ends
        with a newline unless it's empty, in which case it's an empty
        string.  The constructor adds a newline if needed.
      - exc_msg: The exception message generated by the example, if
        the example is expected to generate an exception; or `None` if
        it is not expected to generate an exception.  This exception
        message is compared against the return value of
        `traceback.format_exception_only()`.  `exc_msg` ends with a
        newline unless it's `None`.  The constructor adds a newline
        if needed.
      - lineno: The line number within the DocTest string containing
        this Example where the Example begins.  This line number is
        zero-based, with respect to the beginning of the DocTest.
      - indent: The example's indentation in the DocTest string, i.e.
        the number of space characters that precede the example's
        first prompt.
      - options: A dictionary mapping from option flags to True or
        False, which is used to override default options for this
        example.  Any option flags not contained in this dictionary
        are left at their default value (as specified by the
        DocTestRunner's optionflags).  By default, no options are set.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize: source, want, and exc_msg are stored
        # newline-terminated (want may be the empty string).
        if not source.endswith('\n'):
            source = source + '\n'
        if want and not want.endswith('\n'):
            want = want + '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg = exc_msg + '\n'
        if options is None:
            options = {}
        # Store properties.
        self.source = source
        self.want = want
        self.exc_msg = exc_msg
        self.lineno = lineno
        self.indent = indent
        self.options = options
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:

      - examples: the list of examples.
      - globs: The namespace (aka globals) that the examples should
        be run in.
      - name: A name identifying the DocTest (typically, the name of
        the object whose docstring this DocTest was extracted from).
      - filename: The name of the file that this DocTest was extracted
        from, or `None` if the filename is unknown.
      - lineno: The line number within filename where this DocTest
        begins, or `None` if the line number is unavailable.  This
        line number is zero-based, with respect to the beginning of
        the file.
      - docstring: The string that the examples were extracted from,
        or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, basestring), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        count = len(self.examples)
        if count == 0:
            examples = 'no examples'
        elif count == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % count
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))

    # This lets us sort tests by name:
    def __cmp__(self, other):
        if not isinstance(other, DocTest):
            return -1
        self_key = (self.name, self.filename, self.lineno, id(self))
        other_key = (other.name, other.filename, other.lineno, id(other))
        return cmp(self_key, other_key)
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
    """
    A class used to parse strings containing doctest examples.

    The main entry points are `parse` (alternating text/Example list),
    `get_examples` (Examples only), and `get_doctest` (a DocTest object).
    """
    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)    # Not a blank line
                     (?![ ]*>>>)  # Not a line starting with PS1
                     .*$\n?       # But any other line
                  )*)
        ''', re.MULTILINE | re.VERBOSE)

    # A regular expression for handling `want` strings that contain
    # expected exceptions.  It divides `want` into three pieces:
    #    - the traceback header line (`hdr`)
    #    - the traceback stack (`stack`)
    #    - the exception message (`msg`), as generated by
    #      traceback.format_exception_only()
    # `msg` may have multiple lines.  We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header.  Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            |   innermost\ last
            ) \) :
        )
        \s* $                # toss trailing whitespace on the header.
        (?P<stack> .*?)      # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)

    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match

    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])

        output = []
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                    lineno=lineno,
                                    indent=min_indent+len(m.group('indent')),
                                    options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output

    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.

        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)

    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects.  Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.

        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]

    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a pair `(source, want)`, where `source` is the matched
        example's source code (with prompts and indentation stripped);
        and `want` is the example's expected output (with indentation
        stripped).

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))

        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 drops the indentation plus the 4-char '>>> '/'... ' prompt.
        source = '\n'.join([sl[indent+4:] for sl in source_lines])

        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])

        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None

        # Extract options from the source.
        options = self._find_options(source, name, lineno)

        return source, options, want, exc_msg

    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)

    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                # Each directive must be '+FLAG' or '-FLAG' for a known flag.
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options

    # This regular expression finds the indentation of every non-blank
    # line in a string.
    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)

    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            return 0

    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character.  If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))

    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, _namefilter=None, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        # NOTE: the default `parser` instance is created once at
        # class-definition time and shared by every finder; that is fine
        # because DocTestParser stores no per-parse state.
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
        # _namefilter is undocumented, and exists only for temporary backward-
        # compatibility support of testmod's deprecated isprivate mess.
        self._namefilter = _namefilter
    def find(self, obj, name=None, module=None, globs=None,
             extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                        "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            # Built-in objects have no source file; fall back to None.
            source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)

        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        return tests
def _filter(self, obj, prefix, base):
"""
Return true if the given object should not be examined.
"""
return (self._namefilter is not None and
self._namefilter(prefix, base))
    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.  Used to skip objects merely imported into `module`
        from elsewhere.
        """
        if module is None:
            # No module to compare against: treat everything as local.
            return True
        elif inspect.isfunction(object):
            # A function belongs to the module whose namespace it was
            # compiled against.  (func_globals is the Python 2 spelling
            # of __globals__.)
            return module.__dict__ is object.func_globals
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.

        `seen` maps id(obj) -> 1 for every object already visited, so
        cyclic or repeated references are processed only once.
        """
        if self._verbose:
            print 'Finding tests in %s' % name
        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.  _from_module keeps us
                # from following names imported from other modules.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
        # Look for tests in a module's __test__ dictionary.  Entries may
        # be raw docstring text or objects whose docstrings are scanned.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, basestring):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, basestring)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Check if this contained object should be ignored.
                if self._filter(val, name, valname):
                    continue
                # Special handling for staticmethod/classmethod: unwrap
                # to the underlying function so it can be inspected.
                # (im_func is the Python 2 bound-method attribute.)
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).im_func
                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.  A plain string `obj` (from a module's
        __test__ dict) is treated as the docstring itself.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, basestring):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, basestring):
                        # __doc__ can be any object; coerce for parsing.
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''
        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)
        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None
        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            # Report the .py source, not the compiled .pyc/.pyo file.
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.  Note:
        this method assumes that the object has a docstring.
        Returns None when the line number cannot be determined.
        """
        lineno = None
        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0
        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break
        # Find the line number for functions & methods.  Unwrap down to
        # the code object (Python 2 attribute names: im_func, func_code).
        if inspect.ismethod(obj): obj = obj.im_func
        if inspect.isfunction(obj): obj = obj.func_code
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            # co_firstlineno is 1-based; convert to a 0-based index.
            lineno = getattr(obj, 'co_firstlineno', None)-1
        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            pat = re.compile('(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno
        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.
    >>> tests = DocTestFinder().find(_TestClass)
    >>> runner = DocTestRunner(verbose=False)
    >>> for test in tests:
    ...     print runner.run(test)
    (0, 2)
    (0, 1)
    (0, 2)
    (0, 2)
    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:
    >>> runner.summarize(verbose=1)
    4 items passed all tests:
       2 tests in _TestClass
       2 tests in _TestClass.__init__
       2 tests in _TestClass.get
       1 tests in _TestClass.square
    7 tests in 4 items.
    7 passed and 0 failed.
    Test passed.
    (0, 7)
    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:
    >>> runner.tries
    7
    >>> runner.failures
    0
    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information.  If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.
    The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed.  It defaults to `sys.stdout.write`.  If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70
    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.
        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.
        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.
        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags
        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Maps test name -> (failures, tries), aggregated across runs.
        self._name2ft = {}
        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()
    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////
    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')
    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")
    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))
    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
    def _failure_header(self, test, example):
        # Build the "File ..., line ..., in ..." banner shown before
        # every failure report.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)
    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////
    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0
        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags
        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
        check = self._checker.check_output
        # Process each example.
        for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)
            # Merge in the example's options (per-example directives
            # are applied on top of the runner's original flags).
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)
            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                exec compile(example.source, filename, "single",
                             compileflags, 1) in test.globs
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane
            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS
            # The example raised an exception:  check if it was expected.
            else:
                exc_info = sys.exc_info()
                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exc_info)
                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM
                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS
                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    # Compare only the "ExceptionClass:" prefixes.
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS
            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exc_info)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)
        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags
        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries
    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t
    # Matches the synthetic filenames produced in __run, e.g.
    # "<doctest some.test.name[3]>".
    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>[\w\.]+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        # Serve the example's own source for our synthetic filenames;
        # fall back to the saved real linecache.getlines otherwise.
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        elif self.save_linecache_getlines.func_code.co_argcount>1:
            return self.save_linecache_getlines(filename, module_globals)
        else:
            return self.save_linecache_getlines(filename)
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.
        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.
        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.
        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test
        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        try:
            return self.__run(test, compileflags, out)
        finally:
            # Always undo the global monkey-patches, even on error.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()
    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.
        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print len(notests), "items had no tests:"
                notests.sort()
                for thing in notests:
                    print "   ", thing
            if passed:
                print len(passed), "items passed all tests:"
                passed.sort()
                for thing, count in passed:
                    print " %3d tests in %s" % (count, thing)
        if failed:
            print self.DIVIDER
            print len(failed), "items had failures:"
            failed.sort()
            for thing, (f, t) in failed:
                print " %3d of %3d in %s" % (f, t, thing)
        if verbose:
            print totalt, "tests in", len(self._name2ft), "items."
            print totalt - totalf, "passed and", totalf, "failed."
        if totalf:
            print "***Test Failed***", totalf, "failures."
        elif verbose:
            print "Test passed."
        return totalf, totalt
    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        """
        Merge the per-test (failures, tries) tallies from `other` into
        this runner; used by testmod to maintain the global `master`.
        """
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print "*** DocTestRunner.merge: '" + name + "' in both" \
                    " testers; summing outcomes."
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    A class used to check the whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True
        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True
        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub('(?m)^\s*?$', '', got)
            if got == want:
                return True
        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True
        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True
        # We didn't find any match; return false.
        return False
    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False
        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False
        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True
        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2
    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised (in debugging mode) when a doctest example's actual
    output does not match its expected output.

    Instance attributes:
      test:    the DocTest object being run
      example: the Example object that failed
      got:     the actual output produced by the example
    """
    def __init__(self, test, example, got):
        # Keep the full failure context so callers can inspect it.
        self.test = test
        self.example = example
        self.got = got

    def __str__(self):
        # Display the failing DocTest's own string form.
        return str(self.test)
class UnexpectedException(Exception):
    """Raised (in debugging mode) when a doctest example raises an
    exception that was not expected.

    Instance attributes:
      test:     the DocTest object being run
      example:  the Example object that failed
      exc_info: the (type, value, traceback) triple from the example
    """
    def __init__(self, test, example, exc_info):
        # Keep the full failure context so callers can inspect it.
        self.test = test
        self.example = example
        self.exc_info = exc_info

    def __str__(self):
        # Display the failing DocTest's own string form.
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
    >>> runner = DebugRunner(verbose=False)
    >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    ...                                    {}, 'foo', 'foo.py', 0)
    >>> try:
    ...     runner.run(test)
    ... except UnexpectedException, failure:
    ...     pass
    >>> failure.test is test
    True
    >>> failure.example.want
    '42\n'
    >>> exc_info = failure.exc_info
    >>> raise exc_info[0], exc_info[1], exc_info[2]
    Traceback (most recent call last):
    ...
    KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 1
    ...      >>> x
    ...      2
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> try:
    ...    runner.run(test)
    ... except DocTestFailure, failure:
    ...    pass
    DocTestFailure objects provide access to the test:
    >>> failure.test is test
    True
    As well as to the example:
    >>> failure.example.want
    '2\n'
    and the actual output:
    >>> failure.got
    '1\n'
    If a failure or error occurs, the globals are left intact:
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 1}
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 2
    ...      >>> raise KeyError
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    Traceback (most recent call last):
    ...
    UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 2}
    But the globals are cleared if there is no error:
    >>> test = DocTestParser().get_doctest('''
    ...      >>> x = 2
    ...      ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    (0, 1)
    >>> test.globs
    {}
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Run with clear_globs=False so the namespace survives a raised
        # DocTestFailure/UnexpectedException; only clear it ourselves on
        # a clean (non-raising) run.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r
    def report_unexpected_exception(self, out, test, example, exc_info):
        # Instead of recording the failure, abort immediately.
        raise UnexpectedException(test, example, exc_info)
    def report_failure(self, out, test, example, got):
        # Instead of recording the failure, abort immediately.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.  Created lazily on the first testmod call.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, isprivate=None,
       report=True, optionflags=0, extraglobs=None, raise_on_error=False,
       exclude_empty=False
    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.  Unless isprivate is specified, private names
    are not skipped.
    Also test examples reachable from dict m.__test__ if it exists and is
    not None.  m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.
    Return (#failures, #tests).
    See doctest.__doc__ for an overview.
    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__.  A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.  This is new in 2.4.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  This is new in 2.3.  Possible values (see the
    docs for details):
        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.
    Deprecated in Python 2.4:
    Optional keyword arg "isprivate" specifies a function used to
    determine whether a name is private.  The default function is
    to treat all functions as public.  Optionally, "isprivate" can be
    set to doctest.is_private to skip over functions marked as private
    using the underscore naming convention; see its docs for details.
    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master
    if isprivate is not None:
        warnings.warn("the isprivate argument is deprecated; "
                      "examine DocTestFinder.find() lists instead",
                      DeprecationWarning)
    # If no module was given, then use __main__.
    if m is None:
        # DWA - m will still be None if this wasn't invoked from the command
        # line, in which case the following TypeError is about as good an error
        # as we should expect
        m = sys.modules.get('__main__')
    # Check that we were actually given a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))
    # If no name was given, then use the module's name.
    if name is None:
        name = m.__name__
    # Find, parse, and run all tests in the given module.
    finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
    if raise_on_error:
        # DebugRunner raises on the first failure instead of recording it.
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)
    if report:
        runner.summarize()
    # Fold this run's tallies into the module-global master runner
    # (backward-compatibility behavior; see `master` above).
    if master is None:
        master = runner
    else:
        master.merge(runner)
    return runner.failures, runner.tries
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
    """
    Test examples in the given file.  Return (#failures, #tests).

    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:

      - If "module_relative" is True (the default), then "filename"
        specifies a module-relative path.  By default, this path is
        relative to the calling module's directory; but if the
        "package" argument is specified, then it is relative to that
        package.  To ensure os-independence, "filename" should use
        "/" characters to separate path segments, and should not
        be an absolute path (i.e., it may not begin with "/").

      - If "module_relative" is False, then "filename" specifies an
        os-specific path.  The path may be absolute or relative (to
        the current working directory).

    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.

    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename.  If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames.  It is an error to
    specify "package" if "module_relative" is False.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}.  A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  Possible values (see the docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path.  NOTE: _normalize_module inspects the call
    # stack, so this call must stay at this nesting depth.
    if module_relative:
        package = _normalize_module(package)
        filename = _module_relative_path(package, filename)

    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)

    if raise_on_error:
        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)

    # Read the file, convert it to a test, and run it.  Close the file
    # handle deterministically instead of leaking it until the garbage
    # collector runs (the original left it open).
    f = open(filename)
    try:
        s = f.read()
    finally:
        f.close()
    test = parser.get_doctest(s, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """Run the doctest examples found in object `f`'s docstring.

    `globs` is used as the globals namespace for the examples, and the
    optional `name` appears in any failure messages.  When `verbose` is
    true, output is produced even for passing examples.

    `compileflags`, if given, are the compiler flags used when running
    the examples; otherwise the future-import flags that apply to
    `globs` are used.  `optionflags` selects checking/reporting options
    as documented for `testmod`.
    """
    # Locate the doctests attached directly to `f` (no recursion into
    # contained objects), then execute each one.
    doctest_finder = DocTestFinder(verbose=verbose, recurse=False)
    example_runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    found = doctest_finder.find(f, name, globs=globs)
    for single_test in found:
        example_runner.run(single_test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
    # Deprecated pre-2.4 compatibility shim.  It emits a DeprecationWarning
    # on construction and simply forwards to DocTestFinder/DocTestRunner.
    # It is not used anywhere in this module.
    def __init__(self, mod=None, globs=None, verbose=None,
                 isprivate=None, optionflags=0):

        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      DeprecationWarning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs

        self.verbose = verbose
        self.isprivate = isprivate
        self.optionflags = optionflags
        self.testfinder = DocTestFinder(_namefilter=isprivate)
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)

    def runstring(self, s, name):
        # Parse the string as a doctest and run it immediately, returning
        # a (failures, tries) pair.
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print "Running string", name
        (f,t) = self.testrunner.run(test)
        if self.verbose:
            print f, "of", t, "examples failed in string", name
        return (f,t)

    def rundoc(self, object, name=None, module=None):
        # Accumulate (failures, tries) across every doctest found in
        # `object` (and, by default, the objects it contains).
        f = t = 0
        tests = self.testfinder.find(object, name, module=module,
                                     globs=self.globs)
        for test in tests:
            (f2, t2) = self.testrunner.run(test)
            (f,t) = (f+f2, t+t2)
        return (f,t)

    def rundict(self, d, name, module=None):
        # Wrap the dict in a synthetic module so rundoc() can search it.
        import types
        m = types.ModuleType(name)
        m.__dict__.update(d)
        if module is None:
            module = False
        return self.rundoc(m, name, module)

    def run__test__(self, d, name):
        # Run the tests in a __test__-style dictionary.
        import types
        m = types.ModuleType(name)
        m.__test__ = d
        return self.rundoc(m, name)

    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)

    def merge(self, other):
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
    >>> old = _unittest_reportflags
    >>> set_unittest_reportflags(REPORT_NDIFF |
    ...                          REPORT_ONLY_FIRST_FAILURE) == old
    True
    >>> import doctest
    >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    Only reporting flags can be set:
    >>> set_unittest_reportflags(ELLIPSIS)
    Traceback (most recent call last):
    ...
    ValueError: ('Only reporting flags allowed', 8)
    >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
    ...                                   REPORT_ONLY_FIRST_FAILURE)
    True
    """
    global _unittest_reportflags

    # Reject anything that is not purely a reporting option; other doctest
    # option flags must be set per-test, not module-wide.
    if (flags & REPORTING_FLAGS) != flags:
        raise ValueError("Only reporting flags allowed", flags)
    old = _unittest_reportflags
    _unittest_reportflags = flags
    return old
class DocTestCase(unittest.TestCase):
    # Adapter that lets a single DocTest run as a unittest TestCase.
    # The _dt_* attribute prefix avoids clashing with unittest internals.

    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):

        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown

    def setUp(self):
        test = self._dt_test

        # Optional per-test fixture supplied by the caller; it receives the
        # DocTest object so it can seed test.globs.
        if self._dt_setUp is not None:
            self._dt_setUp(test)

    def tearDown(self):
        test = self._dt_test

        if self._dt_tearDown is not None:
            self._dt_tearDown(test)

        # Drop the (possibly large) globals dict so its contents can be
        # garbage-collected once the test is done.
        test.globs.clear()

    def runTest(self):
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags

        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags

        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)

        try:
            runner.DIVIDER = "-"*70
            # Failure reports are written to `new` via out=new.write; the
            # save/restore of sys.stdout is defensive -- DocTestRunner.run
            # manages its own stdout redirection internally.
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old

        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))

    def format_failure(self, err):
        # Build a unittest-style failure message that points back at the
        # docstring the failing examples came from.
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                ' File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )

    def debug(self):
        r"""Run the test case without results and without catching exceptions
        The unit test framework includes a debug method on test cases
        and test suites to support post-mortem debugging.  The test code
        is run in such a way that errors are not caught.  This way a
        caller can catch the errors and initiate post-mortem debugging.
        The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexepcted
        exception:
        >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
        ...                {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...     case.debug()
        ... except UnexpectedException, failure:
        ...     pass
        The UnexpectedException contains the test, the example, and
        the original exception:
        >>> failure.test is test
        True
        >>> failure.example.want
        '42\n'
        >>> exc_info = failure.exc_info
        >>> raise exc_info[0], exc_info[1], exc_info[2]
        Traceback (most recent call last):
        ...
        KeyError
        If the output doesn't match, then a DocTestFailure is raised:
        >>> test = DocTestParser().get_doctest('''
        ...      >>> x = 1
        ...      >>> x
        ...      2
        ...      ''', {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ...    case.debug()
        ... except DocTestFailure, failure:
        ...    pass
        DocTestFailure objects provide access to the test:
        >>> failure.test is test
        True
        As well as to the example:
        >>> failure.example.want
        '2\n'
        and the actual output:
        >>> failure.got
        '1\n'
        """
        # DebugRunner propagates failures/exceptions instead of recording
        # them, so a debugger attached by the caller sees the raw error.
        self.setUp()
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test)
        self.tearDown()

    def id(self):
        return self._dt_test.name

    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))

    __str__ = __repr__

    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """Convert a module's doctests into a unittest test suite.

    Every docstring in `module` that contains doctest examples becomes
    one unittest test case; a failing example fails that case, with an
    error message naming the file and (approximate) line number.

    `module` may be a module object or a module name; if omitted, the
    calling module is used.  Supported keyword options:

    setUp / tearDown
      Fixture callables invoked before/after each docstring's tests;
      each receives the DocTest object and may use its `globs`.

    globs
      Dictionary of initial global variables for the tests.

    optionflags
      Doctest option flags, OR'ed together into an integer.
    """
    if test_finder is None:
        test_finder = DocTestFinder()

    # _normalize_module inspects the call stack; keep this call at this
    # nesting depth.
    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if globs is None:
        globs = module.__dict__
    if not tests:
        # Why do we want to do this? Because it reveals a bug that might
        # otherwise be hidden.
        raise ValueError(module, "has no tests")

    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        if not test.examples:
            continue
        if not test.filename:
            # Point at the source file rather than the compiled one.
            filename = module.__file__
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))

    return suite
class DocFileCase(DocTestCase):
    """DocTestCase variant for doctests loaded from standalone text files."""

    def id(self):
        # Flatten the dotted test name into a single identifier.
        return self._dt_test.name.replace('.', '_')

    def __repr__(self):
        return self._dt_test.filename

    __str__ = __repr__

    def format_failure(self, err):
        failed = self._dt_test
        return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
                % (failed.name, failed.filename, err)
                )
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(), **options):
    """Create a DocFileCase for the doctest examples in a text file.

    `path` names the file; see DocFileSuite for the meaning of
    `module_relative`, `package`, `globs` and `parser`.  Remaining
    keyword options are forwarded to DocFileCase.
    """
    if globs is None:
        globs = {}

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Relativize the path.  NOTE: _normalize_module inspects the call
    # stack, so this call must stay at this nesting depth.
    if module_relative:
        package = _normalize_module(package)
        path = _module_relative_path(package, path)

    # Find the file and read it.  Close the handle deterministically
    # instead of leaking it until garbage collection (the original
    # used a bare open(path).read()).
    name = os.path.basename(path)
    f = open(path)
    try:
        doc = f.read()
    finally:
        f.close()

    # Convert it to a test, and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.
    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".
    A number of options may be provided as keyword arguments:
    module_relative
      If "module_relative" is True, then the given file paths are
      interpreted as os-independent module-relative paths.  By
      default, these paths are relative to the calling module's
      directory; but if the "package" argument is specified, then
      they are relative to that package.  To ensure os-independence,
      "filename" should use "/" characters to separate path
      segments, and may not be an absolute path (i.e., it may not
      begin with "/").
      If "module_relative" is False, then the given file paths are
      interpreted as os-specific paths.  These paths may be absolute
      or relative (to the current working directory).
    package
      A Python package or the name of a Python package whose directory
      should be used as the base directory for module relative paths.
      If "package" is not specified, then the calling module's
      directory is used as the base directory for module relative
      filenames.  It is an error to specify "package" if
      "module_relative" is False.
    setUp
      A set-up function.  This is called before running the
      tests in each file. The setUp function will be passed a DocTest
      object.  The setUp function can access the test globals as the
      globs attribute of the test passed.
    tearDown
      A tear-down function.  This is called after running the
      tests in each file.  The tearDown function will be passed a DocTest
      object.  The tearDown function can access the test globals as the
      globs attribute of the test passed.
    globs
      A dictionary containing initial global variables for the tests.
    optionflags
      A set of doctest option flags expressed as an integer.
    parser
      A DocTestParser (or subclass) that should be used to extract
      tests from the files.
    """
    suite = unittest.TestSuite()

    # We do this here so that _normalize_module is called at the right
    # level.  If it were called in DocFileTest, then this function
    # would be the caller and we might guess the package incorrectly.
    # (_normalize_module inspects the call stack, so the nesting depth
    # of this call is significant -- do not refactor it into a helper.)
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    for path in paths:
        suite.addTest(DocFileTest(path, **kw))

    return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
       Converts text with examples to a Python script.  Example input is
       converted to regular code.  Example output and all other words
       are converted to comments:
       >>> text = '''
       ...       Here are examples of simple math.
       ...
       ...           Python has super accurate integer addition
       ...
       ...           >>> 2 + 2
       ...           5
       ...
       ...           And very friendly error messages:
       ...
       ...           >>> 1/0
       ...           To Infinity
       ...           And
       ...           Beyond
       ...
       ...       You can use logic if you want:
       ...
       ...           >>> if 0:
       ...           ...    blah
       ...           ...    blah
       ...           ...
       ...
       ...       Ho hum
       ...       '''
       >>> print script_from_examples(text)
       # Here are examples of simple math.
       #
       #     Python has super accurate integer addition
       #
       2 + 2
       # Expected:
       ## 5
       #
       #     And very friendly error messages:
       #
       1/0
       # Expected:
       ## To Infinity
       ## And
       ## Beyond
       #
       # You can use logic if you want:
       #
       if 0:
          blah
          blah
       #
       # Ho hum
       """
    # Walk the parsed pieces: Example objects become code, everything
    # else (prose, expected output) becomes comment lines.
    output = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Add the example's source code (strip trailing NL)
            output.append(piece.source[:-1])
            # Add the expected output:
            want = piece.want
            if want:
                # Expected output is double-commented ('##') so it can be
                # distinguished from surrounding prose comments.
                output.append('# Expected:')
                output += ['## '+l for l in want.split('\n')[:-1]]
        else:
            # Add non-example text.
            output += [_comment_line(l)
                       for l in piece.split('\n')[:-1]]

    # Trim junk on both ends.
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)
    # Combine the output, and return it.
    return '\n'.join(output)
def testsource(module, name):
    """Return the doctest examples of one object, rendered as a script.

    `module` (or its dotted name) is the module containing the test to
    be debugged; `name` is the module-qualified name of the object
    whose docstring holds the examples.  Raises ValueError when no
    matching test is found.
    """
    # NOTE: _normalize_module inspects the call stack; keep this call
    # at this nesting depth.
    module = _normalize_module(module)
    for candidate in DocTestFinder().find(module):
        if candidate.name == name:
            return script_from_examples(candidate.docstring)
    raise ValueError(name, "not found in tests")
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring, given as the string `src`."""
    # Convert the examples to a plain script, then hand off to the
    # script debugger (post-mortem when `pm` is true).
    script = script_from_examples(src)
    debug_script(script, pm, globs)
def debug_script(src, pm=False, globs=None):
    """Debug a test script under pdb.

    `src` is the script, as a string.  If `pm` is true, the script is
    run to completion and the debugger is entered post-mortem on any
    uncaught exception; otherwise the script is run under pdb from the
    start.  `globs`, if given, supplies the globals namespace (a copy
    is used so the caller's dict is not mutated).
    """
    import pdb

    # Note that tempfile.NameTemporaryFile() cannot be used.  As the
    # docs say, a file so created cannot be opened by name a second time
    # on modern Windows boxes, and execfile() needs to open it.
    # NOTE(review): tempfile.mktemp is subject to a filename race; it is
    # kept here for fidelity with the original behavior.
    srcfilename = tempfile.mktemp(".py", "doctestdebug")
    f = open(srcfilename, 'w')
    f.write(src)
    f.close()

    try:
        if globs:
            globs = globs.copy()
        else:
            globs = {}

        if pm:
            try:
                execfile(srcfilename, globs, globs)
            except:
                print sys.exc_info()[1]
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # Note that %r is vital here.  '%s' instead can, e.g., cause
            # backslashes to get treated as metacharacters on Windows.
            pdb.run("execfile(%r)" % srcfilename, globs, globs)

    finally:
        os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    `module` (or its dotted name) is the module containing the test;
    `name` is the module-qualified name of the object whose docstring
    holds the examples.  If `pm` is true, use post-mortem debugging.
    """
    # NOTE: _normalize_module inspects the call stack; keep this call
    # at this nesting depth.
    mod = _normalize_module(module)
    script = testsource(mod, name)
    debug_script(script, pm, mod.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
    """Run this module's own doctests through the unittest integration."""
    runner = unittest.TextTestRunner()
    runner.run(DocTestSuite())


if __name__ == "__main__":
    _test()
| gpl-2.0 |
luxnovalabs/enjigo_door | web_interface/django/contrib/gis/gdal/base.py | 224 | 1155 | from ctypes import c_void_p
from django.contrib.gis.gdal.error import GDALException
from django.utils import six
class GDALBase(object):
    """
    Base class for GDAL wrapper objects.  Access to the underlying C
    pointer is funneled through the `ptr` property so that NULL pointers
    and pointers of an incompatible ctypes type are rejected before they
    can be handed to GDAL routines.
    """
    # The pointer starts out NULL until a subclass assigns it.
    _ptr = None

    # The ctypes pointer type that `ptr` accepts (subclasses may override).
    ptr_type = c_void_p

    def _get_ptr(self):
        # Never hand out a NULL pointer -- passing one to a GDAL routine
        # is very bad.
        if self._ptr:
            return self._ptr
        raise GDALException('GDAL %s pointer no longer valid.' % self.__class__.__name__)

    def _set_ptr(self, ptr):
        # Accept None (NULL), an already-wrapped pointer of the expected
        # ctypes type, or a raw integer address (which gets wrapped).
        if ptr is None or isinstance(ptr, self.ptr_type):
            self._ptr = ptr
        elif isinstance(ptr, six.integer_types):
            self._ptr = self.ptr_type(ptr)
        else:
            raise TypeError('Incompatible pointer type')

    # Pointer access property.
    ptr = property(_get_ptr, _set_ptr)
| unlicense |
dbckz/ansible | lib/ansible/utils/module_docs_fragments/docker.py | 173 | 3598 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
    """Shared Ansible documentation fragment for Docker modules.

    The DOCUMENTATION constant below is YAML that Ansible's documentation
    tooling splices into each Docker module's docs; its text is runtime
    data consumed verbatim, not a Python docstring.
    """

    # Docker doc fragment
    DOCUMENTATION = '''
options:
    docker_host:
        description:
            - "The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
              TCP connection string. For example, 'tcp://192.0.2.23:2376'. If TLS is used to encrypt the connection,
              the module will automatically replace 'tcp' in the connection URL with 'https'."
        required: false
        default: "unix://var/run/docker.sock"
        aliases:
            - docker_url
    tls_hostname:
        description:
            - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
        default: localhost
        required: false
    api_version:
        description:
            - The version of the Docker API running on the Docker Host. Defaults to the latest version of the API
              supported by docker-py.
        required: false
        default: default provided by docker-py
        aliases:
            - docker_api_version
    timeout:
        description:
            - The maximum amount of time in seconds to wait on a response from the API.
        required: false
        default: 60
    cacert_path:
        description:
            - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
        required: false
        default: null
        aliases:
            - tls_ca_cert
    cert_path:
        description:
            - Path to the client's TLS certificate file.
        required: false
        default: null
        aliases:
            - tls_client_cert
    key_path:
        description:
            - Path to the client's TLS key file.
        required: false
        default: null
        aliases:
            - tls_client_key
    ssl_version:
        description:
            - Provide a valid SSL version number. Default value determined by docker-py, currently 1.0.
        required: false
        default: "1.0"
    tls:
        description:
            - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
              server.
        default: false
    tls_verify:
        description:
            - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
        default: false
notes:
    - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
      You can define DOCKER_HOST, DOCKER_TLS_HOSTNAME, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_SSL_VERSION,
      DOCKER_TLS, DOCKER_TLS_VERIFY and DOCKER_TIMEOUT. If you are using docker machine, run the script shipped
      with the product that sets up the environment. It will set these variables for you. See
      https://docker-py.readthedocs.org/en/stable/machine/ for more details.
'''
| gpl-3.0 |
lordmuffin/aws-cfn-plex | functions/credstash/botocore/docs/__init__.py | 26 | 1543 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
from botocore.docs.service import ServiceDocumenter
def generate_docs(root_dir, session):
    """Generates the reference documentation for botocore

    This will go through every available AWS service and output ReSTructured
    text files documenting each service.

    :param root_dir: The directory to write the reference files to. Each
        service's reference documentation is located at
        root_dir/reference/services/service-name.rst

    :param session: The botocore session used to enumerate the available
        services and to build each service's documenter.
    """
    services_doc_path = os.path.join(root_dir, 'reference', 'services')
    if not os.path.exists(services_doc_path):
        os.makedirs(services_doc_path)

    # Generate reference docs and write them out.
    for service_name in session.get_available_services():
        docs = ServiceDocumenter(service_name, session).document_service()
        service_doc_path = os.path.join(
            services_doc_path, service_name + '.rst')
        # Documenters return bytes, so write in binary mode.
        with open(service_doc_path, 'wb') as f:
            f.write(docs)
| mit |
milani/cycleindex | tests/test_cyclecount.py | 1 | 1444 | import pytest
import numpy as np
from cycleindex.cyclecount import (
cycle_count
)
@pytest.mark.parametrize("A, expected", [
    (
        np.array([[0, 0.5, 0],
                  [0, 0, -0.5],
                  [0.5, 0, 0]]),
        ([0, 0, -0.12500, 0, 0, 0, 0], [0, 0, 0.12500, 0, 0, 0, 0])
    ),
    (
        np.array([[0, 0.5, 0.5, 0],
                  [0.5, 0, 0.5, 0],
                  [0.5, 0.5, 0, 0.4],
                  [0, 0, 0.4, 0]]),
        ([0, 0.91, 0.25, 0, 0, 0, 0], [0, 0.91, 0.25, 0, 0, 0, 0])
    ),
    (
        np.array([[0, 0.5, 0, 0, 0, 0, 0],
                  [0.5, 0, 0.4, 0.4, 0, 0, 0],
                  [0, 0.4, 0, -0.5, 0.1, 0, 0],
                  [0, 0.4, -0.5, 0, 0, 0.6, 0],
                  [0, 0, 0.1, 0, 0, 0, 0.8],
                  [0, 0, 0, 0.6, 0, 0, 0.7],
                  [0, 0, 0, 0, 0.8, 0.7, 0]]),
        ([0, 2.3200, -0.1600, 0, -0.0336, 0.010752, 0], [0, 2.32, 0.16, 0, 0.0336, 0.010752, 0])
    ),
    (
        np.array([[0, 1, 0, 0, 0, 0, 0],
                  [1, 0, 1, 1, 0, 0, 0],
                  [0, 1, 0, 1, 1, 0, 0],
                  [0, 1, -1, 0, 0, 1, 0],
                  [0, 0, 1, 0, 0, 0, 1],
                  [0, 0, 0, 1, 0, 0, 1],
                  [0, 0, 0, 0, 1, 1, 0]]),
        ([0, 6, 0, 0, 0, 2, 0], [0, 8, 2, 0, 2, 2, 0])
    )
])
def test_cycle_count(A, expected):
    """cycle_count(A, 7) returns the expected (signed, unsigned) counts."""
    # The stray debug print of the result has been removed; it only
    # polluted pytest output and ran the computation twice.
    assert np.allclose(cycle_count(A, 7), expected)
| bsd-3-clause |
mshameers/me | app/api/comment.py | 1 | 1794 | from flask import Blueprint, jsonify, request
from app import db
from app.api import InvalidUsage, LinkHeaderBuilder, parse_json, parse_arg, \
parse_pagination_args, sql_ordering, handle_invalid_usage
from app.api.auth import require_authorization
from app.api.comment_validators import validate_name
from app.models import Post, Comment
bp = Blueprint('comments', __name__, url_prefix='/api')
# @bp.route('/comment/<int:id>', methods=['GET', 'POST', 'PATCH', 'DELETE'])
# # @require_authorization
# def comments(id):
# post = Post.query.get(id)
# if request.method == 'POST':
# if post:
# return replace_post(post)
# else:
# return create_post()
# if not post:
# message = 'No post with id %s.' % id
# raise InvalidUsage(message, 404)
# if request.method == 'GET':
# return retrieve_post(post)
# if request.method == 'PATCH':
# return update_post(post)
# if request.method == 'DELETE':
# return delete_post(post)
@bp.route('/comments', methods=['GET', 'POST'])
# @require_authorization
def comments():
    """Collection endpoint for comments.

    POST creates a new comment via create_comment().  GET (listing) is
    not implemented yet -- for non-POST requests this view falls through
    and returns None, which Flask rejects; presumably a listing handler
    is planned (see the commented-out call below).
    """
    if request.method == 'POST':
        return create_comment()
    # else:
    #     return list_posts()
def create_comment():
    """Create a Comment from the JSON request body and persist it.

    Expects string fields ``name``, ``content`` and ``post_id`` in the
    JSON payload.  Returns a 201 response whose Location header points
    at the newly created comment.
    """
    name = parse_json(request.json, str, 'name')
    content = parse_json(request.json, str, 'content')
    post_id = parse_json(request.json, str, 'post_id')
    validate_name(name)
    # NOTE(review): the client IP is hard-coded here; this presumably
    # should be request.remote_addr -- confirm before changing behavior.
    comment = Comment(name, content, '192.167.1.1', post_id)
    db.session.add(comment)
    db.session.commit()
    response = jsonify(comment.to_json(), message='Created Comment.')
    response.status_code = 201
    response.headers['Location'] = request.base_url + '/' + str(comment.id)
    # Removed the stray debug statement that printed dir(response) and the
    # mimetype to stdout on every request.
    return response
spirrello/spirrello-pynet-work | applied_python/lib/python2.7/site-packages/pylint/checkers/logging.py | 3 | 10888 | # Copyright (c) 2009-2010 Google, Inc.
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""checker for use of Python logging
"""
import six
import astroid
from pylint import checkers
from pylint import interfaces
from pylint.checkers import utils
from pylint.checkers.utils import check_messages
# Message definitions for the logging checker.  NOTE: the help texts for
# E1205 and E1206 were previously swapped ("Too many arguments" was
# described as "too few" and vice versa); they are corrected below.
MSGS = {
    'W1201': ('Specify string format arguments as logging function parameters',
              'logging-not-lazy',
              'Used when a logging statement has a call form of '
              '"logging.<logging method>(format_string % (format_args...))". '
              'Such calls should leave string interpolation to the logging '
              'method itself and be written '
              '"logging.<logging method>(format_string, format_args...)" '
              'so that the program may avoid incurring the cost of the '
              'interpolation in those cases in which no message will be '
              'logged. For more, see '
              'http://www.python.org/dev/peps/pep-0282/.'),
    'W1202': ('Use % formatting in logging functions and pass the % '
              'parameters as arguments',
              'logging-format-interpolation',
              'Used when a logging statement has a call form of '
              '"logging.<logging method>(format_string.format(format_args...))"'
              '. Such calls should use % formatting instead, but leave '
              'interpolation to the logging function by passing the parameters '
              'as arguments.'),
    'E1200': ('Unsupported logging format character %r (%#02x) at index %d',
              'logging-unsupported-format',
              'Used when an unsupported format character is used in a logging\
              statement format string.'),
    'E1201': ('Logging format string ends in middle of conversion specifier',
              'logging-format-truncated',
              'Used when a logging statement format string terminates before\
              the end of a conversion specifier.'),
    'E1205': ('Too many arguments for logging format string',
              'logging-too-many-args',
              'Used when a logging format string is given too many arguments.'),
    'E1206': ('Not enough arguments for logging format string',
              'logging-too-few-args',
              'Used when a logging format string is given too few arguments.'),
    }
# Names of the logging convenience methods whose format-string usage this
# checker inspects.
CHECKED_CONVENIENCE_FUNCTIONS = set(
    'critical debug error exception fatal info warn warning'.split())
def is_method_call(callfunc_node, types=(), methods=()):
    """Determine whether a CallFunc node represents a method call.

    Args:
      callfunc_node: The CallFunc AST node to check.
      types: Optional sequence of caller type names to restrict check.
      methods: Optional sequence of method names to restrict check.

    Returns:
      True, if the node represents a method call for the given type and
      method names, False otherwise.
    """
    if not isinstance(callfunc_node, astroid.Call):
        return False
    bound = utils.safe_infer(callfunc_node.func)
    # Reject anything that is not a bound method on a concrete instance.
    if not isinstance(bound, astroid.BoundMethod):
        return False
    if not isinstance(bound.bound, astroid.Instance):
        return False
    # Empty `types`/`methods` means "accept any".
    if types and bound.bound.name not in types:
        return False
    if methods and bound.name not in methods:
        return False
    return True
class LoggingChecker(checkers.BaseChecker):
    """Checks use of the logging module."""
    # Registers this checker with pylint's AST-walking machinery.
    __implements__ = interfaces.IAstroidChecker
    name = 'logging'
    msgs = MSGS
    options = (('logging-modules',
                {'default': ('logging',),
                 'type': 'csv',
                 'metavar': '<comma separated list>',
                 'help': 'Logging modules to check that the string format '
                         'arguments are in logging function parameter format'}
               ),
              )
    def visit_module(self, node): # pylint: disable=unused-argument
        """Clears any state left in this checker from last module checked."""
        # The code being checked can just as easily "import logging as foo",
        # so it is necessary to process the imports and store in this field
        # what name the logging module is actually given.
        self._logging_names = set()
        logging_mods = self.config.logging_modules
        self._logging_modules = set(logging_mods)
        # Maps package name -> submodule name for dotted logging modules,
        # so that "from pkg import logmod" can be recognized below.
        self._from_imports = {}
        for logging_mod in logging_mods:
            parts = logging_mod.rsplit('.', 1)
            if len(parts) > 1:
                self._from_imports[parts[0]] = parts[1]
    def visit_importfrom(self, node):
        """Checks to see if a module uses a non-Python logging module."""
        try:
            logging_name = self._from_imports[node.modname]
            for module, as_name in node.names:
                if module == logging_name:
                    self._logging_names.add(as_name or module)
        # KeyError: the "from X" package is not one we track — ignore.
        except KeyError:
            pass
    def visit_import(self, node):
        """Checks to see if this module uses Python's built-in logging."""
        for module, as_name in node.names:
            if module in self._logging_modules:
                self._logging_names.add(as_name or module)
    @check_messages(*(MSGS.keys()))
    def visit_call(self, node):
        """Checks calls to logging methods."""
        # True when the call looks like "<logging alias>.<attr>(...)".
        def is_logging_name():
            return (isinstance(node.func, astroid.Attribute) and
                    isinstance(node.func.expr, astroid.Name) and
                    node.func.expr.name in self._logging_names)
        # Returns (True, method_name) when the call resolves to a bound
        # method of logging.Logger (or a subclass of it).
        def is_logger_class():
            try:
                for inferred in node.func.infer():
                    if isinstance(inferred, astroid.BoundMethod):
                        parent = inferred._proxied.parent
                        if (isinstance(parent, astroid.ClassDef) and
                            (parent.qname() == 'logging.Logger' or
                             any(ancestor.qname() == 'logging.Logger'
                                 for ancestor in parent.ancestors()))):
                            return True, inferred._proxied.name
            except astroid.exceptions.InferenceError:
                pass
            return False, None
        if is_logging_name():
            name = node.func.attrname
        else:
            result, name = is_logger_class()
            if not result:
                return
        self._check_log_method(node, name)
    def _check_log_method(self, node, name):
        """Checks calls to logging.log(level, format, *format_args)."""
        # NOTE(review): ``node.starargs`` / ``node.kwargs`` are attributes of
        # the old astroid (<2.0) Call API — confirm the astroid version this
        # file is pinned to before upgrading.
        if name == 'log':
            if node.starargs or node.kwargs or len(node.args) < 2:
                # Either a malformed call, star args, or double-star args. Beyond
                # the scope of this checker.
                return
            # For log(), args[0] is the level; the format string follows it.
            format_pos = 1
        elif name in CHECKED_CONVENIENCE_FUNCTIONS:
            if node.starargs or node.kwargs or not node.args:
                # Either no args, star args, or double-star args. Beyond the
                # scope of this checker.
                return
            format_pos = 0
        else:
            return
        if isinstance(node.args[format_pos], astroid.BinOp) and node.args[format_pos].op == '%':
            self.add_message('logging-not-lazy', node=node)
        elif isinstance(node.args[format_pos], astroid.Call):
            self._check_call_func(node.args[format_pos])
        elif isinstance(node.args[format_pos], astroid.Const):
            self._check_format_string(node, format_pos)
    def _check_call_func(self, callfunc_node):
        """Checks that function call is not format_string.format().
        Args:
          callfunc_node: CallFunc AST node to be checked.
        """
        if is_method_call(callfunc_node, ('str', 'unicode'), ('format',)):
            self.add_message('logging-format-interpolation', node=callfunc_node)
    def _check_format_string(self, node, format_arg):
        """Checks that format string tokens match the supplied arguments.
        Args:
          node: AST node to be checked.
          format_arg: Index of the format string in the node arguments.
        """
        num_args = _count_supplied_tokens(node.args[format_arg + 1:])
        if not num_args:
            # If no args were supplied, then all format strings are valid -
            # don't check any further.
            return
        format_string = node.args[format_arg].value
        if not isinstance(format_string, six.string_types):
            # If the log format is constant non-string (e.g. logging.debug(5)),
            # ensure there are no arguments.
            required_num_args = 0
        else:
            try:
                keyword_args, required_num_args = \
                    utils.parse_format_string(format_string)
                if keyword_args:
                    # Keyword checking on logging strings is complicated by
                    # special keywords - out of scope.
                    return
            except utils.UnsupportedFormatCharacter as ex:
                char = format_string[ex.index]
                self.add_message('logging-unsupported-format', node=node,
                                 args=(char, ord(char), ex.index))
                return
            except utils.IncompleteFormatString:
                self.add_message('logging-format-truncated', node=node)
                return
        if num_args > required_num_args:
            self.add_message('logging-too-many-args', node=node)
        elif num_args < required_num_args:
            self.add_message('logging-too-few-args', node=node)
def _count_supplied_tokens(args):
    """Counts the number of tokens in an args list.
    The Python log functions allow for special keyword arguments: func,
    exc_info and extra. To handle these cases correctly, we only count
    arguments that aren't keywords.
    Args:
      args: List of AST nodes that are arguments for a log format string.
    Returns:
      Number of AST nodes that aren't keywords.
    """
    positional = 0
    for node in args:
        # Keyword arguments (func=, exc_info=, extra=) never consume a
        # %-conversion in the format string, so they are not counted.
        if not isinstance(node, astroid.Keyword):
            positional += 1
    return positional
def register(linter):
    """Required method to auto-register this checker."""
    checker = LoggingChecker(linter)
    linter.register_checker(checker)
| gpl-3.0 |
jrumball/PyKoans | python 3/koans/about_generators.py | 3 | 4604 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Written in place of AboutBlocks in the Ruby Koans
#
# Note: Both blocks and generators use a yield keyword, but they behave
# a lot differently
#
from runner.koan import *
class AboutGenerators(Koan):
    """Koan exercises about Python generators.

    The ``__`` / ``___`` placeholders are intentional: the student replaces
    them with the expected values to make each test pass.
    """
    def test_generating_values_on_the_fly(self):
        result = list()
        bacon_generator = (n + ' bacon' for n in ['crunchy','veggie','danish'])
        for bacon in bacon_generator:
            result.append(bacon)
        self.assertEqual(__, result)
    def test_generators_are_different_to_list_comprehensions(self):
        num_list = [x*2 for x in range(1,3)]
        num_generator = (x*2 for x in range(1,3))
        self.assertEqual(2, num_list[0])
        # A generator has to be iterated through.
        with self.assertRaises(___): num = num_generator[0]
        self.assertEqual(__, list(num_generator)[0]) # This works though
        # Both list comprehensions and generators can be iterated though. However, a generator
        # function is only called on the first iteration. The values are generated on the fly
        # instead of stored.
        #
        # Generators are more memory friendly, but less versatile
    def test_generator_expressions_are_a_one_shot_deal(self):
        dynamite = ('Boom!' for n in range(3))
        attempt1 = list(dynamite)
        attempt2 = list(dynamite)
        self.assertEqual(__, list(attempt1))
        self.assertEqual(__, list(attempt2))
    # ------------------------------------------------------------------
    def simple_generator_method(self):
        yield 'peanut'
        yield 'butter'
        yield 'and'
        yield 'jelly'
    def test_generator_method_will_yield_values_during_iteration(self):
        result = list()
        for item in self.simple_generator_method():
            result.append(item)
        self.assertEqual(__, result)
    def test_coroutines_can_take_arguments(self):
        result = self.simple_generator_method()
        self.assertEqual(__, next(result))
        self.assertEqual(__, next(result))
        result.close()
    # ------------------------------------------------------------------
    def square_me(self, seq):
        for x in seq:
            yield x * x
    def test_generator_method_with_parameter(self):
        result = self.square_me(range(2,5))
        self.assertEqual(__, list(result))
    # ------------------------------------------------------------------
    def sum_it(self, seq):
        value = 0
        for num in seq:
            # The local state of 'value' will be retained between iterations
            value += num
            yield value
    def test_generator_keeps_track_of_local_variables(self):
        result = self.sum_it(range(2,5))
        self.assertEqual(__, list(result))
    # ------------------------------------------------------------------
    def generator_with_coroutine(self):
        result = yield
        yield result
    def test_generators_can_take_coroutines(self):
        generator = self.generator_with_coroutine()
        # THINK ABOUT IT:
        # Why is this line necessary?
        #
        # Hint: Read the "Specification: Sending Values into Generators"
        # section of http://www.python.org/dev/peps/pep-0342/
        next(generator)
        self.assertEqual(__, generator.send(1 + 2))
    def test_before_sending_a_value_to_a_generator_next_must_be_called(self):
        generator = self.generator_with_coroutine()
        try:
            generator.send(1+2)
        except TypeError as ex:
            ex2 = ex
        # NOTE(review): assertRegexpMatches is the deprecated alias of
        # assertRegex — fine on the Python versions the koans target.
        self.assertRegexpMatches(ex2.args[0], __)
    # ------------------------------------------------------------------
    def yield_tester(self):
        value = yield
        if value:
            yield value
        else:
            yield 'no value'
    def test_generators_can_see_if_they_have_been_called_with_a_value(self):
        generator = self.yield_tester()
        next(generator)
        self.assertEqual('with value', generator.send('with value'))
        generator2 = self.yield_tester()
        next(generator2)
        self.assertEqual(__, next(generator2))
    def test_send_none_is_equivelant_to_next(self):
        generator = self.yield_tester()
        next(generator)
        # 'next(generator)' is exactly equivelant to 'generator.send(None)'
        self.assertEqual(__, generator.send(None))
| mit |
ptkool/spark | examples/src/main/python/sql/basic.py | 52 | 6269 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating basic Spark SQL features.
Run with:
./bin/spark-submit examples/src/main/python/sql/basic.py
"""
from __future__ import print_function
# $example on:init_session$
from pyspark.sql import SparkSession
# $example off:init_session$
# $example on:schema_inferring$
from pyspark.sql import Row
# $example off:schema_inferring$
# $example on:programmatic_schema$
# Import data types
from pyspark.sql.types import *
# $example off:programmatic_schema$
def basic_df_example(spark):
    """Demonstrate DataFrame creation, untyped transformations, SQL queries
    and global temporary views.  Expected output is shown in the comments.

    The ``# $example on/off$`` markers are parsed by the Spark documentation
    build and must be kept as-is.
    """
    # $example on:create_df$
    # spark is an existing SparkSession
    df = spark.read.json("examples/src/main/resources/people.json")
    # Displays the content of the DataFrame to stdout
    df.show()
    # +----+-------+
    # | age|   name|
    # +----+-------+
    # |null|Michael|
    # |  30|   Andy|
    # |  19| Justin|
    # +----+-------+
    # $example off:create_df$
    # $example on:untyped_ops$
    # spark, df are from the previous example
    # Print the schema in a tree format
    df.printSchema()
    # root
    # |-- age: long (nullable = true)
    # |-- name: string (nullable = true)
    # Select only the "name" column
    df.select("name").show()
    # +-------+
    # |   name|
    # +-------+
    # |Michael|
    # |   Andy|
    # | Justin|
    # +-------+
    # Select everybody, but increment the age by 1
    df.select(df['name'], df['age'] + 1).show()
    # +-------+---------+
    # |   name|(age + 1)|
    # +-------+---------+
    # |Michael|     null|
    # |   Andy|       31|
    # | Justin|       20|
    # +-------+---------+
    # Select people older than 21
    df.filter(df['age'] > 21).show()
    # +---+----+
    # |age|name|
    # +---+----+
    # | 30|Andy|
    # +---+----+
    # Count people by age
    df.groupBy("age").count().show()
    # +----+-----+
    # | age|count|
    # +----+-----+
    # |  19|    1|
    # |null|    1|
    # |  30|    1|
    # +----+-----+
    # $example off:untyped_ops$
    # $example on:run_sql$
    # Register the DataFrame as a SQL temporary view
    df.createOrReplaceTempView("people")
    sqlDF = spark.sql("SELECT * FROM people")
    sqlDF.show()
    # +----+-------+
    # | age|   name|
    # +----+-------+
    # |null|Michael|
    # |  30|   Andy|
    # |  19| Justin|
    # +----+-------+
    # $example off:run_sql$
    # $example on:global_temp_view$
    # Register the DataFrame as a global temporary view
    df.createGlobalTempView("people")
    # Global temporary view is tied to a system preserved database `global_temp`
    spark.sql("SELECT * FROM global_temp.people").show()
    # +----+-------+
    # | age|   name|
    # +----+-------+
    # |null|Michael|
    # |  30|   Andy|
    # |  19| Justin|
    # +----+-------+
    # Global temporary view is cross-session
    spark.newSession().sql("SELECT * FROM global_temp.people").show()
    # +----+-------+
    # | age|   name|
    # +----+-------+
    # |null|Michael|
    # |  30|   Andy|
    # |  19| Justin|
    # +----+-------+
    # $example off:global_temp_view$
def schema_inference_example(spark):
    """Build a DataFrame from an RDD of Rows, letting Spark infer the schema,
    then query it with SQL.  (``# $example`` markers feed the doc build.)
    """
    # $example on:schema_inferring$
    sc = spark.sparkContext
    # Load a text file and convert each line to a Row.
    lines = sc.textFile("examples/src/main/resources/people.txt")
    parts = lines.map(lambda l: l.split(","))
    people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))
    # Infer the schema, and register the DataFrame as a table.
    schemaPeople = spark.createDataFrame(people)
    schemaPeople.createOrReplaceTempView("people")
    # SQL can be run over DataFrames that have been registered as a table.
    teenagers = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
    # The results of SQL queries are Dataframe objects.
    # rdd returns the content as an :class:`pyspark.RDD` of :class:`Row`.
    teenNames = teenagers.rdd.map(lambda p: "Name: " + p.name).collect()
    for name in teenNames:
        print(name)
    # Name: Justin
    # $example off:schema_inferring$
def programmatic_schema_example(spark):
    """Build a DataFrame by applying an explicitly constructed StructType
    schema to an RDD of tuples.  (``# $example`` markers feed the doc build.)
    """
    # $example on:programmatic_schema$
    sc = spark.sparkContext
    # Load a text file and convert each line to a Row.
    lines = sc.textFile("examples/src/main/resources/people.txt")
    parts = lines.map(lambda l: l.split(","))
    # Each line is converted to a tuple.
    people = parts.map(lambda p: (p[0], p[1].strip()))
    # The schema is encoded in a string.
    schemaString = "name age"
    fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
    schema = StructType(fields)
    # Apply the schema to the RDD.
    schemaPeople = spark.createDataFrame(people, schema)
    # Creates a temporary view using the DataFrame
    schemaPeople.createOrReplaceTempView("people")
    # SQL can be run over DataFrames that have been registered as a table.
    results = spark.sql("SELECT name FROM people")
    results.show()
    # +-------+
    # |   name|
    # +-------+
    # |Michael|
    # |   Andy|
    # | Justin|
    # +-------+
    # $example off:programmatic_schema$
if __name__ == "__main__":
    # $example on:init_session$
    spark = SparkSession \
        .builder \
        .appName("Python Spark SQL basic example") \
        .config("spark.some.config.option", "some-value") \
        .getOrCreate()
    # $example off:init_session$
    # Run the three demos against a single SparkSession, then shut it down.
    basic_df_example(spark)
    schema_inference_example(spark)
    programmatic_schema_example(spark)
    spark.stop()
| apache-2.0 |
DhruvSarma/retrogamelib | retrogamelib/font.py | 8 | 1873 | import pygame
from pygame.locals import *
class Font(object):
    """Fixed-width bitmap font.

    Slices a horizontal strip image (one glyph cell per character, in the
    order of the ``format`` string below) into per-character surfaces and
    tints the pure-white pixels of each glyph with the requested color.
    """

    def __init__(self, font, color=(255, 255, 255)):
        """Load the glyph strip described by *font* and prebuild the letters.

        Args:
            font: dict with ``"file"`` (strip image filename, relative to
                this module) and ``"size"`` ((width, height) of one glyph).
            color: RGB tuple used to tint the white glyph pixels.
        """
        import os
        self.font = font
        self.color = color
        # Dict to hold the tinted letter images, keyed by character.
        self.letters = {}
        format = " abcdefghijklmnopqrstuvwxyz0123456789-+:,.=!)(?><"
        # os.path.join instead of string concatenation for portability.
        strip = pygame.image.load(os.path.join(
            os.path.dirname(__file__), self.font["file"])).convert_alpha()
        cell_w = self.font["size"][0]
        # Note: the original kept a counter `i` that merely mirrored the
        # loop index, and incremented the for-loop variables inside the
        # pixel loops (no-ops in Python); both removed.
        for index, char in enumerate(format):
            # Cut this character's cell out of the strip.
            glyph = pygame.Surface(self.font["size"])
            glyph.blit(strip, (-index * cell_w, 0))
            # Copy only the pure-white pixels, tinted with `color`; every
            # other pixel stays black and is keyed out when blitting.
            tinted = pygame.Surface(self.font["size"])
            tinted.set_colorkey((0, 0, 0), RLEACCEL)
            for y in range(glyph.get_height()):
                for x in range(glyph.get_width()):
                    if glyph.get_at((x, y)) == (255, 255, 255, 255):
                        tinted.set_at((x, y), color)
            self.letters[char] = tinted

    def render(self, text):
        """Return a new Surface with *text* drawn on it.

        Rendering is case-insensitive; characters without a glyph advance
        the pen position without drawing anything.
        """
        text = text.lower()
        img = pygame.Surface((len(text)*self.font["size"][0],
                              self.font["size"][1]))
        img.set_colorkey((0, 0, 0), RLEACCEL)
        pos = 0
        for char in text:
            if char in self.letters:
                img.blit(self.letters[char], (pos, 0))
            pos += self.font["size"][0]
        return img

    def get_width(self):
        """Width in pixels of a single glyph cell."""
        return self.font["size"][0]

    def get_height(self):
        """Height in pixels of a single glyph cell."""
        return self.font["size"][1]
| lgpl-2.1 |
homework/nox | src/nox/coreapps/coretests/test_mod.py | 10 | 1990 | # Copyright 2008 (C) Nicira, Inc.
#
# This file is part of NOX.
#
# NOX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NOX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NOX. If not, see <http://www.gnu.org/licenses/>.
import os, sys
from nox.lib.core import *
from nox.coreapps.testharness.testdefs import *
class ModTestCase(Component):
    """NOX component that sanity-checks nox.lib.core, then exits the process.

    NOTE(review): this file is Python 2 (``except Exception, e`` syntax).
    """
    def __init__(self, ctxt):
        Component.__init__(self, ctxt)
    def configure(self, configuration):
        # No configuration needed for this test component.
        pass
    def getInterface(self):
        return str(ModTestCase)
    def install(self):
        # Defer the checks until NOX has finished bootstrapping.
        self.register_for_bootstrap_complete(self.bootstrap_complete_callback)
    def bootstrap_complete_callback(self, *args):
        self.testImport()
        self.testAttributes()
        sys.stdout.flush() # XXX handle in component::exit
        os._exit(0) # XXX push to component
    def testImport(self):
        """attempt to load nox.lib.core module"""
        try:
            import nox.lib.core
        except Exception, e:
            nox_test_assert(0, "importing nox core")
    def testAttributes(self):
        """verify that core has the appropriate set of attributes"""
        import nox.lib.core
        attributes = ['Component','IN_PORT']
        for attr in attributes:
            # NOTE(review): the assertion message reads "module module " —
            # looks like a doubled word; confirm before fixing, the string
            # may be matched by the test harness.
            nox_test_assert(hasattr(nox.lib.core, attr),
                            "nox.lib.core module module ")
def getFactory():
    """Required NOX hook: return a factory that instantiates this component."""
    class Factory:
        def instance(self, ctxt):
            return ModTestCase(ctxt)
    return Factory()
| gpl-3.0 |
hbrunn/OpenUpgrade | addons/l10n_mx/__init__.py | 975 | 1058 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mikecroucher/GPy | GPy/util/debug.py | 7 | 1127 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
"""
The module for some general debug tools
"""
import numpy as np
def checkFinite(arr, name=None):
    """Check that every entry of *arr* is finite (no NaN or +/-inf).

    Args:
        arr: array-like of numbers.
        name: optional label used in the diagnostic message; defaults to an
            identifier based on ``id(arr)``.

    Returns:
        True when all elements are finite; otherwise prints a diagnostic
        listing the offending (flat) indices and values and returns False.
    """
    if name is None:
        name = 'Array with ID['+str(id(arr))+']'
    arr = np.asarray(arr)
    finite = np.isfinite(arr)  # computed once instead of twice
    if finite.all():
        return True
    # Use flat indices so the report is correct for arrays of any rank:
    # the original np.where(...)[0] returned only first-axis indices, so
    # for ndim > 1 it printed wrong indices and whole rows as "values".
    idx = np.flatnonzero(np.logical_not(finite))
    print(name+' at indices '+str(idx)+' have non-finite values: '
          + str(arr.ravel()[idx])+'!')
    return False
def checkFullRank(m, tol=1e-10, name=None, force_check=False):
    """Heuristically check that a square matrix is numerically full rank.

    The matrix is flagged when the ratio of its smallest to largest (real
    part of the) eigenvalue falls below *tol*.

    Args:
        m: square 2-D array.
        tol: threshold on min(eig)/max(eig) below which the matrix is
            reported as close to singular.
        name: optional label for diagnostics; defaults to an identifier
            based on ``id(m)``.
        force_check: run the check even for matrices of size >= 10000,
            which is otherwise skipped for cost reasons (returning True).

    Returns:
        True when the matrix looks full rank (or the check was skipped);
        False otherwise, with diagnostics printed.
    """
    if name is None:
        name = 'Matrix with ID['+str(id(m))+']'
    assert len(m.shape)==2 and m.shape[0]==m.shape[1], 'The input of checkFullRank has to be a square matrix!'
    if not force_check and m.shape[0]>=10000: # pragma: no cover
        # Message fixed: the original was missing the space before "is".
        print('The size of '+name+' is too big to check (>=10000)!')
        return True
    s = np.real(np.linalg.eigvals(m))
    if s.min()/s.max()<tol:
        # Message fixed: the original misspelled "singular" as "singlar".
        print(name+' is close to singular!')
        print('The eigen values of '+name+' is '+str(s))
        return False
    return True
| bsd-3-clause |
nkatre/php-buildpack | lib/yaml/events.py | 985 | 2445 |
# Abstract classes.
class Event(object):
    """Base class for all YAML events; records the source-span marks."""
    def __init__(self, start_mark=None, end_mark=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
    def __repr__(self):
        # Show only the informative attributes a subclass actually defines;
        # the marks are deliberately omitted to keep the repr compact.
        shown = []
        for key in ['anchor', 'tag', 'implicit', 'value']:
            if hasattr(self, key):
                shown.append('%s=%r' % (key, getattr(self, key)))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(shown))
class NodeEvent(Event):
    """Base for events describing a node; adds the optional anchor name."""
    def __init__(self, anchor, start_mark=None, end_mark=None):
        self.anchor = anchor
        self.start_mark = start_mark
        self.end_mark = end_mark
class CollectionStartEvent(NodeEvent):
    """Base for sequence/mapping start events.

    ``implicit`` records whether the tag can be omitted on output;
    ``flow_style`` selects flow (True) vs block (False) style when emitting.
    """
    def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
            flow_style=None):
        self.anchor = anchor
        self.tag = tag
        self.implicit = implicit
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.flow_style = flow_style
class CollectionEndEvent(Event):
    """Base for sequence/mapping end events; carries only the marks."""
    pass
# Implementations.
class StreamStartEvent(Event):
    """Start of the character stream; remembers the detected encoding."""
    def __init__(self, start_mark=None, end_mark=None, encoding=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.encoding = encoding
class StreamEndEvent(Event):
    """End of the character stream."""
    pass
class DocumentStartEvent(Event):
    """Start of a YAML document.

    ``explicit`` marks a leading "---"; ``version`` / ``tags`` carry any
    %YAML / %TAG directives attached to the document.
    """
    def __init__(self, start_mark=None, end_mark=None,
            explicit=None, version=None, tags=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.explicit = explicit
        self.version = version
        self.tags = tags
class DocumentEndEvent(Event):
    """End of a YAML document; ``explicit`` marks a trailing "..."."""
    def __init__(self, start_mark=None, end_mark=None,
            explicit=None):
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.explicit = explicit
class AliasEvent(NodeEvent):
    """Reference (*alias) to a previously anchored node."""
    pass
class ScalarEvent(NodeEvent):
    """A scalar value.

    ``style`` is the scalar style character (plain/quoted/literal/folded).
    NOTE(review): ``implicit`` here is presumably the (plain, non-plain)
    resolvability pair used elsewhere in PyYAML — confirm with the parser.
    """
    def __init__(self, anchor, tag, implicit, value,
            start_mark=None, end_mark=None, style=None):
        self.anchor = anchor
        self.tag = tag
        self.implicit = implicit
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark
        self.style = style
class SequenceStartEvent(CollectionStartEvent):
    """Start of a sequence node."""
    pass
class SequenceEndEvent(CollectionEndEvent):
    """End of a sequence node."""
    pass
class MappingStartEvent(CollectionStartEvent):
    """Start of a mapping node."""
    pass
class MappingEndEvent(CollectionEndEvent):
    """End of a mapping node."""
    pass
| apache-2.0 |
Hubert51/AutoGrading | learning/web_Haotian/venv/Lib/encodings/cp1125.py | 213 | 34597 | """ Python Character Mapping Codec for CP1125
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP1125 codec backed by the charmap tables below.

    ``encoding_map`` and ``decoding_table`` are module-level tables defined
    later in this (generated) file.
    """
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; CP1125 is stateless, so chunks encode independently."""
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; CP1125 is stateless, so chunks decode independently."""
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer: combines the CP1125 Codec with codecs.StreamWriter."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader: combines the CP1125 Codec with codecs.StreamReader."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record consumed by the codecs registry."""
    return codecs.CodecInfo(
        name='cp1125',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0410, # CYRILLIC CAPITAL LETTER A
0x0081: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x0082: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x0083: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x0084: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x0085: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x0086: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x0087: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x0088: 0x0418, # CYRILLIC CAPITAL LETTER I
0x0089: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x008a: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x008b: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x008c: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x008d: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x008e: 0x041e, # CYRILLIC CAPITAL LETTER O
0x008f: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x0090: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x0091: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x0092: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x0093: 0x0423, # CYRILLIC CAPITAL LETTER U
0x0094: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x0095: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x0096: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x0097: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x0098: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x0099: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x009a: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x009b: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x009c: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x009d: 0x042d, # CYRILLIC CAPITAL LETTER E
0x009e: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009f: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00a3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00a4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00a7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00a8: 0x0438, # CYRILLIC SMALL LETTER I
0x00a9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00aa: 0x043a, # CYRILLIC SMALL LETTER KA
0x00ab: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ac: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ad: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ae: 0x043e, # CYRILLIC SMALL LETTER O
0x00af: 0x043f, # CYRILLIC SMALL LETTER PE
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e3: 0x0443, # CYRILLIC SMALL LETTER U
0x00e4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00e5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00e6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00e7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00e8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00e9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00ea: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00eb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00ec: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ed: 0x044d, # CYRILLIC SMALL LETTER E
0x00ee: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ef: 0x044f, # CYRILLIC SMALL LETTER YA
0x00f0: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00f1: 0x0451, # CYRILLIC SMALL LETTER IO
0x00f2: 0x0490, # CYRILLIC CAPITAL LETTER GHE WITH UPTURN
0x00f3: 0x0491, # CYRILLIC SMALL LETTER GHE WITH UPTURN
0x00f4: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x00f5: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x00f6: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00f7: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00f8: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x00f9: 0x0457, # CYRILLIC SMALL LETTER YI
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x2116, # NUMERO SIGN
0x00fd: 0x00a4, # CURRENCY SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
# 256-character string mapping each byte value (its index in the string)
# directly to the corresponding Unicode character; the charmap codec
# machinery indexes into it when decoding.  The layout appears to be the
# cp1125/RUSCII (Ukrainian DOS) code page -- note GHE WITH UPTURN and the
# Ukrainian vowels at 0xf2-0xf9 -- TODO confirm against the file header.
# NOTE(review): tables like this are normally produced by
# Tools/unicode/gencodec.py; regenerate from the mapping source rather
# than hand-editing individual entries.
decoding_table = (
    '\x00'      # 0x0000 -> NULL
    '\x01'      # 0x0001 -> START OF HEADING
    '\x02'      # 0x0002 -> START OF TEXT
    '\x03'      # 0x0003 -> END OF TEXT
    '\x04'      # 0x0004 -> END OF TRANSMISSION
    '\x05'      # 0x0005 -> ENQUIRY
    '\x06'      # 0x0006 -> ACKNOWLEDGE
    '\x07'      # 0x0007 -> BELL
    '\x08'      # 0x0008 -> BACKSPACE
    '\t'        # 0x0009 -> HORIZONTAL TABULATION
    '\n'        # 0x000a -> LINE FEED
    '\x0b'      # 0x000b -> VERTICAL TABULATION
    '\x0c'      # 0x000c -> FORM FEED
    '\r'        # 0x000d -> CARRIAGE RETURN
    '\x0e'      # 0x000e -> SHIFT OUT
    '\x0f'      # 0x000f -> SHIFT IN
    '\x10'      # 0x0010 -> DATA LINK ESCAPE
    '\x11'      # 0x0011 -> DEVICE CONTROL ONE
    '\x12'      # 0x0012 -> DEVICE CONTROL TWO
    '\x13'      # 0x0013 -> DEVICE CONTROL THREE
    '\x14'      # 0x0014 -> DEVICE CONTROL FOUR
    '\x15'      # 0x0015 -> NEGATIVE ACKNOWLEDGE
    '\x16'      # 0x0016 -> SYNCHRONOUS IDLE
    '\x17'      # 0x0017 -> END OF TRANSMISSION BLOCK
    '\x18'      # 0x0018 -> CANCEL
    '\x19'      # 0x0019 -> END OF MEDIUM
    '\x1a'      # 0x001a -> SUBSTITUTE
    '\x1b'      # 0x001b -> ESCAPE
    '\x1c'      # 0x001c -> FILE SEPARATOR
    '\x1d'      # 0x001d -> GROUP SEPARATOR
    '\x1e'      # 0x001e -> RECORD SEPARATOR
    '\x1f'      # 0x001f -> UNIT SEPARATOR
    ' '         # 0x0020 -> SPACE
    '!'         # 0x0021 -> EXCLAMATION MARK
    '"'         # 0x0022 -> QUOTATION MARK
    '#'         # 0x0023 -> NUMBER SIGN
    '$'         # 0x0024 -> DOLLAR SIGN
    '%'         # 0x0025 -> PERCENT SIGN
    '&'         # 0x0026 -> AMPERSAND
    "'"         # 0x0027 -> APOSTROPHE
    '('         # 0x0028 -> LEFT PARENTHESIS
    ')'         # 0x0029 -> RIGHT PARENTHESIS
    '*'         # 0x002a -> ASTERISK
    '+'         # 0x002b -> PLUS SIGN
    ','         # 0x002c -> COMMA
    '-'         # 0x002d -> HYPHEN-MINUS
    '.'         # 0x002e -> FULL STOP
    '/'         # 0x002f -> SOLIDUS
    '0'         # 0x0030 -> DIGIT ZERO
    '1'         # 0x0031 -> DIGIT ONE
    '2'         # 0x0032 -> DIGIT TWO
    '3'         # 0x0033 -> DIGIT THREE
    '4'         # 0x0034 -> DIGIT FOUR
    '5'         # 0x0035 -> DIGIT FIVE
    '6'         # 0x0036 -> DIGIT SIX
    '7'         # 0x0037 -> DIGIT SEVEN
    '8'         # 0x0038 -> DIGIT EIGHT
    '9'         # 0x0039 -> DIGIT NINE
    ':'         # 0x003a -> COLON
    ';'         # 0x003b -> SEMICOLON
    '<'         # 0x003c -> LESS-THAN SIGN
    '='         # 0x003d -> EQUALS SIGN
    '>'         # 0x003e -> GREATER-THAN SIGN
    '?'         # 0x003f -> QUESTION MARK
    '@'         # 0x0040 -> COMMERCIAL AT
    'A'         # 0x0041 -> LATIN CAPITAL LETTER A
    'B'         # 0x0042 -> LATIN CAPITAL LETTER B
    'C'         # 0x0043 -> LATIN CAPITAL LETTER C
    'D'         # 0x0044 -> LATIN CAPITAL LETTER D
    'E'         # 0x0045 -> LATIN CAPITAL LETTER E
    'F'         # 0x0046 -> LATIN CAPITAL LETTER F
    'G'         # 0x0047 -> LATIN CAPITAL LETTER G
    'H'         # 0x0048 -> LATIN CAPITAL LETTER H
    'I'         # 0x0049 -> LATIN CAPITAL LETTER I
    'J'         # 0x004a -> LATIN CAPITAL LETTER J
    'K'         # 0x004b -> LATIN CAPITAL LETTER K
    'L'         # 0x004c -> LATIN CAPITAL LETTER L
    'M'         # 0x004d -> LATIN CAPITAL LETTER M
    'N'         # 0x004e -> LATIN CAPITAL LETTER N
    'O'         # 0x004f -> LATIN CAPITAL LETTER O
    'P'         # 0x0050 -> LATIN CAPITAL LETTER P
    'Q'         # 0x0051 -> LATIN CAPITAL LETTER Q
    'R'         # 0x0052 -> LATIN CAPITAL LETTER R
    'S'         # 0x0053 -> LATIN CAPITAL LETTER S
    'T'         # 0x0054 -> LATIN CAPITAL LETTER T
    'U'         # 0x0055 -> LATIN CAPITAL LETTER U
    'V'         # 0x0056 -> LATIN CAPITAL LETTER V
    'W'         # 0x0057 -> LATIN CAPITAL LETTER W
    'X'         # 0x0058 -> LATIN CAPITAL LETTER X
    'Y'         # 0x0059 -> LATIN CAPITAL LETTER Y
    'Z'         # 0x005a -> LATIN CAPITAL LETTER Z
    '['         # 0x005b -> LEFT SQUARE BRACKET
    '\\'        # 0x005c -> REVERSE SOLIDUS
    ']'         # 0x005d -> RIGHT SQUARE BRACKET
    '^'         # 0x005e -> CIRCUMFLEX ACCENT
    '_'         # 0x005f -> LOW LINE
    '`'         # 0x0060 -> GRAVE ACCENT
    'a'         # 0x0061 -> LATIN SMALL LETTER A
    'b'         # 0x0062 -> LATIN SMALL LETTER B
    'c'         # 0x0063 -> LATIN SMALL LETTER C
    'd'         # 0x0064 -> LATIN SMALL LETTER D
    'e'         # 0x0065 -> LATIN SMALL LETTER E
    'f'         # 0x0066 -> LATIN SMALL LETTER F
    'g'         # 0x0067 -> LATIN SMALL LETTER G
    'h'         # 0x0068 -> LATIN SMALL LETTER H
    'i'         # 0x0069 -> LATIN SMALL LETTER I
    'j'         # 0x006a -> LATIN SMALL LETTER J
    'k'         # 0x006b -> LATIN SMALL LETTER K
    'l'         # 0x006c -> LATIN SMALL LETTER L
    'm'         # 0x006d -> LATIN SMALL LETTER M
    'n'         # 0x006e -> LATIN SMALL LETTER N
    'o'         # 0x006f -> LATIN SMALL LETTER O
    'p'         # 0x0070 -> LATIN SMALL LETTER P
    'q'         # 0x0071 -> LATIN SMALL LETTER Q
    'r'         # 0x0072 -> LATIN SMALL LETTER R
    's'         # 0x0073 -> LATIN SMALL LETTER S
    't'         # 0x0074 -> LATIN SMALL LETTER T
    'u'         # 0x0075 -> LATIN SMALL LETTER U
    'v'         # 0x0076 -> LATIN SMALL LETTER V
    'w'         # 0x0077 -> LATIN SMALL LETTER W
    'x'         # 0x0078 -> LATIN SMALL LETTER X
    'y'         # 0x0079 -> LATIN SMALL LETTER Y
    'z'         # 0x007a -> LATIN SMALL LETTER Z
    '{'         # 0x007b -> LEFT CURLY BRACKET
    '|'         # 0x007c -> VERTICAL LINE
    '}'         # 0x007d -> RIGHT CURLY BRACKET
    '~'         # 0x007e -> TILDE
    '\x7f'      # 0x007f -> DELETE
    '\u0410'    # 0x0080 -> CYRILLIC CAPITAL LETTER A
    '\u0411'    # 0x0081 -> CYRILLIC CAPITAL LETTER BE
    '\u0412'    # 0x0082 -> CYRILLIC CAPITAL LETTER VE
    '\u0413'    # 0x0083 -> CYRILLIC CAPITAL LETTER GHE
    '\u0414'    # 0x0084 -> CYRILLIC CAPITAL LETTER DE
    '\u0415'    # 0x0085 -> CYRILLIC CAPITAL LETTER IE
    '\u0416'    # 0x0086 -> CYRILLIC CAPITAL LETTER ZHE
    '\u0417'    # 0x0087 -> CYRILLIC CAPITAL LETTER ZE
    '\u0418'    # 0x0088 -> CYRILLIC CAPITAL LETTER I
    '\u0419'    # 0x0089 -> CYRILLIC CAPITAL LETTER SHORT I
    '\u041a'    # 0x008a -> CYRILLIC CAPITAL LETTER KA
    '\u041b'    # 0x008b -> CYRILLIC CAPITAL LETTER EL
    '\u041c'    # 0x008c -> CYRILLIC CAPITAL LETTER EM
    '\u041d'    # 0x008d -> CYRILLIC CAPITAL LETTER EN
    '\u041e'    # 0x008e -> CYRILLIC CAPITAL LETTER O
    '\u041f'    # 0x008f -> CYRILLIC CAPITAL LETTER PE
    '\u0420'    # 0x0090 -> CYRILLIC CAPITAL LETTER ER
    '\u0421'    # 0x0091 -> CYRILLIC CAPITAL LETTER ES
    '\u0422'    # 0x0092 -> CYRILLIC CAPITAL LETTER TE
    '\u0423'    # 0x0093 -> CYRILLIC CAPITAL LETTER U
    '\u0424'    # 0x0094 -> CYRILLIC CAPITAL LETTER EF
    '\u0425'    # 0x0095 -> CYRILLIC CAPITAL LETTER HA
    '\u0426'    # 0x0096 -> CYRILLIC CAPITAL LETTER TSE
    '\u0427'    # 0x0097 -> CYRILLIC CAPITAL LETTER CHE
    '\u0428'    # 0x0098 -> CYRILLIC CAPITAL LETTER SHA
    '\u0429'    # 0x0099 -> CYRILLIC CAPITAL LETTER SHCHA
    '\u042a'    # 0x009a -> CYRILLIC CAPITAL LETTER HARD SIGN
    '\u042b'    # 0x009b -> CYRILLIC CAPITAL LETTER YERU
    '\u042c'    # 0x009c -> CYRILLIC CAPITAL LETTER SOFT SIGN
    '\u042d'    # 0x009d -> CYRILLIC CAPITAL LETTER E
    '\u042e'    # 0x009e -> CYRILLIC CAPITAL LETTER YU
    '\u042f'    # 0x009f -> CYRILLIC CAPITAL LETTER YA
    '\u0430'    # 0x00a0 -> CYRILLIC SMALL LETTER A
    '\u0431'    # 0x00a1 -> CYRILLIC SMALL LETTER BE
    '\u0432'    # 0x00a2 -> CYRILLIC SMALL LETTER VE
    '\u0433'    # 0x00a3 -> CYRILLIC SMALL LETTER GHE
    '\u0434'    # 0x00a4 -> CYRILLIC SMALL LETTER DE
    '\u0435'    # 0x00a5 -> CYRILLIC SMALL LETTER IE
    '\u0436'    # 0x00a6 -> CYRILLIC SMALL LETTER ZHE
    '\u0437'    # 0x00a7 -> CYRILLIC SMALL LETTER ZE
    '\u0438'    # 0x00a8 -> CYRILLIC SMALL LETTER I
    '\u0439'    # 0x00a9 -> CYRILLIC SMALL LETTER SHORT I
    '\u043a'    # 0x00aa -> CYRILLIC SMALL LETTER KA
    '\u043b'    # 0x00ab -> CYRILLIC SMALL LETTER EL
    '\u043c'    # 0x00ac -> CYRILLIC SMALL LETTER EM
    '\u043d'    # 0x00ad -> CYRILLIC SMALL LETTER EN
    '\u043e'    # 0x00ae -> CYRILLIC SMALL LETTER O
    '\u043f'    # 0x00af -> CYRILLIC SMALL LETTER PE
    '\u2591'    # 0x00b0 -> LIGHT SHADE
    '\u2592'    # 0x00b1 -> MEDIUM SHADE
    '\u2593'    # 0x00b2 -> DARK SHADE
    '\u2502'    # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
    '\u2524'    # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
    '\u2561'    # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    '\u2562'    # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    '\u2556'    # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    '\u2555'    # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    '\u2563'    # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    '\u2551'    # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
    '\u2557'    # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
    '\u255d'    # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
    '\u255c'    # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    '\u255b'    # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    '\u2510'    # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
    '\u2514'    # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
    '\u2534'    # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
    '\u252c'    # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    '\u251c'    # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    '\u2500'    # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
    '\u253c'    # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    '\u255e'    # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    '\u255f'    # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    '\u255a'    # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
    '\u2554'    # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
    '\u2569'    # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    '\u2566'    # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    '\u2560'    # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    '\u2550'    # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
    '\u256c'    # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    '\u2567'    # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    '\u2568'    # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    '\u2564'    # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    '\u2565'    # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    '\u2559'    # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    '\u2558'    # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    '\u2552'    # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    '\u2553'    # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    '\u256b'    # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    '\u256a'    # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    '\u2518'    # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
    '\u250c'    # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
    '\u2588'    # 0x00db -> FULL BLOCK
    '\u2584'    # 0x00dc -> LOWER HALF BLOCK
    '\u258c'    # 0x00dd -> LEFT HALF BLOCK
    '\u2590'    # 0x00de -> RIGHT HALF BLOCK
    '\u2580'    # 0x00df -> UPPER HALF BLOCK
    '\u0440'    # 0x00e0 -> CYRILLIC SMALL LETTER ER
    '\u0441'    # 0x00e1 -> CYRILLIC SMALL LETTER ES
    '\u0442'    # 0x00e2 -> CYRILLIC SMALL LETTER TE
    '\u0443'    # 0x00e3 -> CYRILLIC SMALL LETTER U
    '\u0444'    # 0x00e4 -> CYRILLIC SMALL LETTER EF
    '\u0445'    # 0x00e5 -> CYRILLIC SMALL LETTER HA
    '\u0446'    # 0x00e6 -> CYRILLIC SMALL LETTER TSE
    '\u0447'    # 0x00e7 -> CYRILLIC SMALL LETTER CHE
    '\u0448'    # 0x00e8 -> CYRILLIC SMALL LETTER SHA
    '\u0449'    # 0x00e9 -> CYRILLIC SMALL LETTER SHCHA
    '\u044a'    # 0x00ea -> CYRILLIC SMALL LETTER HARD SIGN
    '\u044b'    # 0x00eb -> CYRILLIC SMALL LETTER YERU
    '\u044c'    # 0x00ec -> CYRILLIC SMALL LETTER SOFT SIGN
    '\u044d'    # 0x00ed -> CYRILLIC SMALL LETTER E
    '\u044e'    # 0x00ee -> CYRILLIC SMALL LETTER YU
    '\u044f'    # 0x00ef -> CYRILLIC SMALL LETTER YA
    '\u0401'    # 0x00f0 -> CYRILLIC CAPITAL LETTER IO
    '\u0451'    # 0x00f1 -> CYRILLIC SMALL LETTER IO
    '\u0490'    # 0x00f2 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
    '\u0491'    # 0x00f3 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
    '\u0404'    # 0x00f4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
    '\u0454'    # 0x00f5 -> CYRILLIC SMALL LETTER UKRAINIAN IE
    '\u0406'    # 0x00f6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
    '\u0456'    # 0x00f7 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
    '\u0407'    # 0x00f8 -> CYRILLIC CAPITAL LETTER YI
    '\u0457'    # 0x00f9 -> CYRILLIC SMALL LETTER YI
    '\xb7'      # 0x00fa -> MIDDLE DOT
    '\u221a'    # 0x00fb -> SQUARE ROOT
    '\u2116'    # 0x00fc -> NUMERO SIGN
    '\xa4'      # 0x00fd -> CURRENCY SIGN
    '\u25a0'    # 0x00fe -> BLACK SQUARE
    '\xa0'      # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# The encoding map is the exact inverse of decoding_table: Unicode code
# point -> byte value.  Deriving it here, instead of repeating 256
# literal entries, removes a duplicated table that could silently drift
# out of sync with decoding_table.  Every position of decoding_table
# maps to a distinct character for this code page, so the inversion has
# no collisions and produces a dict equal to the former literal.
encoding_map = {ord(char): byte for byte, char in enumerate(decoding_table)}
| mit |
nishigori/boto | boto/fps/response.py | 153 | 7866 | # Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/
# Copyright (c) 2010 Jason R. Coombs http://www.jaraco.com/
# Copyright (c) 2008 Chris Moyer http://coredumped.org/
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from decimal import Decimal
from boto.compat import filter, map
def ResponseFactory(action):
    """Build a Response subclass specialised for *action*.

    The generated class resolves ``<action>Result`` from this module's
    globals (falling back to a plain ResponseElement) and discards the
    closing ``<action>Response`` tag instead of storing it.
    """
    result_class = globals().get(action + 'Result', ResponseElement)

    class FPSResponse(Response):
        _action = action
        _Result = result_class

        def endElement(self, name, value, connection):
            # The closing <action>Response tag carries no payload; every
            # other closing tag is handled by the generic machinery.
            if name != self._action + 'Response':
                super(FPSResponse, self).endElement(name, value, connection)
    return FPSResponse
class ResponseElement(object):
    """Generic SAX-style node: unknown closing tags become attributes."""

    def __init__(self, connection=None, name=None):
        if connection is not None:
            self._connection = connection
        # Fall back to the class name when no tag name is supplied.
        self._name = name if name else self.__class__.__name__

    @property
    def connection(self):
        return self._connection

    def __repr__(self):
        # Show only the public attributes collected from the document.
        shown = ('{!s}: {!r}'.format(key, value)
                 for key, value in self.__dict__.items()
                 if not key.startswith('_'))
        return '{0}({1})'.format(self.__class__.__name__, ', '.join(shown))

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Our own closing tag carries no data; anything else is stored
        # verbatim as an attribute named after the tag.
        if name != self._name:
            setattr(self, name, value)
class Response(ResponseElement):
    """Top-level response node; routes known children to typed nodes."""

    _action = 'Undefined'

    def startElement(self, name, attrs, connection):
        if name == 'ResponseMetadata':
            child = ResponseElement(name=name)
        elif name == self._action + 'Result':
            # _Result is injected by the FPSResponse subclass that
            # ResponseFactory builds for each action.
            child = self._Result(name=name)
        else:
            return super(Response, self).startElement(name, attrs, connection)
        setattr(self, name, child)
        return child
class ComplexAmount(ResponseElement):
    """A monetary amount with a currency code; Value becomes a Decimal."""

    def __repr__(self):
        return '%s %s' % (self.CurrencyCode, self.Value)

    def __float__(self):
        return float(self.Value)

    def __str__(self):
        return str(self.Value)

    def startElement(self, name, attrs, connection):
        # Only these two children are legal inside an amount element.
        if name not in ('CurrencyCode', 'Value'):
            raise AssertionError(
                'Unrecognized tag {0} in ComplexAmount'.format(name))
        return super(ComplexAmount, self).startElement(name, attrs, connection)

    def endElement(self, name, value, connection):
        # Parse the textual amount into a Decimal before storing it.
        if name == 'Value':
            value = Decimal(value)
        super(ComplexAmount, self).endElement(name, value, connection)
class AmountCollection(ResponseElement):
    """Node whose every child element is parsed as a ComplexAmount."""

    def startElement(self, name, attrs, connection):
        amount = ComplexAmount(name=name)
        setattr(self, name, amount)
        return amount
class AccountBalance(AmountCollection):
    """Amount collection with a nested AvailableBalances collection."""

    def startElement(self, name, attrs, connection):
        if name != 'AvailableBalances':
            return super(AccountBalance, self).startElement(
                name, attrs, connection)
        balances = AmountCollection(name=name)
        setattr(self, name, balances)
        return balances
class GetAccountBalanceResult(ResponseElement):
    """Result wrapper for the GetAccountBalance action."""

    def startElement(self, name, attrs, connection):
        if name != 'AccountBalance':
            return super(GetAccountBalanceResult, self).startElement(
                name, attrs, connection)
        balance = AccountBalance(name=name)
        setattr(self, name, balance)
        return balance
class GetTotalPrepaidLiabilityResult(ResponseElement):
    """Result wrapper for the GetTotalPrepaidLiability action."""

    def startElement(self, name, attrs, connection):
        if name != 'OutstandingPrepaidLiability':
            return super(GetTotalPrepaidLiabilityResult, self).startElement(
                name, attrs, connection)
        liability = AmountCollection(name=name)
        setattr(self, name, liability)
        return liability
class GetPrepaidBalanceResult(ResponseElement):
    """Result wrapper for the GetPrepaidBalance action."""

    def startElement(self, name, attrs, connection):
        if name != 'PrepaidBalance':
            return super(GetPrepaidBalanceResult, self).startElement(
                name, attrs, connection)
        balance = AmountCollection(name=name)
        setattr(self, name, balance)
        return balance
class GetOutstandingDebtBalanceResult(ResponseElement):
    """Result wrapper for the GetOutstandingDebtBalance action."""

    def startElement(self, name, attrs, connection):
        if name != 'OutstandingDebt':
            return super(GetOutstandingDebtBalanceResult, self).startElement(
                name, attrs, connection)
        debt = AmountCollection(name=name)
        setattr(self, name, debt)
        return debt
class TransactionPart(ResponseElement):
    """One leg of a transaction; its FeesPaid child is a ComplexAmount."""

    def startElement(self, name, attrs, connection):
        if name != 'FeesPaid':
            return super(TransactionPart, self).startElement(
                name, attrs, connection)
        fees = ComplexAmount(name=name)
        setattr(self, name, fees)
        return fees
class Transaction(ResponseElement):
    """A transaction: repeated TransactionPart children plus amounts."""

    def __init__(self, *args, **kw):
        # Parts accumulate in document order.
        self.TransactionPart = []
        super(Transaction, self).__init__(*args, **kw)

    def startElement(self, name, attrs, connection):
        if name == 'TransactionPart':
            part = TransactionPart(name=name)
            self.TransactionPart.append(part)
            return part
        if name in ('TransactionAmount', 'FPSFees', 'Balance'):
            amount = ComplexAmount(name=name)
            setattr(self, name, amount)
            return amount
        return super(Transaction, self).startElement(name, attrs, connection)
class GetAccountActivityResult(ResponseElement):
    """Result wrapper holding the list of account Transaction elements."""

    def __init__(self, *args, **kw):
        self.Transaction = []
        super(GetAccountActivityResult, self).__init__(*args, **kw)

    def startElement(self, name, attrs, connection):
        if name != 'Transaction':
            return super(GetAccountActivityResult, self).startElement(
                name, attrs, connection)
        transaction = Transaction(name=name)
        self.Transaction.append(transaction)
        return transaction
class GetTransactionResult(ResponseElement):
    """Result wrapper for a single-transaction lookup."""

    def startElement(self, name, attrs, connection):
        if name != 'Transaction':
            return super(GetTransactionResult, self).startElement(
                name, attrs, connection)
        transaction = Transaction(name=name)
        setattr(self, name, transaction)
        return transaction
class GetTokensResult(ResponseElement):
    """Result wrapper holding the list of Token elements."""

    def __init__(self, *args, **kw):
        self.Token = []
        super(GetTokensResult, self).__init__(*args, **kw)

    def startElement(self, name, attrs, connection):
        if name != 'Token':
            return super(GetTokensResult, self).startElement(
                name, attrs, connection)
        token = ResponseElement(name=name)
        self.Token.append(token)
        return token
| mit |
alrusdi/lettuce | tests/integration/lib/Django-1.3/django/contrib/admindocs/urls.py | 336 | 1089 | from django.conf.urls.defaults import *
from django.contrib.admindocs import views
# Admindocs URL table.  Every entry is named so templates and views can
# reverse() it.  Patterns are raw strings (identical byte values to the
# previous plain literals) per the convention for regexes.
urlpatterns = patterns(
    '',
    url(r'^$', views.doc_index,
        name='django-admindocs-docroot'),
    url(r'^bookmarklets/$', views.bookmarklets,
        name='django-admindocs-bookmarklets'),
    url(r'^tags/$', views.template_tag_index,
        name='django-admindocs-tags'),
    url(r'^filters/$', views.template_filter_index,
        name='django-admindocs-filters'),
    url(r'^views/$', views.view_index,
        name='django-admindocs-views-index'),
    url(r'^views/(?P<view>[^/]+)/$', views.view_detail,
        name='django-admindocs-views-detail'),
    url(r'^models/$', views.model_index,
        name='django-admindocs-models-index'),
    url(r'^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
        views.model_detail,
        name='django-admindocs-models-detail'),
    url(r'^templates/(?P<template>.*)/$', views.template_detail,
        name='django-admindocs-templates'),
)
| gpl-3.0 |
h4wkmoon/shinken | test/test_business_correlator.py | 1 | 63781 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
import re
from shinken_test import *
class TestBusinesscorrel(ShinkenTest):
    def setUp(self):
        # Load the configuration dedicated to business-rule (bp_rule)
        # tests; it defines the db1/db2 services and the Simple_Or /
        # Simple_And correlator services used below.
        self.setup_with_file('etc/shinken_business_correlator.cfg')
# We will try a simple bd1 OR db2
def test_simple_or_business_correlator(self):
#
# Config is not correct because of a wrong relative path
# in the main config file
#
print "Get the hosts and services"
now = time.time()
host = self.sched.hosts.find_by_name("test_host_0")
host.checks_in_progress = []
host.act_depend_of = [] # ignore the router
router = self.sched.hosts.find_by_name("test_router_0")
router.checks_in_progress = []
router.act_depend_of = [] # ignore the router
svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
svc.checks_in_progress = []
svc.act_depend_of = [] # no hostchecks on critical checkresults
svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
self.assert_(svc_bd1.got_business_rule == False)
self.assert_(svc_bd1.business_rule is None)
svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
self.assert_(svc_bd2.got_business_rule == False)
self.assert_(svc_bd2.business_rule is None)
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
self.assert_(svc_cor.got_business_rule == True)
self.assert_(svc_cor.business_rule is not None)
bp_rule = svc_cor.business_rule
self.assert_(bp_rule.operand == '|')
# We check for good parent/childs links
# So svc_cor should be a son of svc_bd1 and svc_bd2
# and bd1 and bd2 should be parents of svc_cor
self.assert_(svc_cor in svc_bd1.child_dependencies)
self.assert_(svc_cor in svc_bd2.child_dependencies)
self.assert_(svc_bd1 in svc_cor.parent_dependencies)
self.assert_(svc_bd2 in svc_cor.parent_dependencies)
sons = bp_rule.sons
print "Sons,", sons
# We've got 2 sons, 2 services nodes
self.assert_(len(sons) == 2)
self.assert_(sons[0].operand == 'service')
self.assert_(sons[0].sons[0] == svc_bd1)
self.assert_(sons[1].operand == 'service')
self.assert_(sons[1].sons[0] == svc_bd2)
# Now state working on the states
self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10']])
self.assert_(svc_bd1.state == 'OK')
self.assert_(svc_bd1.state_type == 'HARD')
self.assert_(svc_bd2.state == 'OK')
self.assert_(svc_bd2.state_type == 'HARD')
state = bp_rule.get_state()
self.assert_(state == 0)
# Now we set the bd1 as soft/CRITICAL
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assert_(svc_bd1.state == 'CRITICAL')
self.assert_(svc_bd1.state_type == 'SOFT')
self.assert_(svc_bd1.last_hard_state_id == 0)
# The business rule must still be 0
state = bp_rule.get_state()
self.assert_(state == 0)
# Now we get bd1 CRITICAL/HARD
self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
self.assert_(svc_bd1.state == 'CRITICAL')
self.assert_(svc_bd1.state_type == 'HARD')
self.assert_(svc_bd1.last_hard_state_id == 2)
# The rule must still be a 0 (or inside)
state = bp_rule.get_state()
self.assert_(state == 0)
# Now we also set bd2 as CRITICAL/HARD... byebye 0 :)
self.scheduler_loop(2, [[svc_bd2, 2, 'CRITICAL | value1=1 value2=2']])
self.assert_(svc_bd2.state == 'CRITICAL')
self.assert_(svc_bd2.state_type == 'HARD')
self.assert_(svc_bd2.last_hard_state_id == 2)
# And now the state of the rule must be 2
state = bp_rule.get_state()
self.assert_(state == 2)
# And If we set one WARNING?
self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
self.assert_(svc_bd2.state == 'WARNING')
self.assert_(svc_bd2.state_type == 'HARD')
self.assert_(svc_bd2.last_hard_state_id == 1)
# Must be WARNING (better no 0 value)
state = bp_rule.get_state()
self.assert_(state == 1)
# We will try a simple bd1 AND db2
def test_simple_and_business_correlator(self):
    """Business rule "db1 & db2": the correlated state must be the worst
    of the two sons' HARD states (SOFT state changes are ignored).
    """
    #
    # Config is not correct because of a wrong relative path
    # in the main config file
    #
    print "Get the hosts and services"
    now = time.time()
    host = self.sched.hosts.find_by_name("test_host_0")
    host.checks_in_progress = []
    host.act_depend_of = [] # ignore the router
    router = self.sched.hosts.find_by_name("test_router_0")
    router.checks_in_progress = []
    router.act_depend_of = [] # ignore the router
    svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
    svc.checks_in_progress = []
    svc.act_depend_of = [] # no hostchecks on critical checkresults
    # db1 and db2 are the plain leaf services: no business rule of their own
    svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
    self.assert_(svc_bd1.got_business_rule == False)
    self.assert_(svc_bd1.business_rule is None)
    svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
    self.assert_(svc_bd2.got_business_rule == False)
    self.assert_(svc_bd2.business_rule is None)
    # Simple_And is the service carrying the "db1 & db2" rule
    svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_And")
    self.assert_(svc_cor.got_business_rule == True)
    self.assert_(svc_cor.business_rule is not None)
    bp_rule = svc_cor.business_rule
    self.assert_(bp_rule.operand == '&')
    sons = bp_rule.sons
    print "Sons,", sons
    # We've got 2 sons, 2 services nodes
    self.assert_(len(sons) == 2)
    self.assert_(sons[0].operand == 'service')
    self.assert_(sons[0].sons[0] == svc_bd1)
    self.assert_(sons[1].operand == 'service')
    self.assert_(sons[1].sons[0] == svc_bd2)
    # Now start working on the states
    self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10']])
    self.assert_(svc_bd1.state == 'OK')
    self.assert_(svc_bd1.state_type == 'HARD')
    self.assert_(svc_bd2.state == 'OK')
    self.assert_(svc_bd2.state_type == 'HARD')
    state = bp_rule.get_state()
    self.assert_(state == 0)
    # Now we set the bd1 as soft/CRITICAL
    self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd1.state == 'CRITICAL')
    self.assert_(svc_bd1.state_type == 'SOFT')
    self.assert_(svc_bd1.last_hard_state_id == 0)
    # The business rule must still be 0
    # because we only want HARD states
    state = bp_rule.get_state()
    self.assert_(state == 0)
    # Now we get bd1 CRITICAL/HARD
    self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd1.state == 'CRITICAL')
    self.assert_(svc_bd1.state_type == 'HARD')
    self.assert_(svc_bd1.last_hard_state_id == 2)
    # The rule must go CRITICAL (AND: one bad son is enough)
    state = bp_rule.get_state()
    self.assert_(state == 2)
    # Now we also set bd2 as WARNING/HARD...
    self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
    self.assert_(svc_bd2.state == 'WARNING')
    self.assert_(svc_bd2.state_type == 'HARD')
    self.assert_(svc_bd2.last_hard_state_id == 1)
    # And now the state of the rule must still be 2 (worst of CRITICAL/WARNING)
    state = bp_rule.get_state()
    self.assert_(state == 2)
    # And if we set the remaining CRITICAL down to WARNING too?
    self.scheduler_loop(2, [[svc_bd1, 1, 'WARNING | value1=1 value2=2']])
    self.assert_(svc_bd1.state == 'WARNING')
    self.assert_(svc_bd1.state_type == 'HARD')
    self.assert_(svc_bd1.last_hard_state_id == 1)
    # Must be WARNING (worst non-0 value of both sons)
    state = bp_rule.get_state()
    self.assert_(state == 1)
# We will try a simple 1of: bd1 OR/AND db2
def test_simple_1of_business_correlator(self):
    """Plain "1 of: db1|db2" rule: absolute threshold, no negation."""
    self.run_simple_1of_business_correlator(with_pct=False, with_neg=False)
# We will try a simple -1of: bd1 OR/AND db2
def test_simple_1of_neg_business_correlator(self):
    """Negated "-1 of: db1|db2" rule: absolute threshold with negation."""
    self.run_simple_1of_business_correlator(with_pct=False, with_neg=True)
# We will try a simple 50%of: bd1 OR/AND db2
def test_simple_1of_pct_business_correlator(self):
    """Percent "50% of: db1|db2" rule: relative threshold, no negation."""
    self.run_simple_1of_business_correlator(with_pct=True, with_neg=False)
# We will try a simple -50%of: bd1 OR/AND db2
def test_simple_1of_pct_neg_business_correlator(self):
    """Negated percent "-50% of: db1|db2" rule."""
    self.run_simple_1of_business_correlator(with_neg=True, with_pct=True)
def run_simple_1of_business_correlator(self, with_pct=False, with_neg=False):
    """Shared driver for the "X of: db1|db2" business rule tests.

    with_pct -- use the percent-threshold service variant ("50%of:")
    with_neg -- use the negated-threshold service variant ("-1of:" / "-50%of:")
    """
    #
    # Config is not correct because of a wrong relative path
    # in the main config file
    #
    print "Get the hosts and services"
    now = time.time()
    host = self.sched.hosts.find_by_name("test_host_0")
    host.checks_in_progress = []
    host.act_depend_of = [] # ignore the router
    router = self.sched.hosts.find_by_name("test_router_0")
    router.checks_in_progress = []
    router.act_depend_of = [] # ignore the router
    svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
    svc.checks_in_progress = []
    svc.act_depend_of = [] # no hostchecks on critical checkresults
    svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
    self.assert_(svc_bd1.got_business_rule == False)
    self.assert_(svc_bd1.business_rule is None)
    svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
    self.assert_(svc_bd2.got_business_rule == False)
    self.assert_(svc_bd2.business_rule is None)
    # Pick the configured service that matches the requested rule variant
    if with_pct is True:
        if with_neg is True:
            svc_cor = self.sched.services.find_srv_by_name_and_hostname(
                "test_host_0", "Simple_1Of_pct_neg")
        else:
            svc_cor = self.sched.services.find_srv_by_name_and_hostname(
                "test_host_0", "Simple_1Of_pct")
    else:
        if with_neg is True:
            svc_cor = self.sched.services.find_srv_by_name_and_hostname(
                "test_host_0", "Simple_1Of_neg")
        else:
            svc_cor = self.sched.services.find_srv_by_name_and_hostname(
                "test_host_0", "Simple_1Of")
    self.assert_(svc_cor.got_business_rule == True)
    self.assert_(svc_cor.business_rule is not None)
    bp_rule = svc_cor.business_rule
    self.assert_(bp_rule.operand == 'of:')
    # A simple 1of: is stored as a triple (threshold, MAX, MAX) == ('1', '2', '2')
    if with_pct is True:
        if with_neg is True:
            self.assert_(bp_rule.of_values == ('-50%', '2', '2'))
        else:
            self.assert_(bp_rule.of_values == ('50%', '2', '2'))
    else:
        if with_neg is True:
            self.assert_(bp_rule.of_values == ('-1', '2', '2'))
        else:
            self.assert_(bp_rule.of_values == ('1', '2', '2'))
    sons = bp_rule.sons
    print "Sons,", sons
    # We've got 2 sons, 2 services nodes
    self.assert_(len(sons) == 2)
    self.assert_(sons[0].operand == 'service')
    self.assert_(sons[0].sons[0] == svc_bd1)
    self.assert_(sons[1].operand == 'service')
    self.assert_(sons[1].sons[0] == svc_bd2)
    # Now start working on the states
    self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10']])
    self.assert_(svc_bd1.state == 'OK')
    self.assert_(svc_bd1.state_type == 'HARD')
    self.assert_(svc_bd2.state == 'OK')
    self.assert_(svc_bd2.state_type == 'HARD')
    state = bp_rule.get_state()
    self.assert_(state == 0)
    # Now we set the bd1 as soft/CRITICAL
    self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd1.state == 'CRITICAL')
    self.assert_(svc_bd1.state_type == 'SOFT')
    self.assert_(svc_bd1.last_hard_state_id == 0)
    # The business rule must still be 0
    # because we only want HARD states
    state = bp_rule.get_state()
    self.assert_(state == 0)
    # Now we get bd1 CRITICAL/HARD
    self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd1.state == 'CRITICAL')
    self.assert_(svc_bd1.state_type == 'HARD')
    self.assert_(svc_bd1.last_hard_state_id == 2)
    # The rule is still OK: one of the two sons is still OK, and 1of is enough
    state = bp_rule.get_state()
    self.assert_(state == 0)
    # Now we also set bd2 as CRITICAL/HARD...
    self.scheduler_loop(2, [[svc_bd2, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd2.state == 'CRITICAL')
    self.assert_(svc_bd2.state_type == 'HARD')
    self.assert_(svc_bd2.last_hard_state_id == 2)
    # And now the state of the rule must be 2: no OK son left
    state = bp_rule.get_state()
    self.assert_(state == 2)
    # And if we soften one son to WARNING now?
    self.scheduler_loop(2, [[svc_bd1, 1, 'WARNING | value1=1 value2=2']])
    self.assert_(svc_bd1.state == 'WARNING')
    self.assert_(svc_bd1.state_type == 'HARD')
    self.assert_(svc_bd1.last_hard_state_id == 1)
    # Must be WARNING (worst non-0 value of both, like for the AND rule)
    state = bp_rule.get_state()
    self.assert_(state == 1)
# We will try a simple 1of: test_router_0 OR/AND test_host_0
def test_simple_1of_business_correlator_with_hosts(self):
    """Plain "1 of:" rule over hosts: absolute threshold, no negation."""
    self.run_simple_1of_business_correlator_with_hosts(with_pct=False, with_neg=False)
# We will try a simple -1of: test_router_0 OR/AND test_host_0
def test_simple_1of_neg_business_correlator_with_hosts(self):
    """Negated "-1 of:" rule over hosts."""
    self.run_simple_1of_business_correlator_with_hosts(with_pct=False, with_neg=True)
# We will try a simple 50%of: test_router_0 OR/AND test_host_0
def test_simple_1of_pct_business_correlator_with_hosts(self):
    """Percent "50% of:" rule over hosts."""
    self.run_simple_1of_business_correlator_with_hosts(with_pct=True, with_neg=False)
# We will try a simple -50%of: test_router_0 OR/AND test_host_0
def test_simple_1of_pct_neg_business_correlator_with_hosts(self):
    """Negated percent "-50% of:" rule over hosts."""
    self.run_simple_1of_business_correlator_with_hosts(with_neg=True, with_pct=True)
def run_simple_1of_business_correlator_with_hosts(self, with_pct=False, with_neg=False):
    """Shared driver for the "X of:" rules whose sons are hosts.

    Only checks the parsed rule structure (operand, of_values, sons);
    unlike the service variant it does not exercise state changes.

    with_pct -- use the percent-threshold service variant
    with_neg -- use the negated-threshold service variant
    """
    #
    # Config is not correct because of a wrong relative path
    # in the main config file
    #
    print "Get the hosts and services"
    now = time.time()
    host = self.sched.hosts.find_by_name("test_host_0")
    host.checks_in_progress = []
    host.act_depend_of = [] # ignore the router
    router = self.sched.hosts.find_by_name("test_router_0")
    router.checks_in_progress = []
    router.act_depend_of = [] # ignore the router
    # Pick the configured service that matches the requested rule variant
    if with_pct is True:
        if with_neg is True:
            svc_cor = self.sched.services.find_srv_by_name_and_hostname(
                "test_host_0", "Simple_1Of_with_host_pct_neg")
        else:
            svc_cor = self.sched.services.find_srv_by_name_and_hostname(
                "test_host_0", "Simple_1Of_with_host_pct")
    else:
        if with_neg is True:
            svc_cor = self.sched.services.find_srv_by_name_and_hostname(
                "test_host_0", "Simple_1Of_with_host_neg")
        else:
            svc_cor = self.sched.services.find_srv_by_name_and_hostname(
                "test_host_0", "Simple_1Of_with_host")
    self.assert_(svc_cor.got_business_rule == True)
    self.assert_(svc_cor.business_rule is not None)
    bp_rule = svc_cor.business_rule
    self.assert_(bp_rule.operand == 'of:')
    # A simple 1of: is stored as a triple (threshold, MAX, MAX) == ('1', '2', '2')
    if with_pct is True:
        if with_neg is True:
            self.assert_(bp_rule.of_values == ('-50%', '2', '2'))
        else:
            self.assert_(bp_rule.of_values == ('50%', '2', '2'))
    else:
        if with_neg is True:
            self.assert_(bp_rule.of_values == ('-1', '2', '2'))
        else:
            self.assert_(bp_rule.of_values == ('1', '2', '2'))
    sons = bp_rule.sons
    print "Sons,", sons
    # We've got 2 sons, 2 host nodes this time
    self.assert_(len(sons) == 2)
    self.assert_(sons[0].operand == 'host')
    self.assert_(sons[0].sons[0] == host)
    self.assert_(sons[1].operand == 'host')
    self.assert_(sons[1].sons[0] == router)
# We will try a simple bd1 OR db2, but this time we will
# schedule a real check and see if it's good
def test_simple_or_business_correlator_with_schedule(self):
    """Business rule "db1 | db2", but driven through real scheduled
    internal checks: launch_check() + scheduler_loop() must propagate the
    rule state into svc_cor's own state/state_type/last_hard_state_id,
    and the rule service must appear in its sons' impacts when degraded.
    """
    #
    # Config is not correct because of a wrong relative path
    # in the main config file
    #
    print "Get the hosts and services"
    now = time.time()
    host = self.sched.hosts.find_by_name("test_host_0")
    host.checks_in_progress = []
    host.act_depend_of = [] # ignore the router
    router = self.sched.hosts.find_by_name("test_router_0")
    router.checks_in_progress = []
    router.act_depend_of = [] # ignore the router
    svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
    svc.checks_in_progress = []
    svc.act_depend_of = [] # no hostchecks on critical checkresults
    svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
    self.assert_(svc_bd1.got_business_rule == False)
    self.assert_(svc_bd1.business_rule is None)
    svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
    self.assert_(svc_bd2.got_business_rule == False)
    self.assert_(svc_bd2.business_rule is None)
    svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
    self.assert_(svc_cor.got_business_rule == True)
    self.assert_(svc_cor.business_rule is not None)
    bp_rule = svc_cor.business_rule
    self.assert_(bp_rule.operand == '|')
    sons = bp_rule.sons
    print "Sons,", sons
    # We've got 2 sons, 2 services nodes
    self.assert_(len(sons) == 2)
    self.assert_(sons[0].operand == 'service')
    self.assert_(sons[0].sons[0] == svc_bd1)
    self.assert_(sons[1].operand == 'service')
    self.assert_(sons[1].sons[0] == svc_bd2)
    # Now start working on the states
    self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10']])
    self.assert_(svc_bd1.state == 'OK')
    self.assert_(svc_bd1.state_type == 'HARD')
    self.assert_(svc_bd2.state == 'OK')
    self.assert_(svc_bd2.state_type == 'HARD')
    state = bp_rule.get_state()
    self.assert_(state == 0)
    print "Launch internal check"
    # launch_check(now-1) queues an internal (not forked) check in the past
    # so it is immediately launchable
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'OK')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 0)
    # Now we set the bd1 as soft/CRITICAL
    self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd1.state == 'CRITICAL')
    self.assert_(svc_bd1.state_type == 'SOFT')
    self.assert_(svc_bd1.last_hard_state_id == 0)
    # The business rule must still be 0 (SOFT states are ignored)
    state = bp_rule.get_state()
    self.assert_(state == 0)
    print "Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'OK')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 0)
    # Now we get bd1 CRITICAL/HARD
    self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd1.state == 'CRITICAL')
    self.assert_(svc_bd1.state_type == 'HARD')
    self.assert_(svc_bd1.last_hard_state_id == 2)
    # The rule must still be 0 (OR: the other son is still OK)
    state = bp_rule.get_state()
    self.assert_(state == 0)
    print "Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'OK')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 0)
    # Now we also set bd2 as CRITICAL/HARD... byebye 0 :)
    self.scheduler_loop(2, [[svc_bd2, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd2.state == 'CRITICAL')
    self.assert_(svc_bd2.state_type == 'HARD')
    self.assert_(svc_bd2.last_hard_state_id == 2)
    # And now the state of the rule must be 2
    state = bp_rule.get_state()
    self.assert_(state == 2)
    # And now svc_cor itself must go CRITICAL/SOFT (first bad result)!
    print "Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'CRITICAL')
    self.assert_(svc_cor.state_type == 'SOFT')
    self.assert_(svc_cor.last_hard_state_id == 0)
    # OK, recheck again: the second bad result goes HARD!
    print "Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'CRITICAL')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 2)
    # And if we soften one son to WARNING?
    self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
    self.assert_(svc_bd2.state == 'WARNING')
    self.assert_(svc_bd2.state_type == 'HARD')
    self.assert_(svc_bd2.last_hard_state_id == 1)
    # Must be WARNING (best non-0 value: OR takes the least-bad son)
    state = bp_rule.get_state()
    self.assert_(state == 1)
    # And svc_cor must follow, directly in HARD
    print "Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'WARNING')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 1)
    print "All elements", bp_rule.list_all_elements()
    print "IMPACT:", svc_bd2.impacts
    for i in svc_bd2.impacts:
        print i.get_name()
    # Assert that Simple_Or is an impact of the problem bd2
    self.assert_(svc_cor in svc_bd2.impacts)
    # and bd1 too
    self.assert_(svc_cor in svc_bd1.impacts)
def test_dep_node_list_elements(self):
svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
self.assert_(svc_bd1.got_business_rule == False)
self.assert_(svc_bd1.business_rule is None)
svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
self.assert_(svc_bd2.got_business_rule == False)
self.assert_(svc_bd2.business_rule is None)
svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_Or")
self.assert_(svc_cor.got_business_rule == True)
self.assert_(svc_cor.business_rule is not None)
bp_rule = svc_cor.business_rule
self.assert_(bp_rule.operand == '|')
print "All elements", bp_rule.list_all_elements()
all_elt = bp_rule.list_all_elements()
self.assert_(svc_bd2 in all_elt)
self.assert_(svc_bd1 in all_elt)
print "DBG: bd2 depend_on_me", svc_bd2.act_depend_of_me
# We will try a full ERP rule and
# schedule a real check and see if it's good
def test_full_erp_rule_with_schedule(self):
    """Full "ERP" rule -- an AND of three OR sub-rules (db, web, lvs) --
    driven through real scheduled internal checks. Verifies state
    propagation into the ERP service, impact registration/cleanup, and
    that one failing leaf per OR branch keeps the ERP OK.
    """
    #
    # Config is not correct because of a wrong relative path
    # in the main config file
    #
    print "Get the hosts and services"
    now = time.time()
    host = self.sched.hosts.find_by_name("test_host_0")
    host.checks_in_progress = []
    host.act_depend_of = [] # ignore the router
    router = self.sched.hosts.find_by_name("test_router_0")
    router.checks_in_progress = []
    router.act_depend_of = [] # ignore the router
    svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
    svc.checks_in_progress = []
    svc.act_depend_of = [] # no hostchecks on critical checkresults
    # All six leaves are plain services: no business rule of their own
    svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
    self.assert_(svc_bd1.got_business_rule == False)
    self.assert_(svc_bd1.business_rule is None)
    svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
    self.assert_(svc_bd2.got_business_rule == False)
    self.assert_(svc_bd2.business_rule is None)
    svc_web1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "web1")
    self.assert_(svc_web1.got_business_rule == False)
    self.assert_(svc_web1.business_rule is None)
    svc_web2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "web2")
    self.assert_(svc_web2.got_business_rule == False)
    self.assert_(svc_web2.business_rule is None)
    svc_lvs1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs1")
    self.assert_(svc_lvs1.got_business_rule == False)
    self.assert_(svc_lvs1.business_rule is None)
    svc_lvs2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs2")
    self.assert_(svc_lvs2.got_business_rule == False)
    self.assert_(svc_lvs2.business_rule is None)
    svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "ERP")
    self.assert_(svc_cor.got_business_rule == True)
    self.assert_(svc_cor.business_rule is not None)
    bp_rule = svc_cor.business_rule
    self.assert_(bp_rule.operand == '&')
    sons = bp_rule.sons
    print "Sons,", sons
    # We've got 3 sons: one OR sub-rule per tier (db, web, lvs)
    self.assert_(len(sons) == 3)
    bd_node = sons[0]
    self.assert_(bd_node.operand == '|')
    self.assert_(bd_node.sons[0].sons[0] == svc_bd1)
    self.assert_(bd_node.sons[1].sons[0] == svc_bd2)
    # Now start working on the states
    self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10']])
    self.assert_(svc_bd1.state == 'OK')
    self.assert_(svc_bd1.state_type == 'HARD')
    self.assert_(svc_bd2.state == 'OK')
    self.assert_(svc_bd2.state_type == 'HARD')
    state = bp_rule.get_state()
    self.assert_(state == 0)
    print "Launch internal check"
    # launch_check(now-1) queues an internal check in the past so it is
    # immediately launchable
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'OK')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 0)
    # Now we set the bd1 as soft/CRITICAL
    self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd1.state == 'CRITICAL')
    self.assert_(svc_bd1.state_type == 'SOFT')
    self.assert_(svc_bd1.last_hard_state_id == 0)
    # The business rule must still be 0 (SOFT states are ignored)
    state = bp_rule.get_state()
    self.assert_(state == 0)
    print "Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "ERP: Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'OK')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 0)
    # Now we get bd1 CRITICAL/HARD
    self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd1.state == 'CRITICAL')
    self.assert_(svc_bd1.state_type == 'HARD')
    self.assert_(svc_bd1.last_hard_state_id == 2)
    # The rule must still be 0: db2 keeps the db OR branch OK
    state = bp_rule.get_state()
    self.assert_(state == 0)
    print "ERP: Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "ERP: Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'OK')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 0)
    # Now we also set bd2 as CRITICAL/HARD... byebye 0 :)
    self.scheduler_loop(2, [[svc_bd2, 2, 'CRITICAL | value1=1 value2=2']])
    self.assert_(svc_bd2.state == 'CRITICAL')
    self.assert_(svc_bd2.state_type == 'HARD')
    self.assert_(svc_bd2.last_hard_state_id == 2)
    # And now the state of the rule must be 2: the db branch is fully down
    state = bp_rule.get_state()
    self.assert_(state == 2)
    # And now ERP itself must go CRITICAL/SOFT (first bad result)!
    print "ERP: Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "ERP: Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'CRITICAL')
    self.assert_(svc_cor.state_type == 'SOFT')
    self.assert_(svc_cor.last_hard_state_id == 0)
    # OK, recheck again: the second bad result goes HARD!
    print "ERP: Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "ERP: Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'CRITICAL')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 2)
    # And if we soften one db to WARNING?
    self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
    self.assert_(svc_bd2.state == 'WARNING')
    self.assert_(svc_bd2.state_type == 'HARD')
    self.assert_(svc_bd2.last_hard_state_id == 1)
    # Must be WARNING (best non-0 value of the db OR branch)
    state = bp_rule.get_state()
    self.assert_(state == 1)
    # And ERP follows, directly in HARD
    print "ERP: Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "ERP: Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'WARNING')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 1)
    print "All elements", bp_rule.list_all_elements()
    print "IMPACT:", svc_bd2.impacts
    for i in svc_bd2.impacts:
        print i.get_name()
    # Assert that the ERP service is an impact of the problem bd2
    self.assert_(svc_cor in svc_bd2.impacts)
    # and bd1 too
    self.assert_(svc_cor in svc_bd1.impacts)
    # And now all is green :)
    self.scheduler_loop(2, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | value1=1 value2=2']])
    print "ERP: Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "ERP: Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'OK')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 0)
    # Impacts must have been cleaned up after recovery
    self.assert_(svc_cor not in svc_bd2.impacts)
    self.assert_(svc_cor not in svc_bd1.impacts)
    # And what if we set 2 services from distinct OR branches CRITICAL?
    # ERP should still be OK: each branch has a surviving member
    self.scheduler_loop(2, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2'], [svc_web1, 2, 'CRITICAL | value1=1 value2=2']])
    print "ERP: Launch internal check"
    svc_cor.launch_check(now-1)
    c = svc_cor.actions[0]
    self.assert_(c.internal == True)
    self.assert_(c.is_launchable(now))
    # ask the scheduler to launch this check
    # and ask 2 loops: one to launch the check
    # and another to integrate the result
    self.scheduler_loop(2, [])
    # We should no longer have the check queued
    self.assert_(len(svc_cor.actions) == 0)
    print "ERP: Look at svc_cor state", svc_cor.state
    # What is the svc_cor state now?
    self.assert_(svc_cor.state == 'OK')
    self.assert_(svc_cor.state_type == 'HARD')
    self.assert_(svc_cor.last_hard_state_id == 0)
# We will try a simple 1of: bd1 OR/AND db2
def test_complex_ABCof_business_correlator(self):
    """Complex "X of: A,B,C,D,E" rule with absolute thresholds."""
    self.run_complex_ABCof_business_correlator(False)
# We will try a simple 1of: bd1 OR/AND db2
def test_complex_ABCof_pct_business_correlator(self):
    """Complex "X of: A,B,C,D,E" rule with percent thresholds."""
    self.run_complex_ABCof_business_correlator(True)
def run_complex_ABCof_business_correlator(self, with_pct=False):
    """Shared driver for the 5-son "of:" rule over services A..E.

    Exercises the (ok_threshold, warn_threshold, crit_threshold) triple,
    both in simple mode (is_of_mul False: only the first value matters)
    and in multi mode (is_of_mul True: warning/critical thresholds apply).

    with_pct -- use the percent-threshold service variant
    """
    #
    # Config is not correct because of a wrong relative path
    # in the main config file
    #
    print "Get the hosts and services"
    now = time.time()
    host = self.sched.hosts.find_by_name("test_host_0")
    host.checks_in_progress = []
    host.act_depend_of = [] # ignore the router
    router = self.sched.hosts.find_by_name("test_router_0")
    router.checks_in_progress = []
    router.act_depend_of = [] # ignore the router
    svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
    svc.checks_in_progress = []
    svc.act_depend_of = [] # no hostchecks on critical checkresults
    # A..E are the five plain leaf services of the rule
    A = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "A")
    self.assert_(A.got_business_rule == False)
    self.assert_(A.business_rule is None)
    B = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "B")
    self.assert_(B.got_business_rule == False)
    self.assert_(B.business_rule is None)
    C = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "C")
    self.assert_(C.got_business_rule == False)
    self.assert_(C.business_rule is None)
    D = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "D")
    self.assert_(D.got_business_rule == False)
    self.assert_(D.business_rule is None)
    E = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "E")
    self.assert_(E.got_business_rule == False)
    self.assert_(E.business_rule is None)
    if with_pct == False:
        svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Complex_ABCOf")
    else:
        svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Complex_ABCOf_pct")
    self.assert_(svc_cor.got_business_rule == True)
    self.assert_(svc_cor.business_rule is not None)
    bp_rule = svc_cor.business_rule
    self.assert_(bp_rule.operand == 'of:')
    if with_pct == False:
        self.assert_(bp_rule.of_values == ('5', '1', '1'))
    else:
        self.assert_(bp_rule.of_values == ('100%', '20%', '20%'))
    sons = bp_rule.sons
    print "Sons,", sons
    # We've got 5 sons, 5 services nodes
    self.assert_(len(sons) == 5)
    self.assert_(sons[0].operand == 'service')
    self.assert_(sons[0].sons[0] == A)
    self.assert_(sons[1].operand == 'service')
    self.assert_(sons[1].sons[0] == B)
    self.assert_(sons[2].operand == 'service')
    self.assert_(sons[2].sons[0] == C)
    self.assert_(sons[3].operand == 'service')
    self.assert_(sons[3].sons[0] == D)
    self.assert_(sons[4].operand == 'service')
    self.assert_(sons[4].sons[0] == E)
    # Now start working on the states
    self.scheduler_loop(1, [[A, 0, 'OK'], [B, 0, 'OK'], [C, 0, 'OK'], [D, 0, 'OK'], [E, 0, 'OK']])
    self.assert_(A.state == 'OK')
    self.assert_(A.state_type == 'HARD')
    self.assert_(B.state == 'OK')
    self.assert_(B.state_type == 'HARD')
    self.assert_(C.state == 'OK')
    self.assert_(C.state_type == 'HARD')
    self.assert_(D.state == 'OK')
    self.assert_(D.state_type == 'HARD')
    self.assert_(E.state == 'OK')
    self.assert_(E.state_type == 'HARD')
    state = bp_rule.get_state()
    self.assert_(state == 0)
    # Now we set the A as soft/CRITICAL
    self.scheduler_loop(1, [[A, 2, 'CRITICAL']])
    self.assert_(A.state == 'CRITICAL')
    self.assert_(A.state_type == 'SOFT')
    self.assert_(A.last_hard_state_id == 0)
    # The business rule must still be 0
    # because we only want HARD states
    state = bp_rule.get_state()
    self.assert_(state == 0)
    # Now we get A CRITICAL/HARD
    self.scheduler_loop(1, [[A, 2, 'CRITICAL']])
    self.assert_(A.state == 'CRITICAL')
    self.assert_(A.state_type == 'HARD')
    self.assert_(A.last_hard_state_id == 2)
    # Only 4 of the 5 required sons are OK, and the critical
    # threshold is 1: the rule goes CRITICAL
    state = bp_rule.get_state()
    self.assert_(state == 2)
    # Now we also set B as CRITICAL/HARD...
    self.scheduler_loop(2, [[B, 2, 'CRITICAL']])
    self.assert_(B.state == 'CRITICAL')
    self.assert_(B.state_type == 'HARD')
    self.assert_(B.last_hard_state_id == 2)
    # And the state of the rule must still be 2
    state = bp_rule.get_state()
    self.assert_(state == 2)
    # And if we set A and B to WARNING now?
    self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 1, 'WARNING']])
    self.assert_(A.state == 'WARNING')
    self.assert_(A.state_type == 'HARD')
    self.assert_(A.last_hard_state_id == 1)
    self.assert_(B.state == 'WARNING')
    self.assert_(B.state_type == 'HARD')
    self.assert_(B.last_hard_state_id == 1)
    # Must be WARNING (worst non-0 value of both, like for the AND rule)
    state = bp_rule.get_state()
    print "state", state
    self.assert_(state == 1)
    # Ok now more fun, with changing of_values and states
    ### W O O O O
    # 4 of: -> Ok (we got 4 OK, and not 4 warn or crit, so it's OK)
    # 5,1,1 -> Warning (at least one warning, and no crit -> warning)
    # 5,2,1 -> OK (we want warning only if we got 2 bad states, so not here)
    self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 0, 'OK']])
    # 4 of: -> 4,5,5
    if with_pct == False:
        bp_rule.of_values = ('4', '5', '5')
    else:
        bp_rule.of_values = ('80%', '100%', '100%')
    bp_rule.is_of_mul = False
    self.assert_(bp_rule.get_state() == 0)
    # 5,1,1
    if with_pct == False:
        bp_rule.of_values = ('5', '1', '1')
    else:
        bp_rule.of_values = ('100%', '20%', '20%')
    bp_rule.is_of_mul = True
    self.assert_(bp_rule.get_state() == 1)
    # 5,2,1
    if with_pct == False:
        bp_rule.of_values = ('5', '2', '1')
    else:
        bp_rule.of_values = ('100%', '40%', '20%')
    bp_rule.is_of_mul = True
    self.assert_(bp_rule.get_state() == 0)
    ### W C O O O
    # 4 of: -> Critical (not 4 OK, so we take the worst state: critical)
    # 4,1,1 -> Critical (2 states raise the warning threshold, but one
    #          raises critical, so the worst state wins: critical)
    self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 2, 'Crit']])
    # 4 of: -> 4,5,5
    if with_pct == False:
        bp_rule.of_values = ('4', '5', '5')
    else:
        bp_rule.of_values = ('80%', '100%', '100%')
    bp_rule.is_of_mul = False
    self.assert_(bp_rule.get_state() == 2)
    # 4,1,1
    if with_pct == False:
        bp_rule.of_values = ('4', '1', '1')
    else:
        # NOTE(review): '40%' here vs '80%' for the same 4,1,1 case
        # below -- possibly intended to be '80%'; confirm.
        bp_rule.of_values = ('40%', '20%', '20%')
    bp_rule.is_of_mul = True
    self.assert_(bp_rule.get_state() == 2)
    ### W C C O O
    # * 2 of: OK
    # * 4,1,1 -> Critical (same as before)
    # * 4,1,3 -> Warning (the warning threshold is reached, the critical one is not)
    self.scheduler_loop(2, [[A, 1, 'WARNING'], [B, 2, 'Crit'], [C, 2, 'Crit']])
    # * 2 of: 2,5,5
    if with_pct == False:
        bp_rule.of_values = ('2', '5', '5')
    else:
        bp_rule.of_values = ('40%', '100%', '100%')
    bp_rule.is_of_mul = False
    self.assert_(bp_rule.get_state() == 0)
    # * 4,1,1
    if with_pct == False:
        bp_rule.of_values = ('4', '1', '1')
    else:
        bp_rule.of_values = ('80%', '20%', '20%')
    bp_rule.is_of_mul = True
    self.assert_(bp_rule.get_state() == 2)
    # * 4,1,3
    if with_pct == False:
        bp_rule.of_values = ('4', '1', '3')
    else:
        bp_rule.of_values = ('80%', '20%', '60%')
    bp_rule.is_of_mul = True
    self.assert_(bp_rule.get_state() == 1)
    # We will try a simple bd1 AND NOT db2
    def test_simple_and_not_business_correlator(self):
        """Business rule 'db1 & !db2': OK only while db1 is OK and db2 is not.

        Walks the rule through several SOFT/HARD state combinations and
        checks the computed state each time (0=OK, 1=WARNING, 2=CRITICAL).
        """
        print "Get the hosts and services"
        now = time.time()
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        router = self.sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore the router
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        # db1 and db2 are plain services, not business rules themselves
        svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
        self.assert_(svc_bd1.got_business_rule == False)
        self.assert_(svc_bd1.business_rule is None)
        svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
        self.assert_(svc_bd2.got_business_rule == False)
        self.assert_(svc_bd2.business_rule is None)
        # Simple_And_not is the correlated service holding the rule
        svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Simple_And_not")
        self.assert_(svc_cor.got_business_rule == True)
        self.assert_(svc_cor.business_rule is not None)
        bp_rule = svc_cor.business_rule
        self.assert_(bp_rule.operand == '&')
        sons = bp_rule.sons
        print "Sons,", sons
        # We've got 2 sons, 2 services nodes
        self.assert_(len(sons) == 2)
        self.assert_(sons[0].operand == 'service')
        self.assert_(sons[0].sons[0] == svc_bd1)
        self.assert_(sons[1].operand == 'service')
        self.assert_(sons[1].sons[0] == svc_bd2)
        # Now start working on the states
        self.scheduler_loop(2, [[svc_bd1, 0, 'OK | value1=1 value2=2'], [svc_bd2, 2, 'CRITICAL | rtt=10']])
        self.assert_(svc_bd1.state == 'OK')
        self.assert_(svc_bd1.state_type == 'HARD')
        self.assert_(svc_bd2.state == 'CRITICAL')
        self.assert_(svc_bd2.state_type == 'HARD')
        # db2 is negated, so db1 OK & db2 CRITICAL -> rule is OK
        state = bp_rule.get_state()
        self.assert_(state == 0)
        # Now we set the bd1 as soft/CRITICAL
        self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
        self.assert_(svc_bd1.state == 'CRITICAL')
        self.assert_(svc_bd1.state_type == 'SOFT')
        self.assert_(svc_bd1.last_hard_state_id == 0)
        # The business rule must still be 0
        # because business rules only look at HARD states
        state = bp_rule.get_state()
        self.assert_(state == 0)
        # Now we get bd1 CRITICAL/HARD
        self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
        self.assert_(svc_bd1.state == 'CRITICAL')
        self.assert_(svc_bd1.state_type == 'HARD')
        self.assert_(svc_bd1.last_hard_state_id == 2)
        # The rule must go CRITICAL
        state = bp_rule.get_state()
        self.assert_(state == 2)
        # Now we also set bd2 as WARNING/HARD...
        self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
        self.assert_(svc_bd2.state == 'WARNING')
        self.assert_(svc_bd2.state_type == 'HARD')
        self.assert_(svc_bd2.last_hard_state_id == 1)
        # And now the state of the rule must be 2
        state = bp_rule.get_state()
        self.assert_(state == 2)
        # And if we set db1 to WARNING too?
        self.scheduler_loop(2, [[svc_bd1, 1, 'WARNING | value1=1 value2=2']])
        self.assert_(svc_bd1.state == 'WARNING')
        self.assert_(svc_bd1.state_type == 'HARD')
        self.assert_(svc_bd1.last_hard_state_id == 1)
        # Must be WARNING (worst non-OK value of the two sides)
        state = bp_rule.get_state()
        self.assert_(state == 1)
        # Now try to get OK in both places; bad for this rule :)
        self.scheduler_loop(2, [[svc_bd1, 0, 'OK | value1=1 value2=2'], [svc_bd2, 0, 'OK | value1=1 value2=2']])
        self.assert_(svc_bd1.state == 'OK')
        self.assert_(svc_bd1.state_type == 'HARD')
        self.assert_(svc_bd1.last_hard_state_id == 0)
        self.assert_(svc_bd2.state == 'OK')
        self.assert_(svc_bd2.state_type == 'HARD')
        self.assert_(svc_bd2.last_hard_state_id == 0)
        # Must be CRITICAL (OK and NOT OK is not OK :) )
        state = bp_rule.get_state()
        self.assert_(state == 2)
    # Business rule with nested layers:
    # (db1 | (db2 & (lvs1|lvs2))) & router
    def test_multi_layers(self):
        """Check a multi-level business rule, its parsed tree and its states.

        The rule is (test_host_0,db1 | (test_host_0,db2 &
        (test_host_0,lvs1|test_host_0,lvs2))) & test_router_0.
        """
        print "Get the hosts and services"
        now = time.time()
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        router = self.sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore the router
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        svc.checks_in_progress = []
        svc.act_depend_of = []  # no hostchecks on critical checkresults
        # THE RULE IS (test_host_0,db1| (test_host_0,db2 & (test_host_0,lvs1|test_host_0,lvs2) ) ) & test_router_0
        svc_lvs1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs1")
        self.assert_(svc_lvs1 is not None)
        svc_lvs2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "lvs2")
        self.assert_(svc_lvs2 is not None)
        svc_bd1 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db1")
        self.assert_(svc_bd1.got_business_rule == False)
        self.assert_(svc_bd1.business_rule is None)
        svc_bd2 = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "db2")
        self.assert_(svc_bd2.got_business_rule == False)
        self.assert_(svc_bd2.business_rule is None)
        svc_cor = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "Multi_levels")
        self.assert_(svc_cor.got_business_rule == True)
        self.assert_(svc_cor.business_rule is not None)
        bp_rule = svc_cor.business_rule
        self.assert_(bp_rule.operand == '&')
        # We check for good parent/childs links
        # So svc_cor should be a son of svc_bd1, svc_bd2 and the router,
        # and bd1/bd2/router should be parents of svc_cor
        self.assert_(svc_cor in svc_bd1.child_dependencies)
        self.assert_(svc_cor in svc_bd2.child_dependencies)
        self.assert_(svc_cor in router.child_dependencies)
        self.assert_(svc_bd1 in svc_cor.parent_dependencies)
        self.assert_(svc_bd2 in svc_cor.parent_dependencies)
        self.assert_(router in svc_cor.parent_dependencies)
        sons = bp_rule.sons
        print "Sons,", sons
        # We've got 2 sons at the top level: the big OR and the router
        self.assert_(len(sons) == 2)
        # Son0 is (test_host_0,db1| (test_host_0,db2 & (test_host_0,lvs1|test_host_0,lvs2) ) )
        son0 = sons[0]
        self.assert_(son0.operand == '|')
        # Son1 is test_router_0
        self.assert_(sons[1].operand == 'host')
        self.assert_(sons[1].sons[0] == router)
        # Son0_0 is test_host_0,db1
        # Son0_1 is test_host_0,db2 & (test_host_0,lvs1|test_host_0,lvs2)
        son0_0 = son0.sons[0]
        son0_1 = son0.sons[1]
        self.assert_(son0_0.operand == 'service')
        self.assert_(son0_0.sons[0] == svc_bd1)
        self.assert_(son0_1.operand == '&')
        # Son0_1_0 is test_host_0,db2
        # Son0_1_1 is test_host_0,lvs1|test_host_0,lvs2
        son0_1_0 = son0_1.sons[0]
        son0_1_1 = son0_1.sons[1]
        self.assert_(son0_1_0.operand == 'service')
        self.assert_(son0_1_0.sons[0] == svc_bd2)
        self.assert_(son0_1_1.operand == '|')
        # Son0_1_1_0 is test_host_0,lvs1
        # Son0_1_1_1 is test_host_0,lvs2
        son0_1_1_0 = son0_1_1.sons[0]
        son0_1_1_1 = son0_1_1.sons[1]
        self.assert_(son0_1_1_0.operand == 'service')
        self.assert_(son0_1_1_0.sons[0] == svc_lvs1)
        self.assert_(son0_1_1_1.operand == 'service')
        self.assert_(son0_1_1_1.sons[0] == svc_lvs2)
        # Now start working on the states
        self.scheduler_loop(1, [[svc_bd2, 0, 'OK | value1=1 value2=2'], [svc_bd1, 0, 'OK | rtt=10'],
                                [svc_lvs1, 0, 'OK'], [svc_lvs2, 0, 'OK'], [router, 0, 'UP'] ])
        self.assert_(svc_bd1.state == 'OK')
        self.assert_(svc_bd1.state_type == 'HARD')
        self.assert_(svc_bd2.state == 'OK')
        self.assert_(svc_bd2.state_type == 'HARD')
        # All is green, the rule should be green too
        state = bp_rule.get_state()
        self.assert_(state == 0)
        # Now we set the bd1 as soft/CRITICAL
        self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
        self.assert_(svc_bd1.state == 'CRITICAL')
        self.assert_(svc_bd1.state_type == 'SOFT')
        self.assert_(svc_bd1.last_hard_state_id == 0)
        # The business rule must still be 0 (SOFT states are ignored)
        state = bp_rule.get_state()
        self.assert_(state == 0)
        # Now we get bd1 CRITICAL/HARD
        self.scheduler_loop(1, [[svc_bd1, 2, 'CRITICAL | value1=1 value2=2']])
        self.assert_(svc_bd1.state == 'CRITICAL')
        self.assert_(svc_bd1.state_type == 'HARD')
        self.assert_(svc_bd1.last_hard_state_id == 2)
        # The rule must still be 0 (the OR side still has db2 & lvs OK)
        state = bp_rule.get_state()
        self.assert_(state == 0)
        # Now we also set bd2 as CRITICAL/HARD... byebye 0 :)
        self.scheduler_loop(2, [[svc_bd2, 2, 'CRITICAL | value1=1 value2=2']])
        self.assert_(svc_bd2.state == 'CRITICAL')
        self.assert_(svc_bd2.state_type == 'HARD')
        self.assert_(svc_bd2.last_hard_state_id == 2)
        # And now the state of the rule must be 2
        state = bp_rule.get_state()
        self.assert_(state == 2)
        # And if we downgrade db2 to WARNING?
        self.scheduler_loop(2, [[svc_bd2, 1, 'WARNING | value1=1 value2=2']])
        self.assert_(svc_bd2.state == 'WARNING')
        self.assert_(svc_bd2.state_type == 'HARD')
        self.assert_(svc_bd2.last_hard_state_id == 1)
        # Must be WARNING (the best available non-OK value)
        state = bp_rule.get_state()
        self.assert_(state == 1)
        # We should now have svc_bd2 and svc_bd1 as root problems
        print "Root problems"
        for p in svc_cor.source_problems:
            print p.get_full_name()
        self.assert_(svc_bd1 in svc_cor.source_problems)
        self.assert_(svc_bd2 in svc_cor.source_problems)
        # What about now with the router DOWN?
        self.scheduler_loop(5, [[router, 2, 'DOWN']])
        self.assert_(router.state == 'DOWN')
        self.assert_(router.state_type == 'HARD')
        self.assert_(router.last_hard_state_id == 1)
        # Must be CRITICAL (CRITICAL VERSUS DOWN -> DOWN)
        state = bp_rule.get_state()
        self.assert_(state == 2)
        # Now our root problem is the router
        print "Root problems"
        for p in svc_cor.source_problems:
            print p.get_full_name()
        self.assert_(router in svc_cor.source_problems)
    # We will try a strange rule that asks UP&UP -> UP and DOWN&DOWN -> UP
    def test_darthelmet_rule(self):
        """Check an unusual host business rule where both children being
        DOWN/HARD brings the rule back to 0 (see the darthelmet config).
        """
        print "Get the hosts and services"
        now = time.time()
        host = self.sched.hosts.find_by_name("test_darthelmet")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        A = self.sched.hosts.find_by_name("test_darthelmet_A")
        B = self.sched.hosts.find_by_name("test_darthelmet_B")
        self.assert_(host.got_business_rule == True)
        self.assert_(host.business_rule is not None)
        bp_rule = host.business_rule
        self.assert_(bp_rule.operand == '|')
        # Now start working on the states
        self.scheduler_loop(3, [[host, 0, 'UP'], [A, 0, 'UP'], [B, 0, 'UP'] ] )
        self.assert_(host.state == 'UP')
        self.assert_(host.state_type == 'HARD')
        self.assert_(A.state == 'UP')
        self.assert_(A.state_type == 'HARD')
        state = bp_rule.get_state()
        print "WTF0", state
        self.assert_(state == 0)
        # Now we set A as soft/DOWN
        self.scheduler_loop(1, [[A, 2, 'DOWN']])
        self.assert_(A.state == 'DOWN')
        self.assert_(A.state_type == 'SOFT')
        self.assert_(A.last_hard_state_id == 0)
        # The business rule must still be 0 (SOFT states are ignored)
        state = bp_rule.get_state()
        self.assert_(state == 0)
        # Now we get A DOWN/HARD
        self.scheduler_loop(3, [[A, 2, 'DOWN']])
        self.assert_(A.state == 'DOWN')
        self.assert_(A.state_type == 'HARD')
        self.assert_(A.last_hard_state_id == 1)
        # The rule must now be 2 (only one of the two hosts is DOWN)
        state = bp_rule.get_state()
        print "WFT", state
        self.assert_(state == 2)
        # Now we also set B as DOWN/HARD, should get back to 0!
        self.scheduler_loop(3, [[B, 2, 'DOWN']])
        self.assert_(B.state == 'DOWN')
        self.assert_(B.state_type == 'HARD')
        self.assert_(B.last_hard_state_id == 1)
        # And now the state of the rule must be 0 again! (strange rule isn't it?)
        state = bp_rule.get_state()
        self.assert_(state == 0)
class TestConfigBroken(ShinkenTest):
"""A class with a broken configuration, where business rules reference unknown hosts/services"""
def setUp(self):
self.setup_with_file('etc/shinken_business_correlator_broken.cfg')
def test_conf_is_correct(self):
#
# Business rules use services which don't exist. We want
# the arbiter to output an error message and exit
# in a controlled manner.
#
print "conf_is_correct", self.conf.conf_is_correct
self.assert_(not self.conf.conf_is_correct)
# Get the arbiter's log broks
[b.prepare() for b in self.broks.values()]
logs = [b.data['log'] for b in self.broks.values() if b.type == 'log']
# Info: Simple_1Of_1unk_svc: my business rule is invalid
# Info: Simple_1Of_1unk_svc: Business rule uses unknown service test_host_0/db3
# Error: [items] In Simple_1Of_1unk_svc is incorrect ; from etc/business_correlator_broken/services.cfg
self.assert_(len([log for log in logs if re.search('Simple_1Of_1unk_svc', log)]) == 3)
self.assert_(len([log for log in logs if re.search('service test_host_0/db3', log)]) == 1)
self.assert_(len([log for log in logs if re.search('Simple_1Of_1unk_svc.+from etc.+business_correlator_broken.+services.cfg', log)]) == 1)
# Info: ERP_unk_svc: my business rule is invalid
# Info: ERP_unk_svc: Business rule uses unknown service test_host_0/web100
# Info: ERP_unk_svc: Business rule uses unknown service test_host_0/lvs100
# Error: [items] In ERP_unk_svc is incorrect ; from etc/business_correlator_broken/services.cfg
self.assert_(len([log for log in logs if re.search('ERP_unk_svc', log)]) == 4)
self.assert_(len([log for log in logs if re.search('service test_host_0/web100', log)]) == 1)
self.assert_(len([log for log in logs if re.search('service test_host_0/lvs100', log)]) == 1)
self.assert_(len([log for log in logs if re.search('ERP_unk_svc.+from etc.+business_correlator_broken.+services.cfg', log)]) == 1)
# Info: Simple_1Of_1unk_host: my business rule is invalid
# Info: Simple_1Of_1unk_host: Business rule uses unknown host test_host_9
# Error: [items] In Simple_1Of_1unk_host is incorrect ; from etc/business_correlator_broken/services.cfg
self.assert_(len([log for log in logs if re.search('Simple_1Of_1unk_host', log)]) == 3)
self.assert_(len([log for log in logs if re.search('host test_host_9', log)]) == 1)
self.assert_(len([log for log in logs if re.search('Simple_1Of_1unk_host.+from etc.+business_correlator_broken.+services.cfg', log)]) == 1)
# Now the number of all failed business rules.
self.assert_(len([log for log in logs if re.search('my business rule is invalid', log)]) == 3)
# Allow running this test module directly with the Python interpreter.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
crosswalk-project/chromium-crosswalk-efl | third_party/protobuf/python/google/protobuf/internal/type_checkers.py | 527 | 12163 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
  FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field types and their
  corresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def GetTypeChecker(cpp_type, field_type):
  """Returns a type checker for a message field of the specified types.

  Args:
    cpp_type: C++ type of the field (see descriptor.py).
    field_type: Protocol message field type (see descriptor.py).

  Returns:
    An instance of TypeChecker which can be used to verify the types
    of values assigned to a field of the specified type.
  """
  is_proto_string = (cpp_type == _FieldDescriptor.CPPTYPE_STRING and
                     field_type == _FieldDescriptor.TYPE_STRING)
  if is_proto_string:
    # "string" fields get the unicode/ASCII validation; "bytes" fields
    # (also CPPTYPE_STRING) fall through to the generic checker table.
    return UnicodeValueChecker()
  return _VALUE_CHECKERS[cpp_type]
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):

  """Type checker used to catch type errors as early as possible
  when the client is setting scalar fields in protocol messages.
  """

  def __init__(self, *acceptable_types):
    self._acceptable_types = acceptable_types

  def CheckValue(self, proposed_value):
    """Raises TypeError unless proposed_value is one of the accepted types."""
    if isinstance(proposed_value, self._acceptable_types):
      return
    raise TypeError('%.1024r has type %s, but expected one of: %s' %
                    (proposed_value, type(proposed_value),
                     self._acceptable_types))
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):

  """Checker used for integer fields.  Performs type-check and range check."""

  def CheckValue(self, proposed_value):
    # Reject non-integers first, then enforce the subclass-defined bounds
    # (_MIN/_MAX are class attributes of the concrete subclasses).
    if not isinstance(proposed_value, (int, long)):
      raise TypeError('%.1024r has type %s, but expected one of: %s' %
                      (proposed_value, type(proposed_value), (int, long)))
    if not self._MIN <= proposed_value <= self._MAX:
      raise ValueError('Value out of range: %d' % proposed_value)
class UnicodeValueChecker(object):

  """Checker used for string fields."""

  def CheckValue(self, proposed_value):
    # unicode objects are always acceptable as-is.
    if isinstance(proposed_value, unicode):
      return
    if not isinstance(proposed_value, str):
      raise TypeError('%.1024r has type %s, but expected one of: %s' %
                      (proposed_value, type(proposed_value), (str, unicode)))
    # Plain 'str' values must decode as 7-bit ASCII.
    try:
      unicode(proposed_value, 'ascii')
    except UnicodeDecodeError:
      raise ValueError('%.1024r has type str, but isn\'t in 7-bit ASCII '
                       'encoding. Non-ASCII strings must be converted to '
                       'unicode objects before being added.' %
                       (proposed_value))
class Int32ValueChecker(IntValueChecker):
  # We're sure to use ints instead of longs here since comparison may be more
  # efficient.
  _MIN = -2147483648  # -(2**31)
  _MAX = 2147483647   # 2**31 - 1
class Uint32ValueChecker(IntValueChecker):
  # Bounds of an unsigned 32-bit integer.
  _MIN = 0
  _MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
  # Bounds of a signed 64-bit integer.
  _MIN = -(1 << 63)
  _MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
  # Bounds of an unsigned 64-bit integer.
  _MIN = 0
  _MAX = (1 << 64) - 1
# Type-checkers for all scalar CPPTYPEs.
_VALUE_CHECKERS = {
    _FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
    _FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
    _FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
    _FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
    # Floating-point fields also accept int/long values.
    _FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
        float, int, long),
    _FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
        float, int, long),
    _FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
    _FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
    _FieldDescriptor.CPPTYPE_STRING: TypeChecker(str),
    }
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type.  This
# byte size includes tag information and any other additional space
# associated with serializing "value".
# (Keyed by FieldDescriptor.TYPE_*, not CPPTYPE_*.)
TYPE_TO_BYTE_SIZE_FN = {
    _FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
    _FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
    _FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
    _FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
    _FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
    _FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
    _FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
    _FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
    _FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
    _FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
    _FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
    _FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
    _FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
    _FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
    _FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
    _FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
    _FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
    _FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
    }
# Maps from field types to encoder constructors.
# (Keyed by FieldDescriptor.TYPE_*, not CPPTYPE_*.)
TYPE_TO_ENCODER = {
    _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
    _FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
    _FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
    _FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
    _FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
    _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
    _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
    _FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
    _FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
    _FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
    _FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
    _FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
    _FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
    _FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
    _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
    _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
    _FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
    _FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
    }
# Maps from field types to sizer constructors.
# (Keyed by FieldDescriptor.TYPE_*, not CPPTYPE_*.)
TYPE_TO_SIZER = {
    _FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
    _FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
    _FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
    _FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
    _FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
    _FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
    _FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
    _FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
    _FieldDescriptor.TYPE_STRING: encoder.StringSizer,
    _FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
    _FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
    _FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
    _FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
    _FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
    _FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
    _FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
    _FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
    _FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
    }
# Maps from field type to a decoder constructor.
# (Keyed by FieldDescriptor.TYPE_*, not CPPTYPE_*.)
TYPE_TO_DECODER = {
    _FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
    _FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
    _FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
    _FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
    _FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
    _FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
    _FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
    _FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
    _FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
    _FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
    _FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
    _FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
    _FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
    _FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
    _FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
    _FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
    _FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
    _FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
    }
# Maps from field type to expected wiretype.
# (Keyed by FieldDescriptor.TYPE_*, values are wire_format.WIRETYPE_*.)
FIELD_TYPE_TO_WIRE_TYPE = {
    _FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_STRING:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
    _FieldDescriptor.TYPE_MESSAGE:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_BYTES:
      wire_format.WIRETYPE_LENGTH_DELIMITED,
    _FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
    _FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
    _FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
    _FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
    }
| bsd-3-clause |
Fokko/incubator-airflow | tests/contrib/operators/test_slack_webhook_operator.py | 2 | 3263 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from airflow import DAG
from airflow.contrib.operators.slack_webhook_operator import SlackWebhookOperator
from airflow.utils import timezone
DEFAULT_DATE = timezone.datetime(2017, 1, 1)
class TestSlackWebhookOperator(unittest.TestCase):
    # Keyword arguments used to instantiate the operator in every test.
    _config = {
        'http_conn_id': 'slack-webhook-default',
        'webhook_token': 'manual_token',
        'message': 'your message here',
        'attachments': [{'fallback': 'Required plain-text summary'}],
        'blocks': [{'type': 'section', 'text': {'type': 'mrkdwn', 'text': '*bold text*'}}],
        'channel': '#general',
        'username': 'SlackMcSlackFace',
        'icon_emoji': ':hankey',
        'icon_url': 'https://airflow.apache.org/_images/pin_large.png',
        'link_names': True,
        'proxy': 'https://my-horrible-proxy.proxyist.com:8080'
    }

    def setUp(self):
        self.dag = DAG('test_dag_id',
                       default_args={'owner': 'airflow', 'start_date': DEFAULT_DATE})

    def _build_operator(self):
        # Helper: construct the operator with the shared test config.
        return SlackWebhookOperator(task_id='slack_webhook_job',
                                    dag=self.dag,
                                    **self._config)

    def test_execute(self):
        """Every config value must land on the matching operator attribute."""
        operator = self._build_operator()
        for attr in ('http_conn_id', 'webhook_token', 'message', 'attachments',
                     'blocks', 'channel', 'username', 'icon_emoji', 'icon_url',
                     'link_names', 'proxy'):
            self.assertEqual(self._config[attr], getattr(operator, attr))

    def test_assert_templated_fields(self):
        """The operator must declare exactly the expected templated fields."""
        operator = self._build_operator()
        self.assertEqual(operator.template_fields,
                         ['webhook_token', 'message', 'attachments', 'blocks',
                          'channel', 'username', 'proxy'])
unittest.main()
| apache-2.0 |
mic4ael/indico | bin/maintenance/build-assets.py | 1 | 10446 | #!/usr/bin/env python
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import errno
import json
import os
import re
import shutil
import subprocess
import sys
from contextlib import contextmanager
import click
import yaml
from setuptools import find_packages
def fail(message, *args, **kwargs):
    """Print a formatted error (bold red) to stderr and exit with status 1.

    An optional ``verbose_msg`` keyword argument is echoed verbatim after
    the error line.
    """
    text = 'Error: ' + message.format(*args)
    click.echo(click.style(text, fg='red', bold=True), err=True)
    if 'verbose_msg' in kwargs:
        click.echo(kwargs['verbose_msg'], err=True)
    sys.exit(1)
def warn(message, *args):
    """Print a formatted warning (bold yellow) to stderr."""
    click.echo(click.style(message.format(*args), fg='yellow', bold=True), err=True)
def info(message, *args):
    """Print a formatted info message (bold green) to stderr."""
    click.echo(click.style(message.format(*args), fg='green', bold=True), err=True)
def step(message, *args):
    """Print a formatted build-step message (bold white) to stderr."""
    click.echo(click.style(message.format(*args), fg='white', bold=True), err=True)
def _get_webpack_build_config(url_root='/'):
    """Build the webpack config dict for the Indico core.

    :param url_root: URL root from which the static assets are served.

    Reads the core theme definitions from ``indico/modules/events/themes.yaml``
    and returns the paths/URLs webpack needs plus the themes that declare a
    (print) stylesheet.  NOTE: uses ``dict.viewitems()``, i.e. this script
    targets Python 2.
    """
    with open('indico/modules/events/themes.yaml') as f:
        themes = yaml.safe_load(f.read())
    root_path = os.path.abspath('indico')
    return {
        'build': {
            'baseURLPath': url_root,
            'clientPath': os.path.join(root_path, 'web', 'client'),
            'rootPath': root_path,
            # url_map.json lives next to the 'indico' package dir
            'urlMapPath': os.path.normpath(os.path.join(root_path, '..', 'url_map.json')),
            'staticPath': os.path.join(root_path, 'web', 'static'),
            'staticURL': url_root.rstrip('/') + '/',
            'distPath': os.path.join(root_path, 'web', 'static', 'dist'),
            'distURL': os.path.join(url_root, 'dist/')
        },
        # only themes that actually declare at least one stylesheet
        'themes': {key: {'stylesheet': theme['stylesheet'], 'print_stylesheet': theme.get('print_stylesheet')}
                   for key, theme in themes['definitions'].viewitems()
                   if set(theme) & {'stylesheet', 'print_stylesheet'}}
    }
def _get_plugin_bundle_config(plugin_dir):
try:
with open(os.path.join(plugin_dir, 'webpack-bundles.json')) as f:
return json.load(f)
except IOError as e:
if e.errno == errno.ENOENT:
return {}
raise
def _get_plugin_build_deps(plugin_dir):
try:
with open(os.path.join(plugin_dir, 'required-build-plugins.json')) as f:
return json.load(f)
except IOError as e:
if e.errno == errno.ENOENT:
return []
raise
def _parse_plugin_theme_yaml(plugin_yaml):
    """Parse a plugin's theme YAML, merged with the core themes file.

    Returns only the plugin's theme definitions that declare a stylesheet
    or a print stylesheet.  NOTE: uses ``dict.viewitems()`` (Python 2).
    """
    # This is very similar to what ThemeSettingsProxy does
    with open('indico/modules/events/themes.yaml') as f:
        core_data = f.read()
    # Prefix every core top-level key with '__core_' before concatenating
    # the two YAML documents, then drop the prefixed entries after parsing
    # so only the plugin's own keys remain (presumably the concatenation
    # lets the plugin YAML reuse anchors from the core file — TODO confirm).
    core_data = re.sub(r'^(\S+:)$', r'__core_\1', core_data, flags=re.MULTILINE)
    settings = {k: v
                for k, v in yaml.safe_load(core_data + '\n' + plugin_yaml).viewitems()
                if not k.startswith('__core_')}
    return {name: {'stylesheet': theme['stylesheet'], 'print_stylesheet': theme.get('print_stylesheet')}
            for name, theme in settings.get('definitions', {}).viewitems()
            if set(theme) & {'stylesheet', 'print_stylesheet'}}
def _get_plugin_themes(plugin_dir):
    """Return the theme definitions declared by a plugin ({} if none).

    The plugin opts in by naming a theme YAML file under the
    ``indicoTheme`` key of its webpack bundle config.
    """
    bundle_config = _get_plugin_bundle_config(plugin_dir)
    if 'indicoTheme' not in bundle_config:
        return {}
    theme_path = os.path.join(plugin_dir, bundle_config['indicoTheme'])
    with open(theme_path) as f:
        return _parse_plugin_theme_yaml(f.read())
def _get_plugin_webpack_build_config(plugin_dir, url_root='/'):
    """Build the webpack config dict for a single Indico plugin.

    :param plugin_dir: path to the plugin's source checkout
    :param url_root: URL root from which the static assets are served
    """
    core_config = _get_webpack_build_config(url_root)
    packages = [x for x in find_packages(plugin_dir) if '.' not in x]
    # A plugin checkout is expected to contain exactly one top-level package.
    assert len(packages) == 1
    plugin_root_path = os.path.join(plugin_dir, packages[0])
    plugin_name = packages[0].replace('indico_', '')  # XXX: find a better solution for this
    return {
        'isPlugin': True,
        'plugin': plugin_name,
        # the plugin build also needs the core build paths
        'indico': {
            'build': core_config['build']
        },
        'build': {
            'indicoSourcePath': os.path.abspath('.'),
            'clientPath': os.path.join(plugin_root_path, 'client'),
            'rootPath': plugin_root_path,
            'urlMapPath': os.path.join(plugin_dir, 'url_map.json'),
            'staticPath': os.path.join(plugin_root_path, 'static'),
            'staticURL': os.path.join(url_root, 'static', 'plugins', plugin_name) + '/',
            'distPath': os.path.join(plugin_root_path, 'static', 'dist'),
            'distURL': os.path.join(url_root, 'static', 'plugins', plugin_name, 'dist/')
        },
        'themes': _get_plugin_themes(plugin_dir),
    }
def _get_webpack_args(dev, watch):
args = ['--mode', 'development' if dev else 'production']
if watch:
args.append('--watch')
return args
@click.group()
def cli():
    # All build commands run relative to the indico source root: chdir two
    # levels up from this script (presumably bin/maintenance/ -> repo root,
    # given the relative 'bin/maintenance/dump_url_map.py' calls below).
    os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
def _common_build_options(allow_watch=True):
    """Decorator factory adding the CLI options shared by all build commands.

    The options are applied in a fixed order so they always show up
    consistently in ``--help``.
    """
    options = [
        click.option('--dev', is_flag=True, default=False, help="Build in dev mode"),
        click.option('--clean/--no-clean', default=None,
                     help="Delete everything in dist. This is disabled by default for `--dev` builds."),
        click.option('--url-root', default='/', metavar='PATH',
                     help='URL root from which the assets are loaded. '
                          'Defaults to / and should usually not be changed'),
    ]
    if allow_watch:
        options.append(click.option('--watch', is_flag=True, default=False,
                                    help="Run the watcher to rebuild on changes"))

    def decorator(fn):
        for option in options:
            fn = option(fn)
        return fn

    return decorator
def _clean(webpack_build_config, plugin_dir=None):
    """Delete the dist dir referenced by a webpack build config, if any."""
    dist_path = webpack_build_config['build']['distPath']
    if not os.path.exists(dist_path):
        return
    # show the path relative to the plugin dir (or cwd for core builds)
    warn('deleting ' + os.path.relpath(dist_path, plugin_dir or os.curdir))
    shutil.rmtree(dist_path)
@cli.command('indico', short_help='Builds assets of Indico.')
@_common_build_options()
def build_indico(dev, clean, watch, url_root):
    """Run webpack to build assets"""
    # production builds default to a clean dist dir; dev builds keep it
    if clean is None:
        clean = not dev
    config_file = 'webpack-build-config.json'
    config = _get_webpack_build_config(url_root)
    with open(config_file, 'w') as f:
        json.dump(config, f, indent=2, sort_keys=True)
    if clean:
        _clean(config)
    # regenerate the URL map; force-overwrite it on clean/production builds
    url_map_args = ['--output', config['build']['urlMapPath']]
    if clean or not dev:
        url_map_args.append('--force')
    subprocess.check_call(['python', 'bin/maintenance/dump_url_map.py'] + url_map_args)
    try:
        subprocess.check_call(['npx', 'webpack'] + _get_webpack_args(dev, watch))
    except subprocess.CalledProcessError:
        fail('running webpack failed')
    finally:
        # the build config file is only kept around for dev builds
        if not dev:
            os.unlink(config_file)
def _validate_plugin_dir(ctx, param, value):
    """Click callback ensuring *value* looks like a buildable plugin dir."""
    if not os.path.exists(os.path.join(value, 'setup.py')):
        raise click.BadParameter('no setup.py found in {}'.format(value))
    webpack_files = ('webpack.config.js', 'webpack-bundles.json')
    if not any(os.path.exists(os.path.join(value, name)) for name in webpack_files):
        raise click.BadParameter('no webpack.config.js or webpack-bundles.json found in {}'.format(value))
    return value
def _is_plugin_dir(path):
    """Return whether *path* passes the plugin-dir validation."""
    try:
        _validate_plugin_dir(None, None, path)
        return True
    except click.BadParameter:
        return False
@contextmanager
def _chdir(path):
cwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(cwd)
@cli.command('plugin', short_help='Builds assets of a plugin.')
@click.argument('plugin_dir', type=click.Path(exists=True, file_okay=False, resolve_path=True),
                callback=_validate_plugin_dir)
@_common_build_options()
def build_plugin(plugin_dir, dev, clean, watch, url_root):
    """Run webpack to build plugin assets"""
    # default to a clean build unless this is an (incremental) dev build
    clean = clean or (clean is None and not dev)
    # write the generated build config where the plugin's webpack can find it
    webpack_build_config_file = os.path.join(plugin_dir, 'webpack-build-config.json')
    webpack_build_config = _get_plugin_webpack_build_config(plugin_dir, url_root)
    with open(webpack_build_config_file, 'w') as f:
        json.dump(webpack_build_config, f, indent=2, sort_keys=True)
    if clean:
        _clean(webpack_build_config, plugin_dir)
    # regenerate the URL map for this plugin plus its build-time plugin
    # dependencies; `--force` is passed for clean/production builds
    force_url_map = ['--force'] if clean or not dev else []
    url_map_path = webpack_build_config['build']['urlMapPath']
    dump_plugin_args = ['--plugin', webpack_build_config['plugin']]
    for name in _get_plugin_build_deps(plugin_dir):
        dump_plugin_args += ['--plugin', name]
    subprocess.check_call(['python', 'bin/maintenance/dump_url_map.py',
                           '--output', url_map_path] + dump_plugin_args + force_url_map)
    # plugins without their own webpack config fall back to the shared
    # `plugin.webpack.config.js` (resolved relative to the indico root, cf. `cli`)
    webpack_config_file = os.path.join(plugin_dir, 'webpack.config.js')
    if not os.path.exists(webpack_config_file):
        webpack_config_file = 'plugin.webpack.config.js'
    # install the plugin's own JS dependencies if it declares any
    if os.path.exists(os.path.join(plugin_dir, 'package.json')):
        with _chdir(plugin_dir):
            try:
                subprocess.check_call(['npm', 'install', '--quiet'])
            except subprocess.CalledProcessError:
                fail('running npm failed')
    args = _get_webpack_args(dev, watch)
    args += ['--config', webpack_config_file]
    # let webpack resolve indico's node_modules and locate the plugin sources
    os.environ['NODE_PATH'] = os.path.abspath('node_modules')
    os.environ['INDICO_PLUGIN_ROOT'] = plugin_dir
    try:
        subprocess.check_call(['npx', 'webpack'] + args)
    except subprocess.CalledProcessError:
        fail('running webpack failed')
    finally:
        # NOTE(review): the build config is kept in dev mode — presumably for
        # `--watch` rebuilds; confirm before changing this
        if not dev:
            os.unlink(webpack_build_config_file)
@cli.command('all-plugins', short_help='Builds assets of all plugins in a directory.')
@click.argument('plugins_dir', type=click.Path(exists=True, file_okay=False, resolve_path=True))
@_common_build_options(allow_watch=False)
@click.pass_context
def build_all_plugins(ctx, plugins_dir, dev, clean, url_root):
    """Run webpack to build plugin assets"""
    # build every valid plugin dir, in deterministic (alphabetical) order
    plugin_names = sorted(name for name in os.listdir(plugins_dir)
                          if _is_plugin_dir(os.path.join(plugins_dir, name)))
    for plugin_name in plugin_names:
        step('plugin: {}', plugin_name)
        ctx.invoke(build_plugin, plugin_dir=os.path.join(plugins_dir, plugin_name),
                   dev=dev, clean=clean, watch=False, url_root=url_root)
# Entry point when the script is executed directly (not imported).
if __name__ == '__main__':
    cli()
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.