gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
"""
Test classes and functions for retail calendar.
Use unittest module as the main test framework.
All the test cases in this module assume default values of class parameters:
1) fiscal year starts on Aug 1st.
2) Retail calendar's end date is the last Saturday of July.
It is hard to unit-test with general, variable class parameters.
"""
from datetime import date, timedelta
import unittest
from freezegun import freeze_time
from calendars.calendars import RetailDate
class RetailDateTest(unittest.TestCase):
    """
    Smoke tests for the string-formatting properties of
    calendars.RetailDate.
    """

    def test_string_output(self):
        # 2015-12-31 falls inside retail year 2016 (26-Jul-2015 .. 30-Jul-2016).
        subject = RetailDate(date(2015, 12, 31))
        self.assertEqual("2016 (26-Jul-2015 - 30-Jul-2016)", subject.year_dates_string)
        self.assertEqual("2015 - 2016", subject.year_string)
class RetailQuarterStartEnd(unittest.TestCase):
    """
    Verify quarter_start_date and quarter_end_date properties of RetailDate.
    """

    def _verify_retail_quarter(self, input_date, expected_quarter_start, expected_quarter_end):
        # One probe: both quarter boundaries must match for the given date.
        retail = RetailDate(input_date)
        self.assertEqual(retail.quarter_start_date, expected_quarter_start)
        self.assertEqual(retail.quarter_end_date, expected_quarter_end)

    def test_year_2004(self):
        # (input date, expected quarter start, expected quarter end) for FY2004.
        cases = [
            (date(2003, 7, 27), date(2003, 7, 27), date(2003, 10, 25)),
            (date(2003, 11, 2), date(2003, 10, 26), date(2004, 1, 24)),
            (date(2004, 2, 1), date(2004, 1, 25), date(2004, 4, 24)),
            (date(2004, 5, 2), date(2004, 4, 25), date(2004, 7, 31)),
        ]
        for given, q_start, q_end in cases:
            self._verify_retail_quarter(given, q_start, q_end)
class QuarterNumberTest(unittest.TestCase):
    """
    Verify the RetailDate.quarter property at the first and last day of
    each retail quarter, plus a few mid-quarter days.
    """

    def _quarter_number(self, dategiven):
        """
        An internal function to minimize changes in tests.
        """
        return RetailDate(dategiven).quarter

    def test_quarter_number_2004(self):
        # 2004: start date and end date of Q1-Q4
        self.assertEqual(1, self._quarter_number(date(2003, 7, 27)))
        self.assertEqual(1, self._quarter_number(date(2003, 10, 25)))
        self.assertEqual(2, self._quarter_number(date(2003, 10, 26)))
        self.assertEqual(2, self._quarter_number(date(2003, 11, 1)))
        # Bug fix: Q2 of retail year 2004 ends on 24-Jan-2004; the test
        # previously probed 24-Jan-2003 (a date in retail year 2003 that
        # only coincidentally also falls in a Q2).
        self.assertEqual(2, self._quarter_number(date(2004, 1, 24)))
        self.assertEqual(3, self._quarter_number(date(2004, 1, 25)))
        self.assertEqual(3, self._quarter_number(date(2004, 2, 1)))
        self.assertEqual(3, self._quarter_number(date(2004, 4, 24)))
        self.assertEqual(4, self._quarter_number(date(2004, 4, 25)))
        self.assertEqual(4, self._quarter_number(date(2004, 5, 1)))
        self.assertEqual(4, self._quarter_number(date(2004, 7, 31)))

    def test_quarter_number_2010(self):
        # 2010: start date and end date of Q1-Q4
        self.assertEqual(1, self._quarter_number(date(2009, 7, 26)))
        self.assertEqual(1, self._quarter_number(date(2009, 10, 24)))
        self.assertEqual(2, self._quarter_number(date(2009, 10, 25)))
        self.assertEqual(2, self._quarter_number(date(2009, 11, 1)))
        self.assertEqual(2, self._quarter_number(date(2010, 1, 23)))
        self.assertEqual(3, self._quarter_number(date(2010, 1, 24)))
        self.assertEqual(3, self._quarter_number(date(2010, 1, 31)))
        self.assertEqual(3, self._quarter_number(date(2010, 4, 24)))
        self.assertEqual(4, self._quarter_number(date(2010, 4, 25)))
        self.assertEqual(4, self._quarter_number(date(2010, 5, 2)))
        self.assertEqual(4, self._quarter_number(date(2010, 7, 31)))

    def test_quarter_number_2014(self):
        # 2014: start date and end date of Q1-Q4
        self.assertEqual(1, self._quarter_number(date(2013, 7, 28)))
        self.assertEqual(1, self._quarter_number(date(2013, 10, 26)))
        self.assertEqual(2, self._quarter_number(date(2013, 10, 27)))
        self.assertEqual(2, self._quarter_number(date(2014, 1, 25)))
        self.assertEqual(3, self._quarter_number(date(2014, 1, 26)))
        self.assertEqual(3, self._quarter_number(date(2014, 4, 26)))
        self.assertEqual(4, self._quarter_number(date(2014, 4, 27)))
        self.assertEqual(4, self._quarter_number(date(2014, 7, 26)))
class RetailYearStartEnd(unittest.TestCase):
    """Test RetailDate.year_start_date and RetailDate.year_end_date properties.

    The test cases assume the fiscal year starts on August 1st:
    FISCAL_START_MONTH = 8
    FISCAL_START_DAY = 1

    Fix: replaced the Python-2-only ``xrange()`` and ``dict.iteritems()``
    with ``range()`` and ``dict.items()`` so the suite also runs under
    Python 3 (identical behavior on Python 2).
    """

    # Sample (month, day) pairs probed in every year; the late-July days
    # cover the retail-year boundary region.
    dates = [(1, 1),
             (8, 1),
             (8, 2),
             (12, 31)]
    dates.extend([(7, day) for day in range(22, 32)])
    dates.sort()
    years = range(2000, 2020)
    # Retail year's start dates from 2000-2020
    retail_start_dates = [
        (1999, 8, 1),
        (2000, 7, 30),
        (2001, 7, 29),
        (2002, 7, 28),
        (2003, 7, 27),
        (2004, 8, 1),
        (2005, 7, 31),
        (2006, 7, 30),
        (2007, 7, 29),
        (2008, 7, 27),
        (2009, 7, 26),
        (2010, 8, 1),
        (2011, 7, 31),
        (2012, 7, 29),
        (2013, 7, 28),
        (2014, 7, 27),
        (2015, 7, 26),
        (2016, 7, 31),
        (2017, 7, 30),
        (2018, 7, 29),
        (2019, 7, 28)
    ]
    # Retail year's end dates from 2000-2020
    retail_end_dates = [
        (2000, 7, 29),
        (2001, 7, 28),
        (2002, 7, 27),
        (2003, 7, 26),
        (2004, 7, 31),
        (2005, 7, 30),
        (2006, 7, 29),
        (2007, 7, 28),
        (2008, 7, 26),
        (2009, 7, 25),
        (2010, 7, 31),
        (2011, 7, 30),
        (2012, 7, 28),
        (2013, 7, 27),
        (2014, 7, 26),
        (2015, 7, 25),
        (2016, 7, 30),
        (2017, 7, 29),
        (2018, 7, 28),
        (2019, 7, 27),
        (2020, 7, 25),
    ]

    def test_start_date_output(self):
        """ Sanity tests: if the input date is start date of the retail year,
        the year_start_date should be the same.
        """
        self.assertEqual(RetailDate.FISCAL_START_MONTH, 8)
        self.assertEqual(RetailDate.FISCAL_START_DAY, 1)
        # map of input date -> expected output for start date of retail calendar
        input_to_output = {}
        # Construct the dict:
        # {
        #   start_date-1: previous_start_date,
        #   start_date  : start_date,
        #   start_date+1: start_date
        # }
        for idx in range(len(self.years)):
            start_date = date(*self.retail_start_dates[idx])
            if idx != 0:
                input_to_output[start_date - timedelta(1)] = date(*self.retail_start_dates[idx - 1])
            input_to_output[start_date] = start_date
            input_to_output[start_date + timedelta(1)] = start_date
        # Verify the actual output and expected output from dict
        for given, expected in input_to_output.items():
            actual = RetailDate(given).year_start_date
            message = "Input: %s, Output: %s, Expected: %s" % (given, actual, expected)
            self.assertEqual(actual, expected, message)

    def test_end_date_output(self):
        """Boundary checks one day around each retail year's end date."""
        # map of input date -> expected output for end date of retail calendar
        input_to_output = {}
        # Construct the dict:
        # {
        #   end_date-1: end_date,
        #   end_date  : end_date,
        #   end_date+1: next_end_date
        # }
        for idx in range(len(self.years)):
            end_date = date(*self.retail_end_dates[idx])
            input_to_output[end_date - timedelta(1)] = end_date
            input_to_output[end_date] = end_date
            if idx != len(self.years) - 1:
                input_to_output[end_date + timedelta(1)] = date(*self.retail_end_dates[idx + 1])
        for given, expected in input_to_output.items():
            actual = RetailDate(given).year_end_date
            message = "Input: %s, Output: %s, Expected: %s" % (given, actual, expected)
            self.assertEqual(actual, expected, message)

    def test_aggr_date_input(self):
        """ Find all retail year's start dates for random input in 2000-2020 period.
        """
        actual_start_dates = set()
        actual_end_dates = set()
        for year in self.years:
            for month, day in self.dates:
                retail_date = RetailDate(date(year, month, day))
                actual_start_dates.add(retail_date.year_start_date)
                actual_end_dates.add(retail_date.year_end_date)
        # Verify the retail start dates
        expected_start = {date(*tup) for tup in self.retail_start_dates}
        diff = expected_start.symmetric_difference(actual_start_dates)
        self.assertEqual(len(diff), 0, "Diff: " + str(diff))
        # Verify the retail end dates
        expected_end = {date(*tup) for tup in self.retail_end_dates}
        diff = expected_end.symmetric_difference(actual_end_dates)
        self.assertEqual(len(diff), 0, "Diff: " + str(diff))
class IsCurrentPreviousYearTests(unittest.TestCase):
    """
    Test cases for is_current_year and is_previous_year properties of
    calendars.RetailDate, exercised with today() frozen at several points
    inside a known retail year.
    """

    def _check_current(self, today, cases):
        """Freeze today() at *today*, then verify is_current_year for
        every (input date, expected bool) pair in *cases*."""
        with freeze_time(today):
            for given, expected in cases:
                self.assertEqual(RetailDate(given).is_current_year, expected)

    def _check_previous(self, today, cases):
        """Freeze today() at *today*, then verify is_previous_year for
        every (input date, expected bool) pair in *cases*."""
        with freeze_time(today):
            for given, expected in cases:
                self.assertEqual(RetailDate(given).is_previous_year, expected)

    def test_retail_date(self):
        # today always falls inside the current retail year
        self.assertEqual(RetailDate(date.today()).is_current_year, True)
        self.assertEqual(RetailDate(date.today()).is_previous_year, False)
        # +/- 400 days crosses a retail-year boundary in either direction
        self.assertEqual(
            RetailDate(date.today() + timedelta(days=400)).is_current_year, False)
        self.assertEqual(
            RetailDate(date.today() - timedelta(days=400)).is_current_year, False)
        # date way in the past should not be current
        self.assertEqual(RetailDate(date(2012, 12, 21)).is_current_year, False)

    def test_retail_year_2010(self):
        """
        Mock today() as a day during retail year 2010 (2009-07-26 to 2010-07-31)
        """
        mock_todays = [date(2009, 10, 1),   # random earlier half
                       date(2010, 2, 1),    # random later half
                       date(2009, 12, 31),  # end of calendar year
                       date(2010, 1, 1),    # start of calendar year
                       date(2009, 7, 26),   # start of retail year
                       date(2010, 7, 31),   # end of retail year
                       ]
        current_cases = [
            # At False boundary
            (date(2009, 7, 24), False),
            (date(2009, 7, 25), False),
            (date(2010, 8, 1), False),
            (date(2010, 8, 2), False),
            # At True boundary
            (date(2009, 7, 26), True),
            (date(2009, 7, 27), True),
            (date(2010, 7, 29), True),
            (date(2010, 7, 30), True),
            # Next month lower end
            (date(2009, 7, 31), True),
            (date(2009, 8, 1), True),
            (date(2009, 7, 1), False),
            # Next month higher end
            (date(2010, 7, 31), True),
            (date(2010, 8, 1), False),
            (date(2010, 8, 31), False),
            # Calendar year end
            (date(2008, 12, 31), False),
            (date(2009, 12, 31), True),
            (date(2010, 12, 31), False),
            # Calendar year start
            (date(2009, 1, 1), False),
            (date(2010, 1, 1), True),
            (date(2011, 1, 1), False),
        ]
        previous_cases = [
            # At False boundary
            (date(2010, 8, 1), False),
            (date(2010, 8, 2), False),
            (date(2009, 7, 26), False),
            (date(2009, 7, 27), False),
            (date(2010, 7, 29), False),
            (date(2010, 7, 30), False),
            (date(2008, 7, 26), False),
            (date(2008, 7, 25), False),
            # At True boundary
            (date(2009, 7, 24), True),
            (date(2009, 7, 25), True),
            (date(2008, 7, 27), True),
            (date(2008, 7, 28), True),
            # Next month lower end
            (date(2009, 7, 31), False),
            (date(2009, 8, 1), False),
            (date(2009, 7, 1), True),
            # Next month higher end
            (date(2010, 7, 31), False),
            (date(2010, 8, 1), False),
            (date(2010, 8, 31), False),
            # Calendar year end
            (date(2008, 12, 31), True),
            (date(2009, 12, 31), False),
            (date(2010, 12, 31), False),
            # Calendar year start
            (date(2009, 1, 1), True),
            (date(2010, 1, 1), False),
            (date(2011, 1, 1), False),
        ]
        for mock_today in mock_todays:
            self._check_current(mock_today, current_cases)
            self._check_previous(mock_today, previous_cases)

    def test_retail_year_2006(self):
        """
        Mock today() as a day during retail year 2006 (2005-07-31 to 2006-07-29)
        """
        mock_todays = [date(2005, 10, 1),   # random earlier half
                       date(2006, 2, 1),    # random later half
                       date(2005, 12, 31),  # end of calendar year
                       date(2006, 1, 1),    # start of calendar year
                       date(2005, 7, 31),   # start of fiscal year
                       date(2006, 7, 29),   # end of fiscal year
                       ]
        current_cases = [
            # At False boundary
            (date(2005, 7, 30), False),
            (date(2006, 7, 30), False),
            # At True boundary
            (date(2005, 7, 31), True),
            (date(2006, 7, 29), True),
            # Next month lower end
            (date(2005, 8, 1), True),
            (date(2005, 7, 1), False),
            # Next month higher end
            (date(2006, 7, 31), False),
            (date(2006, 8, 1), False),
            (date(2006, 8, 31), False),
            # Calendar year end
            (date(2004, 12, 31), False),
            (date(2005, 12, 31), True),
            (date(2006, 12, 31), False),
            # Calendar year start
            (date(2005, 1, 1), False),
            (date(2006, 1, 1), True),
            (date(2007, 1, 1), False),
        ]
        previous_cases = [
            # At False boundary
            (date(2006, 7, 30), False),
            (date(2004, 7, 31), False),
            (date(2005, 7, 31), False),
            (date(2006, 7, 29), False),
            # At True boundary
            (date(2005, 7, 30), True),
            (date(2004, 8, 1), True),
            # Next month lower end
            (date(2005, 8, 1), False),
            (date(2005, 7, 1), True),
            # Next month higher end
            (date(2006, 7, 31), False),
            (date(2006, 8, 1), False),
            (date(2006, 8, 31), False),
            # Calendar year end
            (date(2004, 12, 31), True),
            (date(2005, 12, 31), False),
            (date(2006, 12, 31), False),
            # Calendar year start
            (date(2005, 1, 1), True),
            (date(2006, 1, 1), False),
            (date(2007, 1, 1), False),
        ]
        for mock_today in mock_todays:
            self._check_current(mock_today, current_cases)
            self._check_previous(mock_today, previous_cases)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Bot to upload pages from a file.
This bot takes its input from a file that contains a number of
pages to be put on the wiki. The pages should all have the same
begin and end text (which may not overlap).
By default the text should have the intended title of the page
as the first text in bold (that is, between ''' and '''),
you can modify this behavior with command line options.
The default is not to include the begin and
end text in the page, if you want to include that text, use
the -include option.
Specific arguments:
-start:xxx Specify the text that marks the beginning of a page
-end:xxx Specify the text that marks the end of a page
-file:xxx Give the filename we are getting our material from
(default: dict.txt)
-include The beginning and end markers should be included
in the page.
-titlestart:xxx Use xxx in place of ''' for identifying the
beginning of page title
-titleend:xxx Use xxx in place of ''' for identifying the
end of page title
-notitle do not include the title, including titlestart, and
titleend, in the page
-nocontent If the page already contains this statement it doesn't append
(example: -nocontent:"{{infobox")
-noredirect if you don't want to upload on redirect page
it is True by default and bot adds pages to redirected pages
-summary:xxx Use xxx as the edit summary for the upload - if
a page exists, standard messages are appended
after xxx for appending, prepending, or replacement
-autosummary Use MediaWikis autosummary when creating a new page,
overrides -summary in this case
-minor set minor edit flag on page edits
If the page to be uploaded already exists:
-safe do nothing (default)
-appendtop add the text to the top of it
-appendbottom add the text to the bottom of it
-force overwrite the existing page
"""
#
# (C) Andre Engels, 2004
# (C) Pywikibot team, 2005-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 209355ac7be2d220436a3b2f9e9c0409a5c8e074 $'
#
import os
import re
import codecs
import pywikibot
from pywikibot import config, Bot, i18n
class NoTitle(Exception):
    """Signals that a page chunk contained no recognizable title."""

    def __init__(self, offset):
        """Record *offset*: how far the scanner may skip past the chunk."""
        # Intentionally no super().__init__() call, mirroring the original:
        # Exception.__new__ already records the constructor args.
        self.offset = offset
class PageFromFileRobot(Bot):
    """
    Responsible for writing pages to the wiki.

    Titles and contents are given by a PageFromFileReader.
    """
    def __init__(self, reader, **kwargs):
        """Constructor.

        @param reader: object whose run() yields (title, contents) pairs
        @param kwargs: option overrides merged into availableOptions
        """
        # Defaults for this bot's options; command-line flags parsed in
        # main() override them via **kwargs.
        # NOTE(review): 'always': True presumably suppresses the per-page
        # confirmation prompt by default — confirm against the Bot base class.
        self.availableOptions.update({
            'always': True,
            'force': False,
            'append': None,
            'summary': None,
            'minor': False,
            'autosummary': False,
            'nocontent': '',
            'redirect': True
        })
        super(PageFromFileRobot, self).__init__(**kwargs)
        self.reader = reader
    def run(self):
        """Start file processing and upload content."""
        for title, contents in self.reader.run():
            self.save(title, contents)
    def save(self, title, contents):
        """Upload page content.

        Creates the page if missing; otherwise appends, overwrites or
        skips according to the 'append'/'force' options.
        """
        mysite = pywikibot.Site()
        page = pywikibot.Page(mysite, title)
        self.current_page = page
        # Use the explicit -summary text when given, otherwise the
        # translated default message.
        if self.getOption('summary'):
            comment = self.getOption('summary')
        else:
            comment = i18n.twtranslate(mysite, 'pagefromfile-msg')
        # Summary variants for the append-on-top / append-on-bottom /
        # overwrite cases below.
        comment_top = comment + " - " + i18n.twtranslate(
            mysite, 'pagefromfile-msg_top')
        comment_bottom = comment + " - " + i18n.twtranslate(
            mysite, 'pagefromfile-msg_bottom')
        comment_force = "%s *** %s ***" % (
            comment, i18n.twtranslate(mysite, 'pagefromfile-msg_force'))
        # Strip leading newline characters (they cause trouble when
        # creating redirects).  The '^' anchors at the string start.
        contents = re.sub('^[\r\n]*', '', contents)
        if page.exists():
            # Honor -noredirect: never touch redirect pages.
            if not self.getOption('redirect') and page.isRedirectPage():
                pywikibot.output(u"Page %s is redirect, skipping!" % title)
                return
            pagecontents = page.get(get_redirect=True)
            if self.getOption('nocontent') != u'':
                # Skip pages that already contain the -nocontent marker
                # (checked verbatim, and with the marker lower-cased).
                if pagecontents.find(self.getOption('nocontent')) != -1 or \
                pagecontents.find(self.getOption('nocontent').lower()) != -1:
                    pywikibot.output(u'Page has %s so it is skipped' % self.getOption('nocontent'))
                    return
            if self.getOption('append') == 'top':
                pywikibot.output(u"Page %s already exists, appending on top!"
                                 % title)
                contents = contents + pagecontents
                comment = comment_top
            elif self.getOption('append') == 'bottom':
                pywikibot.output(u"Page %s already exists, appending on bottom!"
                                 % title)
                contents = pagecontents + contents
                comment = comment_bottom
            elif self.getOption('force'):
                pywikibot.output(u"Page %s already exists, ***overwriting!"
                                 % title)
                comment = comment_force
            else:
                # Default (-safe): leave existing pages untouched.
                pywikibot.output(u"Page %s already exists, not adding!" % title)
                return
        else:
            if self.getOption('autosummary'):
                # Empty summary lets MediaWiki generate one automatically.
                comment = ''
                config.default_edit_summary = ''
        self.userPut(page, page.text, contents,
                     summary=comment,
                     minor=self.getOption('minor'),
                     show_diff=False,
                     ignore_save_related_errors=True)
class PageFromFileReader:
    """
    Responsible for reading the file.

    The run() method yields a (title, contents) tuple for each found page.
    """

    def __init__(self, filename, pageStartMarker, pageEndMarker,
                 titleStartMarker, titleEndMarker, include, notitle):
        """Constructor: remember the file name and all marker settings."""
        self.filename = filename
        self.pageStartMarker = pageStartMarker
        self.pageEndMarker = pageEndMarker
        self.titleStartMarker = titleStartMarker
        self.titleEndMarker = titleEndMarker
        self.include = include
        self.notitle = notitle

    def run(self):
        """Read file and yield page title and content."""
        pywikibot.output('\n\nReading \'%s\'...' % self.filename)
        try:
            with codecs.open(self.filename, 'r',
                             encoding=config.textfile_encoding) as source:
                text = source.read()
        except IOError as err:
            pywikibot.output(str(err))
            raise IOError

        position = 0
        length = 0
        while True:
            try:
                length, title, contents = self.findpage(text[position:])
            except AttributeError:
                # findpage() hit a failed regex search: no (further) page.
                if length:
                    pywikibot.output(u'End of file.')
                else:
                    pywikibot.output(u'\nStart or end marker not found.')
                break
            except NoTitle as err:
                pywikibot.output(u'\nNo title found - skipping a page.')
                position += err.offset
            else:
                position += length
                yield title, contents

    def findpage(self, text):
        """Find page to work on."""
        page_pattern = re.compile(
            re.escape(self.pageStartMarker) + "(.*?)" +
            re.escape(self.pageEndMarker), re.DOTALL)
        title_pattern = re.compile(
            re.escape(self.titleStartMarker) + "(.*?)" +
            re.escape(self.titleEndMarker))
        location = page_pattern.search(text)
        # When nothing matches, .group() raises AttributeError on None,
        # which run() interprets as "no more pages".
        contents = location.group() if self.include else location.group(1)
        try:
            title = title_pattern.search(contents).group(1)
            if self.notitle:
                # Remove title (to allow creation of redirects)
                contents = title_pattern.sub('', contents, count=1)
        except AttributeError:
            raise NoTitle(location.end())
        else:
            return location.end(), title, contents
def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    # Adapt these to the file you are using. 'pageStartMarker' and
    # 'pageEndMarker' are the beginning and end of each entry. Take text that
    # should be included and does not occur elsewhere in the text.
    # TODO: make config variables for these.
    filename = "dict.txt"
    pageStartMarker = "{{-start-}}"
    pageEndMarker = "{{-stop-}}"
    titleStartMarker = u"'''"
    titleEndMarker = u"'''"
    options = {}
    include = False
    notitle = False
    # handle_args() consumes the pywikibot-global options and returns
    # only the script-specific ones.
    for arg in pywikibot.handle_args(args):
        if arg.startswith("-start:"):
            pageStartMarker = arg[7:]
        elif arg.startswith("-end:"):
            pageEndMarker = arg[5:]
        elif arg.startswith("-file:"):
            filename = arg[6:]
        elif arg == "-include":
            include = True
        elif arg.startswith('-append') and arg[7:] in ('top', 'bottom'):
            # Covers both -appendtop and -appendbottom.
            options['append'] = arg[7:]
        elif arg == "-force":
            options['force'] = True
        elif arg == "-safe":
            # -safe resets both conflicting options to the defaults.
            options['force'] = False
            options['append'] = None
        elif arg == "-noredirect":
            options['redirect'] = False
        elif arg == '-notitle':
            notitle = True
        elif arg == '-minor':
            options['minor'] = True
        elif arg.startswith('-nocontent:'):
            options['nocontent'] = arg[11:]
        elif arg.startswith("-titlestart:"):
            titleStartMarker = arg[12:]
        elif arg.startswith("-titleend:"):
            titleEndMarker = arg[10:]
        elif arg.startswith("-summary:"):
            options['summary'] = arg[9:]
        elif arg == '-autosummary':
            options['autosummary'] = True
        else:
            pywikibot.output(u"Disregarding unknown argument %s." % arg)
    failed_filename = False
    # Keep prompting until an existing file is named or the user quits.
    while not os.path.isfile(filename):
        pywikibot.output('\nFile \'%s\' does not exist. ' % filename)
        _input = pywikibot.input(
            'Please enter the file name [q to quit]:')
        if _input == 'q':
            failed_filename = True
            break
        else:
            filename = _input
    # show help text from the top of this file if reader failed
    # or User quit.
    if failed_filename:
        pywikibot.showHelp()
    else:
        reader = PageFromFileReader(filename, pageStartMarker, pageEndMarker,
                                    titleStartMarker, titleEndMarker, include,
                                    notitle)
        bot = PageFromFileRobot(reader, **options)
        bot.run()
if __name__ == "__main__":
    # Run only when executed as a script, not when imported as a module.
    main()
| |
# -*- coding: UTF-8 -*-
# Copyright 2008-2018 Rumma & Ko Ltd
#
# License: BSD (see file COPYING for details)
"""Database models for `lino_xl.lib.properties`.
This module is deprecated.
A :class:`PropOccurence` is when a given "property owner" has a given
:class:`Property`. "Property owner" can be anything: a person, a
company, a product, an upload, it depends on the implementation of
:class:`PropOccurence`. For example
:mod:`lino.projects.pcsw.models.PersonProperty`.
A :class:`Property` defines the configuration of a property.
.. autosummary::
"""
from builtins import str
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from lino.core.roles import SiteStaff
from lino.api import dd, rt
from lino import mixins
from lino.core.kernel import get_choicelist, choicelist_choices
MULTIPLE_VALUES_SEP = ','
class DoYouLike(dd.ChoiceList):
    """A list of possible answers to questions of type "How much do you
    like ...?".
    """
    # Items ('0'..'4') are appended right after the class definition.
    verbose_name = _("Do you like?")
# Populate DoYouLike: stored values are '0'..'4'; '2' ("normally")
# is registered as the default item.
add = DoYouLike.add_item
add('0', _("certainly not"))
add('1', _("rather not"))
add('2', _("normally"), "default")
add('3', _("quite much"))
add('4', _("very much"))
class HowWell(dd.ChoiceList):
    """A list of possible answers to questions of type "How well ...?":
    "not at all", "a bit", "moderate", "quite well" and "very well"
    which are stored in the database as '0' to '4',
    and whose `__str__()` returns their translated text.
    """
    # Items are appended right after the class definition.
    verbose_name = _("How well?")
# Populate HowWell: stored values are '0'..'4'; '2' ("moderate")
# is registered as the default item.
add = HowWell.add_item
add('0', _("not at all"))
add('1', _("a bit"))
add('2', _("moderate"), "default")
add('3', _("quite well"))
add('4', _("very well"))
class PropType(mixins.BabelNamed):
    """
    The type of the values that a property accepts.

    Each PropType may (or may not) imply a list of choices.

    Examples of property types:

    - Knowledge (Choices: "merely", "acceptable", "good", "very good",...)
    - YesNo (no choices)
    """

    class Meta:
        verbose_name = _("Property Type")
        verbose_name_plural = _("Property Types")

    # Name of a site-wide choicelist providing the allowed values, or
    # blank when values are defined per type via PropChoice rows.
    choicelist = models.CharField(
        max_length=50, blank=True,
        verbose_name=_("Choices List"),
        choices=choicelist_choices())

    default_value = models.CharField(
        _("default value"),
        max_length=settings.SITE.propvalue_max_length,
        blank=True)
    """
    The default value to set when creating a :class:`PropertyOccurence`.
    This is currently used only in some fixture...
    """

    limit_to_choices = models.BooleanField(
        _("Limit to choices"), default=False)
    """
    not yet supported
    """

    multiple_choices = models.BooleanField(
        _("Multiple choices"), default=False)
    """
    not yet supported
    """

    @dd.chooser()
    def default_value_choices(cls, choicelist):
        # Choices for `default_value` depend on the selected choicelist.
        if choicelist:
            return get_choicelist(choicelist).get_choices()
        return []

    def get_default_value_display(self, value):
        """Display text for the `default_value` field."""
        return self.get_text_for_value(value)

    def get_text_for_value(self, value):
        """Return the display text for a stored *value*.

        A choicelist-backed type delegates to the choicelist.  Otherwise
        *value* is split on MULTIPLE_VALUES_SEP and each part is
        translated through its PropChoice (the raw part is kept when no
        matching choice exists).
        """
        if not value:
            return ''
        if self.choicelist:
            cl = get_choicelist(self.choicelist)
            return cl.get_text_for_value(value)
        texts = []
        for part in value.split(MULTIPLE_VALUES_SEP):
            try:
                pc = PropChoice.objects.get(value=part, type=self)
                part = dd.babelattr(pc, 'text')
            except PropChoice.DoesNotExist:
                pass
            texts.append(part)
        # Consistency fix: join with the same MULTIPLE_VALUES_SEP used
        # for splitting (previously a hard-coded ',' which only
        # coincidentally matched the constant).
        return MULTIPLE_VALUES_SEP.join(texts)

    def choices_for(self, property):
        """Return the (value, text) pairs valid for *property*."""
        if self.choicelist:
            return get_choicelist(self.choicelist).get_choices()
        return [(pc.value, pc.text) for pc in
                PropChoice.objects.filter(type=self).order_by('value')]
@dd.python_2_unicode_compatible
class PropChoice(dd.Model):
    """A Choice for a given PropType. `text` is the text to be displayed
    in combo boxes.

    `value` is the value to be stored in :attr:`PropValue.value`, it
    must be unique for all PropChoices of a given PropType.

    Choices for a given PropType will be sorted on `value` (we might
    make this more customizable if necessary by adding a new field
    `sort_text` and/or an option to sort on text instead of value)

    When configuring your property choices, be aware of the fact that
    existing property occurences will *not* change when you change the
    `value` of a property choice.
    """
    class Meta:
        verbose_name = _("Property Choice")
        verbose_name_plural = _("Property Choices")
        unique_together = ['type', 'value']
    # The PropType this choice belongs to.
    type = dd.ForeignKey(
        PropType, verbose_name=_("Property Type"))
    # Stored value; unique per type (see Meta.unique_together).
    value = models.CharField(
        max_length=settings.SITE.propvalue_max_length,
        verbose_name=_("Value"))
    # Translatable display text; falls back to `value` in save().
    text = dd.BabelCharField(
        max_length=200, verbose_name=_("Designation"), blank=True)
    def save(self, *args, **kw):
        # Default the display text to the raw value when none was given.
        if not self.text:
            self.text = self.value
        r = super(PropChoice, self).save(*args, **kw)
        return r
    def __str__(self):
        # Show the designation in the current language.
        return dd.babelattr(self, 'text')
class PropGroup(mixins.BabelNamed):
    """A Property Group defines a list of Properties that fit together
    under a common name. Examples of Property Groups: Skills, Soft
    Skills, Obstacles. There will be one menu entry per Group.
    """
    class Meta:
        verbose_name = _("Property Group")
        verbose_name_plural = _("Property Groups")
class Property(mixins.BabelNamed):
    """A named property, belonging to a PropGroup and typed by a PropType."""
    class Meta:
        verbose_name = _("Property")
        verbose_name_plural = _("Properties")
    # The group under which this property is listed in menus.
    group = dd.ForeignKey(PropGroup)
    # Determines which values (and choices) this property accepts.
    type = dd.ForeignKey(PropType, verbose_name=_("Property Type"))
@dd.python_2_unicode_compatible
class PropertyOccurence(dd.Model):
    """A Property Occurence is when a Property occurs, possibly having a
    certain value.

    Abstract base class for
    | :class:`lino_welfare.modlib.cv.models.PersonProperty`,
    | :class:`lino_welfare.modlib.cv.models.WantedProperty`,
    | :class:`lino_welfare.modlib.cv.models.AvoidedProperty`,
    | ...
    """
    class Meta:
        abstract = True
    # Denormalized copy of property.group; kept in sync by full_clean().
    group = dd.ForeignKey(PropGroup)
    property = dd.ForeignKey('properties.Property')
    value = models.CharField(
        _("Value"),
        max_length=settings.SITE.propvalue_max_length,
        blank=True)
    @dd.chooser()
    def value_choices(cls, property):
        # Choices for `value` depend on the chosen property's type.
        if property is None:
            return []
        return property.type.choices_for(property)
    @dd.chooser()
    def property_choices(cls, group):
        # Choices for `property` are limited to the chosen group.
        if group is None:
            return []
        return Property.objects.filter(group=group).order_by('name')
    def get_value_display(self, value):
        # Delegate display rendering to the property's type, if any.
        if self.property_id is None:
            return value
        return self.property.type.get_text_for_value(value)
    def full_clean(self):
        # Keep the denormalized `group` consistent with the property.
        if self.property_id is not None:
            self.group = self.property.group
        super(PropertyOccurence, self).full_clean()
    def __str__(self):
        if self.property_id is None:
            return u"Undefined %s" % self.group
        # We must call str() because get_text_for_value might return a
        # lazyly translatable string:
        return str(self.property.type.get_text_for_value(self.value))
class PropGroups(dd.Table):
    """Staff-only table of all :class:`PropGroup` rows."""
    required_roles = dd.login_required(dd.SiteStaff)
    model = PropGroup
    detail_layout = """
    id name
    PropsByGroup
    """
class PropTypes(dd.Table):
    """Staff-only table of all :class:`PropType` rows."""
    required_roles = dd.login_required(dd.SiteStaff)
    model = PropType
    detail_layout = """
    id name choicelist default_value
    ChoicesByType
    PropsByType
    """
class Properties(dd.Table):
    """Staff-only table of all :class:`Property` rows, sorted by name."""
    required_roles = dd.login_required(dd.SiteStaff)
    model = Property
    order_by = ['name']
    #~ column_names = "id name"
class PropsByGroup(Properties):
    """Properties of a given :class:`PropGroup`."""
    master_key = 'group'
class PropsByType(Properties):
    """Properties of a given :class:`PropType`."""
    master_key = 'type'
class PropChoices(dd.Table):
    """Table of all :class:`PropChoice` rows."""
    model = PropChoice
class ChoicesByType(PropChoices):
"Lists all PropChoices for a given PropType."
master_key = 'type'
order_by = ['value']
column_names = 'value text *'
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
from . import common
from proton import *
from proton._compat import str2bin
class Test(common.Test):
    """Base class for the transport tests; inherits the shared test
    machinery from common.Test."""
    pass
class ClientTransportTest(Test):
    """Exercises a client-mode Transport against a peer Transport.

    ``self.transport`` is the transport under test; whatever it emits is
    shovelled into ``self.peer``, which is bound to ``self.conn`` so that
    the resulting endpoint state can be inspected.
    """

    def setup(self):
        self.transport = Transport()
        self.peer = Transport()
        self.conn = Connection()
        self.peer.bind(self.conn)

    def teardown(self):
        self.transport = None
        self.peer = None
        self.conn = None

    def drain(self):
        """Feed all pending output of the transport under test to the peer."""
        while True:
            p = self.transport.pending()
            if p < 0:
                return
            elif p > 0:
                data = self.transport.peek(p)
                self.peer.push(data)
                self.transport.pop(len(data))
            else:
                assert False

    def assert_error(self, name):
        """Drain the transport and verify the peer saw an open frame, then
        a close frame carrying the error condition *name*."""
        assert self.conn.remote_container is None, self.conn.remote_container
        self.drain()
        # verify that we received an open frame
        assert self.conn.remote_container is not None, self.conn.remote_container
        # verify that we received a close frame
        assert self.conn.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_CLOSED, self.conn.state
        # verify that a framing error was reported
        assert self.conn.remote_condition.name == name, self.conn.remote_condition

    def testEOS(self):
        self.transport.push(str2bin(""))  # should be a noop
        self.transport.close_tail()  # should result in framing error
        self.assert_error(u'amqp:connection:framing-error')

    def testPartial(self):
        self.transport.push(str2bin("AMQ"))  # partial header
        self.transport.close_tail()  # should result in framing error
        self.assert_error(u'amqp:connection:framing-error')

    def testGarbage(self, garbage=str2bin("GARBAGE_")):
        self.transport.push(garbage)
        self.assert_error(u'amqp:connection:framing-error')
        assert self.transport.pending() < 0
        self.transport.close_tail()
        assert self.transport.pending() < 0

    def testSmallGarbage(self):
        self.testGarbage(str2bin("XXX"))

    def testBigGarbage(self):
        self.testGarbage(str2bin("GARBAGE_XXX"))

    def testHeader(self):
        self.transport.push(str2bin("AMQP\x00\x01\x00\x00"))
        self.transport.close_tail()
        self.assert_error(u'amqp:connection:framing-error')

    def testProtocolNotSupported(self):
        self.transport.push(str2bin("AMQP\x01\x01\x0a\x00"))
        p = self.transport.pending()
        assert p >= 8, p
        # renamed from `bytes` to avoid shadowing the builtin
        buf = self.transport.peek(p)
        assert buf[:8] == str2bin("AMQP\x00\x01\x00\x00")
        self.transport.pop(p)
        self.drain()
        assert self.transport.closed

    def testPeek(self):
        out = self.transport.peek(1024)
        assert out is not None

    def testBindAfterOpen(self):
        conn = Connection()
        ssn = conn.session()
        conn.open()
        ssn.open()
        conn.container = "test-container"
        conn.hostname = "test-hostname"
        trn = Transport()
        trn.bind(conn)
        out = trn.peek(1024)
        assert str2bin("test-container") in out, repr(out)
        assert str2bin("test-hostname") in out, repr(out)
        self.transport.push(out)

        c = Connection()
        assert c.remote_container == None
        assert c.remote_hostname == None
        assert c.session_head(0) == None
        self.transport.bind(c)
        assert c.remote_container == "test-container"
        assert c.remote_hostname == "test-hostname"
        assert c.session_head(0) != None

    def testCloseHead(self):
        n = self.transport.pending()
        assert n > 0, n
        try:
            self.transport.close_head()
        except TransportException as e:
            # `as` binding replaces the old `e = sys.exc_info()[1]` idiom,
            # which raised NameError here because this module never imports sys.
            assert "aborted" in str(e), str(e)
        n = self.transport.pending()
        assert n < 0, n

    def testCloseTail(self):
        n = self.transport.capacity()
        assert n > 0, n
        try:
            self.transport.close_tail()
        except TransportException as e:
            # see testCloseHead: `as` binding instead of the unimported-sys idiom
            assert "aborted" in str(e), str(e)
        n = self.transport.capacity()
        assert n < 0, n

    def testUnpairedPop(self):
        conn = Connection()
        self.transport.bind(conn)

        conn.hostname = "hostname"
        conn.open()

        dat1 = self.transport.peek(1024)

        ssn = conn.session()
        ssn.open()

        dat2 = self.transport.peek(1024)
        assert dat2[:len(dat1)] == dat1

        snd = ssn.sender("sender")
        snd.open()

        # pop the already-peeked output in two unpaired chunks
        self.transport.pop(len(dat1))
        self.transport.pop(len(dat2) - len(dat1))
        dat3 = self.transport.peek(1024)
        self.transport.pop(len(dat3))
        assert self.transport.peek(1024) == str2bin("")

        self.peer.push(dat1)
        self.peer.push(dat2[len(dat1):])
        self.peer.push(dat3)
class ServerTransportTest(Test):
    """Exercises a server-mode Transport against a peer Transport.

    Identical wiring to ClientTransportTest, except that the transport
    under test is created with Transport.SERVER, so protocol errors are
    answered with a valid AMQP header before the connection is closed.
    """

    def setup(self):
        self.transport = Transport(Transport.SERVER)
        self.peer = Transport()
        self.conn = Connection()
        self.peer.bind(self.conn)

    def teardown(self):
        self.transport = None
        self.peer = None
        self.conn = None

    def drain(self):
        """Feed all pending output of the transport under test to the peer."""
        while True:
            p = self.transport.pending()
            if p < 0:
                return
            elif p > 0:
                data = self.transport.peek(p)
                self.peer.push(data)
                self.transport.pop(len(data))
            else:
                assert False

    def assert_error(self, name):
        """Drain the transport and verify the peer saw an open frame, then
        a close frame carrying the error condition *name*."""
        assert self.conn.remote_container is None, self.conn.remote_container
        self.drain()
        # verify that we received an open frame
        assert self.conn.remote_container is not None, self.conn.remote_container
        # verify that we received a close frame
        assert self.conn.state == Endpoint.LOCAL_UNINIT | Endpoint.REMOTE_CLOSED, self.conn.state
        # verify that a framing error was reported
        assert self.conn.remote_condition.name == name, self.conn.remote_condition

    # TODO: This may no longer be testing anything
    def testEOS(self):
        self.transport.push(str2bin(""))  # should be a noop
        self.transport.close_tail()
        p = self.transport.pending()
        self.drain()
        assert self.transport.closed

    def testPartial(self):
        self.transport.push(str2bin("AMQ"))  # partial header
        self.transport.close_tail()
        p = self.transport.pending()
        assert p >= 8, p
        # renamed from `bytes` to avoid shadowing the builtin
        buf = self.transport.peek(p)
        assert buf[:8] == str2bin("AMQP\x00\x01\x00\x00")
        self.transport.pop(p)
        self.drain()
        assert self.transport.closed

    def testGarbage(self, garbage="GARBAGE_"):
        self.transport.push(str2bin(garbage))
        p = self.transport.pending()
        assert p >= 8, p
        buf = self.transport.peek(p)
        assert buf[:8] == str2bin("AMQP\x00\x01\x00\x00")
        self.transport.pop(p)
        self.drain()
        assert self.transport.closed

    def testSmallGarbage(self):
        self.testGarbage("XXX")

    def testBigGarbage(self):
        self.testGarbage("GARBAGE_XXX")

    def testHeader(self):
        self.transport.push(str2bin("AMQP\x00\x01\x00\x00"))
        self.transport.close_tail()
        self.assert_error(u'amqp:connection:framing-error')

    def testProtocolNotSupported(self):
        self.transport.push(str2bin("AMQP\x01\x01\x0a\x00"))
        p = self.transport.pending()
        assert p >= 8, p
        buf = self.transport.peek(p)
        assert buf[:8] == str2bin("AMQP\x00\x01\x00\x00")
        self.transport.pop(p)
        self.drain()
        assert self.transport.closed

    def testPeek(self):
        out = self.transport.peek(1024)
        assert out is not None

    def testBindAfterOpen(self):
        conn = Connection()
        ssn = conn.session()
        conn.open()
        ssn.open()
        conn.container = "test-container"
        conn.hostname = "test-hostname"
        trn = Transport()
        trn.bind(conn)
        out = trn.peek(1024)
        assert str2bin("test-container") in out, repr(out)
        assert str2bin("test-hostname") in out, repr(out)
        self.transport.push(out)

        c = Connection()
        assert c.remote_container == None
        assert c.remote_hostname == None
        assert c.session_head(0) == None
        self.transport.bind(c)
        assert c.remote_container == "test-container"
        assert c.remote_hostname == "test-hostname"
        assert c.session_head(0) != None

    def testCloseHead(self):
        n = self.transport.pending()
        assert n >= 0, n
        try:
            self.transport.close_head()
        except TransportException as e:
            # `as` binding replaces the old `e = sys.exc_info()[1]` idiom,
            # which raised NameError here because this module never imports sys.
            assert "aborted" in str(e), str(e)
        n = self.transport.pending()
        assert n < 0, n

    def testCloseTail(self):
        n = self.transport.capacity()
        assert n > 0, n
        try:
            self.transport.close_tail()
        except TransportException as e:
            # see testCloseHead: `as` binding instead of the unimported-sys idiom
            assert "aborted" in str(e), str(e)
        n = self.transport.capacity()
        assert n < 0, n

    def testUnpairedPop(self):
        conn = Connection()
        self.transport.bind(conn)

        conn.hostname = "hostname"
        conn.open()

        dat1 = self.transport.peek(1024)

        ssn = conn.session()
        ssn.open()

        dat2 = self.transport.peek(1024)
        assert dat2[:len(dat1)] == dat1

        snd = ssn.sender("sender")
        snd.open()

        # pop the already-peeked output in two unpaired chunks
        self.transport.pop(len(dat1))
        self.transport.pop(len(dat2) - len(dat1))
        dat3 = self.transport.peek(1024)
        self.transport.pop(len(dat3))
        assert self.transport.peek(1024) == str2bin("")

        self.peer.push(dat1)
        self.peer.push(dat2[len(dat1):])
        self.peer.push(dat3)

    def testEOSAfterSASL(self):
        self.transport.sasl().allowed_mechs('ANONYMOUS')
        self.peer.sasl().allowed_mechs('ANONYMOUS')

        # this should send over the sasl header plus a sasl-init set up
        # for anonymous
        p = self.peer.pending()
        self.transport.push(self.peer.peek(p))
        self.peer.pop(p)

        # now we send EOS
        self.transport.close_tail()

        # the server may send an error back
        p = self.transport.pending()
        while p > 0:
            self.peer.push(self.transport.peek(p))
            self.transport.pop(p)
            p = self.transport.pending()

        # server closed
        assert self.transport.pending() < 0
class LogTest(Test):
    """Checks that a tracer callback installed on a Transport receives
    every message passed to Transport.log()."""

    def testTracer(self):
        transport = Transport()
        assert transport.tracer is None

        seen = []

        def capture(trn, msg):
            seen.append((trn, msg))

        transport.tracer = capture
        assert transport.tracer is capture

        for text in ("one", "two", "three"):
            transport.log(text)

        expected = [(transport, "one"), (transport, "two"), (transport, "three")]
        assert seen == expected, seen
| |
"""
Configuration of network interfaces on Windows hosts
====================================================
.. versionadded:: 2014.1.0
This module provides the ``network`` state(s) on Windows hosts. DNS servers, IP
addresses and default gateways can currently be managed.
Below is an example of the configuration for an interface that uses DHCP for
both DNS servers and IP addresses:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: dhcp
- ip_proto: dhcp
.. note::
Both the ``dns_proto`` and ``ip_proto`` arguments are required.
Static DNS and IP addresses can be configured like so:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: static
- dns_servers:
- 8.8.8.8
- 8.8.4.4
- ip_proto: static
- ip_addrs:
- 10.2.3.4/24
.. note::
IP addresses are specified using the format
``<ip-address>/<subnet-length>``. Salt provides a convenience function
called :mod:`ip.get_subnet_length <salt.modules.win_ip.get_subnet_length>`
to calculate the subnet length from a netmask.
Optionally, if you are setting a static IP address, you can also specify the
default gateway using the ``gateway`` parameter:
.. code-block:: yaml
Local Area Connection #2:
network.managed:
- dns_proto: static
- dns_servers:
- 8.8.8.8
- 8.8.4.4
- ip_proto: static
- ip_addrs:
- 10.2.3.4/24
- gateway: 10.2.3.1
"""
import logging
import salt.utils.data
import salt.utils.platform
import salt.utils.validate.net
from salt.exceptions import CommandExecutionError
# Set up logging
log = logging.getLogger(__name__)
__VALID_PROTO = ("static", "dhcp")
# Define the module's virtual name
__virtualname__ = "network"
def __virtual__():
    """
    Confine this module to Windows systems with the required execution module
    available.
    """
    if not salt.utils.platform.is_windows():
        return (False, "ip module could not be loaded")
    if "ip.get_interface" not in __salt__:
        return (False, "ip module could not be loaded")
    return __virtualname__
def _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
"""
Ensure that the configuration passed is formatted correctly and contains
valid IP addresses, etc.
"""
errors = []
# Validate DNS configuration
if dns_proto == "dhcp":
if dns_servers is not None:
errors.append(
"The dns_servers param cannot be set if unless dns_proto is "
"set to 'static'"
)
else:
if str(dns_servers).lower() in ["none", "[]"]:
pass
elif not isinstance(dns_servers, list):
errors.append("The dns_servers param must be formatted as a list")
else:
bad_ips = [
x for x in dns_servers if not salt.utils.validate.net.ipv4_addr(x)
]
if bad_ips:
errors.append("Invalid DNS server IPs: {}".format(", ".join(bad_ips)))
# Validate IP configuration
if ip_proto == "dhcp":
if ip_addrs is not None:
errors.append(
"The ip_addrs param cannot be set if unless ip_proto is set to 'static'"
)
if gateway is not None:
errors.append(
"A gateway IP cannot be set if unless ip_proto is set to 'static'"
)
else:
if not ip_addrs:
errors.append("The ip_addrs param is required to set static IPs")
elif not isinstance(ip_addrs, list):
errors.append("The ip_addrs param must be formatted as a list")
else:
bad_ips = [x for x in ip_addrs if not salt.utils.validate.net.ipv4_addr(x)]
if bad_ips:
errors.append(
"The following static IPs are invalid: {}".format(
", ".join(bad_ips)
)
)
# Validate default gateway
if gateway is not None:
if not salt.utils.validate.net.ipv4_addr(gateway):
errors.append("Gateway IP {} is invalid".format(gateway))
return errors
def _addrdict_to_ip_addrs(addrs):
"""
Extracts a list of IP/CIDR expressions from a list of addrdicts, as
retrieved from ip.get_interface
"""
return [
"{}/{}".format(x["IP Address"], x["Subnet"].rsplit("/", 1)[-1]) for x in addrs
]
def _changes(cur, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
    """
    Compares the current interface against the desired configuration and
    returns a dictionary describing the changes that need to be made.
    """
    delta = {}

    # --- DNS ---
    cur_dns_proto = "dhcp"
    if "Statically Configured DNS Servers" in cur:
        cur_dns_proto = "static"
        configured = cur["Statically Configured DNS Servers"]
        cur_dns_servers = configured if isinstance(configured, list) else [configured]
        if set(dns_servers or ["None"]) != set(cur_dns_servers):
            delta["dns_servers"] = dns_servers
    elif "DNS servers configured through DHCP" in cur:
        cur_dns_servers = cur["DNS servers configured through DHCP"]
        # Currently on DHCP; only a move to 'static' can change the servers.
        if dns_proto == "static" and set(dns_servers or ["None"]) != set(
            cur_dns_servers
        ):
            delta["dns_servers"] = dns_servers

    # --- IP / gateway ---
    cur_ip_proto = "static" if cur["DHCP enabled"] == "No" else "dhcp"
    cur_ip_addrs = _addrdict_to_ip_addrs(cur.get("ip_addrs", []))
    cur_gateway = cur.get("Default Gateway")

    if dns_proto != cur_dns_proto:
        delta["dns_proto"] = dns_proto
    if ip_proto != cur_ip_proto:
        delta["ip_proto"] = ip_proto
    if ip_proto == "static" and set(ip_addrs or []) != set(cur_ip_addrs):
        delta["ip_addrs"] = ip_addrs
    if ip_proto == "static" and gateway != cur_gateway:
        delta["gateway"] = gateway

    return delta
def managed(
    name,
    dns_proto=None,
    dns_servers=None,
    ip_proto=None,
    ip_addrs=None,
    gateway=None,
    enabled=True,
    **kwargs
):
    """
    Ensure that the named interface is configured properly.

    Args:

        name (str):
            The name of the interface to manage

        dns_proto (str): None
            Set to ``static`` and use the ``dns_servers`` parameter to provide a
            list of DNS nameservers. set to ``dhcp`` to use DHCP to get the DNS
            servers.

        dns_servers (list): None
            A list of static DNS servers. To clear the list of DNS servers pass
            an empty list (``[]``). ``None`` will make no changes.

        ip_proto (str): None
            Set to ``static`` and use the ``ip_addrs`` and (optionally)
            ``gateway`` parameters to provide a list of static IP addresses and
            the default gateway. Set to ``dhcp`` to use DHCP.

        ip_addrs (list): None
            A list of static IP addresses with netmask flag, ie: 192.168.0.11/24

        gateway (str): None
            The gateway to set for the interface

        enabled (bool): True
            Set to ``False`` to ensure that this interface is disabled.

    Returns:
        dict: A dictionary of old and new settings

    Example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers:
              - 8.8.8.8
              - 8.8.8.4
            - ip_proto: static
            - ip_addrs:
              - 192.168.0.100/24

    Clear DNS entries example:

    .. code-block:: yaml

        Ethernet1:
          network.managed:
            - dns_proto: static
            - dns_servers: []
            - ip_proto: dhcp
    """
    ret = {
        "name": name,
        "changes": {},
        "result": True,
        "comment": "Interface '{}' is up to date".format(name),
    }

    # Normalize so values coming from YAML (None, True, ...) compare
    # against __VALID_PROTO as lowercase strings.
    dns_proto = str(dns_proto).lower()
    ip_proto = str(ip_proto).lower()

    errors = []
    if dns_proto not in __VALID_PROTO:
        ret["result"] = False
        errors.append(
            "dns_proto must be one of the following: {}".format(
                ", ".join(__VALID_PROTO)
            )
        )

    if ip_proto not in __VALID_PROTO:
        errors.append(
            "ip_proto must be one of the following: {}".format(", ".join(__VALID_PROTO))
        )

    if errors:
        ret["result"] = False
        ret["comment"] = "\n".join(errors)
        return ret

    try:
        currently_enabled = __salt__["ip.is_enabled"](name)
    except CommandExecutionError:
        # ip.is_enabled raises when it cannot query the interface;
        # treat that as "not enabled".
        currently_enabled = False

    if not enabled:
        # Desired state: interface disabled.
        if currently_enabled:
            if __opts__["test"]:
                ret["result"] = None
                ret["comment"] = "Interface '{}' will be disabled".format(name)
            else:
                ret["result"] = __salt__["ip.disable"](name)
                if not ret["result"]:
                    ret["comment"] = "Failed to disable interface '{}'".format(name)
        else:
            ret["comment"] += " (already disabled)"
        return ret
    else:
        # Desired state: interface enabled and configured.
        if not currently_enabled:
            if __opts__["test"]:
                ret["result"] = None
                ret["comment"] = "Interface '{}' will be enabled".format(name)
            else:
                if not __salt__["ip.enable"](name):
                    ret["result"] = False
                    ret[
                        "comment"
                    ] = "Failed to enable interface '{}' to make changes".format(name)
                    return ret

        errors = _validate(dns_proto, dns_servers, ip_proto, ip_addrs, gateway)
        if errors:
            ret["result"] = False
            ret[
                "comment"
            ] = "The following SLS configuration errors were detected:\n- {}".format(
                "\n- ".join(errors)
            )
            return ret

        old = __salt__["ip.get_interface"](name)
        if not old:
            ret["result"] = False
            ret[
                "comment"
            ] = "Unable to get current configuration for interface '{}'".format(name)
            return ret

        changes = _changes(old, dns_proto, dns_servers, ip_proto, ip_addrs, gateway)

        # If dns_servers is the default `None` make no changes
        # To clear the list, pass an empty list ([])
        if str(dns_servers).lower() == "none":
            changes.pop("dns_servers", None)

        if not changes:
            return ret

        if __opts__["test"]:
            # Test mode: report what would change without applying anything.
            comments = []
            if "dns_proto" in changes:
                comments.append(
                    "DNS protocol will be changed to: {}".format(changes["dns_proto"])
                )
            if dns_proto == "static" and "dns_servers" in changes:
                if len(changes["dns_servers"]) == 0:
                    comments.append("The list of DNS servers will be cleared")
                else:
                    comments.append(
                        "DNS servers will be set to the following: {}".format(
                            ", ".join(changes["dns_servers"])
                        )
                    )
            if "ip_proto" in changes:
                comments.append(
                    "IP protocol will be changed to: {}".format(changes["ip_proto"])
                )
            if ip_proto == "static":
                if "ip_addrs" in changes:
                    comments.append(
                        "IP addresses will be set to the following: {}".format(
                            ", ".join(changes["ip_addrs"])
                        )
                    )
                if "gateway" in changes:
                    if changes["gateway"] is None:
                        comments.append("Default gateway will be removed")
                    else:
                        comments.append(
                            "Default gateway will be set to {}".format(
                                changes["gateway"]
                            )
                        )
            ret["result"] = None
            ret[
                "comment"
            ] = "The following changes will be made to interface '{}':\n- {}".format(
                name, "\n- ".join(comments)
            )
            return ret

        # Apply DNS changes.
        if changes.get("dns_proto") == "dhcp":
            __salt__["ip.set_dhcp_dns"](name)

        elif "dns_servers" in changes:
            if len(changes["dns_servers"]) == 0:
                # To clear the list of DNS servers you have to pass []. Later
                # changes gets passed like *args and a single empty list is
                # converted to an empty tuple. So, you have to add [] here
                changes["dns_servers"] = [[]]

            __salt__["ip.set_static_dns"](name, *changes["dns_servers"])

        # Apply IP / gateway changes.
        if changes.get("ip_proto") == "dhcp":
            __salt__["ip.set_dhcp_ip"](name)
        elif (
            changes.get("ip_addrs")
            or changes.get("gateway")
            or changes.get("ip_proto") == "static"
        ):
            # A gateway-only or proto-only change still needs the address
            # list, so fall back to the requested ip_addrs.
            if changes.get("gateway") and not changes.get("ip_addrs"):
                changes["ip_addrs"] = ip_addrs

            if changes.get("ip_proto") == "static" and not changes.get("ip_addrs"):
                changes["ip_addrs"] = ip_addrs

            for idx in range(len(changes["ip_addrs"])):
                if idx == 0:
                    # First address replaces the current config and carries
                    # the gateway; subsequent addresses are appended.
                    __salt__["ip.set_static_ip"](
                        name, changes["ip_addrs"][idx], gateway=gateway, append=False
                    )
                else:
                    __salt__["ip.set_static_ip"](
                        name, changes["ip_addrs"][idx], gateway=None, append=True
                    )

        # Re-read the interface and verify the desired state was reached.
        new = __salt__["ip.get_interface"](name)
        ret["changes"] = salt.utils.data.compare_dicts(old, new)
        if _changes(new, dns_proto, dns_servers, ip_proto, ip_addrs, gateway):
            ret["result"] = False
            ret["comment"] = (
                "Failed to set desired configuration settings "
                "for interface '{}'".format(name)
            )
        else:
            ret[
                "comment"
            ] = "Successfully updated configuration for interface '{}'".format(name)

        return ret
| |
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2009, Jean-Michel Sizun
# Copyright 2009 Frank Scholz <coherence@beebits.net>
import os.path
from twisted.internet import reactor, threads
from twisted.web import server, static
from twisted.web.error import PageRedirect
from coherence.upnp.core import utils
from coherence.upnp.core.utils import ReverseProxyUriResource, ReverseProxyResource
from coherence.upnp.core import DIDLLite
from coherence.backend import BackendStore,BackendItem
from coherence import log
from gdata.youtube.service import YouTubeService
from coherence.extern.youtubedl import FileDownloader,YoutubeIE,MetacafeIE,YoutubePlaylistIE
from coherence.backends.picasa_storage import Container, LazyContainer, AbstractBackendStore
MPEG4_MIMETYPE = 'video/mp4'
MPEG4_EXTENSION = 'mp4'
class TestVideoProxy(ReverseProxyUriResource, log.Loggable):
    """Reverse-proxy resource that resolves a video page URL to the real
    stream URL and serves it according to `proxy_mode`: 'redirect' (send
    the client to the stream URL), 'proxy' (relay bytes through us) or
    'buffer'/'buffered' (download to a local cache and serve from there).

    Python 2 code (note the old-style ``except Exception,error`` below).
    """
    logCategory = 'youtube_store'

    def __init__(self, uri, id,
                 proxy_mode,
                 cache_directory,
                 cache_maxsize=100000000,
                 buffer_size=2000000,
                 fct=None, **kwargs):
        ReverseProxyUriResource.__init__(self, uri)
        self.id = id
        # ids may arrive as ints; the cache filename needs a string
        if isinstance(self.id,int):
            self.id = '%d' % self.id
        self.proxy_mode = proxy_mode
        self.cache_directory = cache_directory
        self.cache_maxsize = int(cache_maxsize)
        self.buffer_size = int(buffer_size)
        self.downloader = None
        self.video_url = None  # the url we get from the youtube page
        self.stream_url = None  # the real video stream, cached somewhere
        self.mimetype = None
        self.filesize = 0
        self.file_in_cache = False
        # Optional deferred-returning callable used to extract the real
        # stream URL(s) from the page URL; kwargs are passed through to it.
        self.url_extractor_fct = fct
        self.url_extractor_params = kwargs

    def requestFinished(self, result):
        """ self.connection is set in utils.ReverseProxyResource.render """
        self.info("ProxyStream requestFinished",result)
        if hasattr(self,'connection'):
            self.connection.transport.loseConnection()

    def render(self, request):
        """Twisted resource entry point: resolve the real stream URL on the
        first request, then hand the request to proxyURL()."""
        self.info("VideoProxy render", request, self.stream_url, self.video_url)
        self.info("VideoProxy headers:", request.getAllHeaders())
        self.info("VideoProxy id:", self.id)

        d = request.notifyFinish()
        d.addBoth(self.requestFinished)

        if self.stream_url is None:
            web_url = "http://%s%s" % (self.host,self.path)
            #print "web_url", web_url

            def got_real_urls(real_urls):
                # url extractor returns a list; use the first entry
                got_real_url(real_urls[0])

            def got_real_url(real_url):
                self.info("Real URL is %s" % real_url)
                self.stream_url = real_url
                if self.stream_url is None:
                    self.warning('Error to retrieve URL - inconsistent web page')
                    return self.requestFinished(None)  # FIXME
                self.stream_url = self.stream_url.encode('ascii', 'strict')
                self.resetUri(self.stream_url)
                self.info("Video URL: %s" % self.stream_url)
                self.video_url = self.stream_url[:]
                d = self.followRedirects(request)
                d.addCallback(self.proxyURL)
                d.addErrback(self.requestFinished)

            if self.url_extractor_fct is not None:
                d = self.url_extractor_fct(web_url, **self.url_extractor_params)
                d.addCallback(got_real_urls)
            else:
                got_real_url(web_url)
            return server.NOT_DONE_YET

        # stream_url already known: serve (slightly deferred) via proxyURL
        reactor.callLater(0.05,self.proxyURL,request)
        return server.NOT_DONE_YET

    def followRedirects(self, request):
        """HEAD the current stream_url, following PageRedirect errors until
        the final location is reached; records filesize and mimetype."""
        self.info("HTTP redirect ", request, self.stream_url)
        d = utils.getPage(self.stream_url, method="HEAD", followRedirect=0)

        def gotHeader(result,request):
            data,header = result
            self.info("finally got something %r", header)
            #FIXME what do we do here if the headers aren't there?
            self.filesize = int(header['content-length'][0])
            self.mimetype = header['content-type'][0]
            return request

        def gotError(error,request):
            # error should be a "Failure" instance at this point
            # NOTE(review): "gotError" % error has no placeholder, so the
            # error detail is silently dropped; likely meant "gotError %s".
            self.info("gotError" % error)
            error_value = error.value
            if (isinstance(error_value,PageRedirect)):
                self.info("got PageRedirect %r" % error_value.location)
                self.stream_url = error_value.location
                self.resetUri(self.stream_url)
                return self.followRedirects(request)
            else:
                self.warning("Error while retrieving page header for URI ", self.stream_url)
                self.requestFinished(None)
                return error

        d.addCallback(gotHeader, request)
        d.addErrback(gotError,request)
        return d

    def proxyURL(self, request):
        """Serve the resolved stream according to self.proxy_mode."""
        self.info("proxy_mode: %s, request %s" % (self.proxy_mode,request.method))

        if self.proxy_mode == 'redirect':
            # send stream url to client for redirection
            request.redirect(self.stream_url)
            request.finish()

        elif self.proxy_mode in ('proxy',):
            res = ReverseProxyResource.render(self,request)
            if isinstance(res,int):
                return res
            request.write(res)
            return

        elif self.proxy_mode in ('buffer','buffered'):
            # download stream to cache,
            # and send it to the client in // after X bytes
            filepath = os.path.join(self.cache_directory, self.id)

            # NOTE(review): this flag is never read afterwards
            file_is_already_available = False
            if (os.path.exists(filepath)
                and os.path.getsize(filepath) == self.filesize):
                # the complete file is already cached: serve it directly
                res = self.renderFile(request, filepath)
                if isinstance(res,int):
                    return res
                request.write(res)
                request.finish()
            else:
                if request.method != 'HEAD':
                    self.downloadFile(request, filepath, None)
                    range = request.getHeader('range')
                    if range is not None:
                        bytesrange = range.split('=')
                        assert bytesrange[0] == 'bytes',\
                               "Syntactically invalid http range header!"
                        start, end = bytesrange[1].split('-', 1)
                        #print "%r %r" %(start,end)
                        if start:
                            start = int(start)
                            if end:
                                end = int(end)
                            else:
                                end = self.filesize -1
                            # Are we requesting something beyond the current size of the file?
                            try:
                                size = os.path.getsize(filepath)
                            except OSError:
                                size = 0
                            if (start >= size and
                                end+10 > self.filesize and
                                end-start < 200000):
                                #print "let's hand that through, it is probably a mp4 index request"
                                res = ReverseProxyResource.render(self,request)
                                if isinstance(res,int):
                                    return res
                                request.write(res)
                                return

                res = self.renderBufferFile (request, filepath, self.buffer_size)
                if res == '' and request.method != 'HEAD':
                    return server.NOT_DONE_YET
                if not isinstance(res,int):
                    request.write(res)
                if request.method == 'HEAD':
                    request.finish()

        else:
            self.warning("Unsupported Proxy Mode: %s" % self.proxy_mode)
            return self.requestFinished(None)

    def renderFile(self,request,filepath):
        """Serve a fully-cached file from disk."""
        self.info('Cache file available %r %r ' %(request, filepath))
        downloadedFile = utils.StaticFile(filepath, self.mimetype)
        downloadedFile.type = MPEG4_MIMETYPE
        downloadedFile.encoding = None
        return downloadedFile.render(request)

    def renderBufferFile (self, request, filepath, buffer_size):
        # Try to render file(if we have enough data)
        self.info("renderBufferFile %s" % filepath)
        rendering = False
        if os.path.exists(filepath) is True:
            filesize = os.path.getsize(filepath)
            # render once we buffered enough, or the download completed
            if ((filesize >= buffer_size) or (filesize == self.filesize)):
                rendering = True
                self.info("Render file", filepath, self.filesize, filesize, buffer_size)
                bufferFile = utils.BufferFile(filepath, self.filesize, MPEG4_MIMETYPE)
                bufferFile.type = MPEG4_MIMETYPE
                #bufferFile.type = 'video/mpeg'
                bufferFile.encoding = None
                try:
                    return bufferFile.render(request)
                except Exception,error:
                    self.info(error)

        # not enough data yet: retry later (the download keeps running)
        if request.method != 'HEAD':
            self.info('Will retry later to render buffer file')
            reactor.callLater(0.5, self.renderBufferFile, request,filepath,buffer_size)
        return ''

    def downloadFinished(self, result):
        self.info('Download finished!')
        self.downloader = None

    def gotDownloadError(self, error, request):
        self.info("Unable to download stream to file: %s" % self.stream_url)
        self.info(request)
        self.info(error)

    def downloadFile(self, request, filepath, callback, *args):
        """Start (once) a background download of the stream into filepath."""
        if (self.downloader is None):
            self.info("Proxy: download data to cache file %s" % filepath)
            self.checkCacheSize()
            self.downloader = utils.downloadPage(self.stream_url, filepath, supportPartial=1)
            self.downloader.addCallback(self.downloadFinished)
            self.downloader.addErrback(self.gotDownloadError, request)
            if(callback is not None):
                self.downloader.addCallback(callback, request, filepath, *args)
        return self.downloader

    def checkCacheSize(self):
        """Evict least-recently-accessed cache files until the cache is
        back under 2/3 of cache_maxsize."""
        cache_listdir = os.listdir(self.cache_directory)

        cache_size = 0
        for filename in cache_listdir:
            path = "%s%s%s" % (self.cache_directory, os.sep, filename)
            statinfo = os.stat(path)
            cache_size += statinfo.st_size
        self.info("Cache size: %d (max is %s)" % (cache_size, self.cache_maxsize))

        if (cache_size > self.cache_maxsize):
            cache_targetsize = self.cache_maxsize * 2/3
            self.info("Cache above max size: Reducing to %d" % cache_targetsize)

            # Python 2 cmp-style comparator: oldest access time first.
            def compare_atime(filename1, filename2):
                path1 = "%s%s%s" % (self.cache_directory, os.sep, filename1)
                path2 = "%s%s%s" % (self.cache_directory, os.sep, filename2)
                cmp = int(os.stat(path1).st_atime - os.stat(path2).st_atime)
                return cmp
            cache_listdir = sorted(cache_listdir,compare_atime)

            while (cache_size > cache_targetsize):
                filename = cache_listdir.pop(0)
                path = "%s%s%s" % (self.cache_directory, os.sep, filename)
                cache_size -= os.stat(path).st_size
                os.remove(path)
                self.info("removed %s" % filename)

            self.info("new cache size is %d" % cache_size)
class YoutubeVideoItem(BackendItem):
    """A single YouTube video exposed as a UPnP/DIDL VideoItem, served
    through a TestVideoProxy resource."""

    def __init__(self, external_id, title, url, mimetype, entry, store):
        self.external_id = external_id
        self.name = title
        self.duration = None
        self.size = None
        self.mimetype = mimetype
        self.description = None
        self.date = None
        self.item = None
        self.store = store

        def extractDataURL(url, quality):
            """Resolve the real stream URL(s) via youtubedl; returns a
            deferred. Closes over self to read the store's credentials."""
            if (quality == 'hd'):
                format = '22'
            else:
                format = '18'
            kwargs = {
                'usenetrc': False,
                'quiet': True,
                'forceurl': True,
                'forcetitle': False,
                'simulate': True,
                'format': format,
                'outtmpl': u'%(id)s.%(ext)s',
                'ignoreerrors': True,
                'ratelimit': None,
                }
            if len(self.store.login) > 0:
                kwargs['username'] = self.store.login
                kwargs['password'] = self.store.password
            fd = FileDownloader(kwargs)

            youtube_ie = YoutubeIE()
            fd.add_info_extractor(YoutubePlaylistIE(youtube_ie))
            fd.add_info_extractor(MetacafeIE(youtube_ie))
            fd.add_info_extractor(youtube_ie)

            deferred = fd.get_real_urls([url])
            return deferred

        #self.location = VideoProxy(url, self.external_id,
        #                           store.proxy_mode,
        #                           store.cache_directory, store.cache_maxsize, store.buffer_size,
        #                           extractDataURL, quality=self.store.quality)

        self.location = TestVideoProxy(url, self.external_id,
                                       store.proxy_mode,
                                       store.cache_directory, store.cache_maxsize,store.buffer_size,
                                       extractDataURL, quality=self.store.quality)

    def get_item(self):
        """Build (once) and return the DIDLLite VideoItem for this video."""
        if self.item == None:
            upnp_id = self.get_id()
            upnp_parent_id = self.parent.get_id()
            self.item = DIDLLite.VideoItem(upnp_id, upnp_parent_id, self.name)
            self.item.description = self.description
            self.item.date = self.date
            if hasattr(self.parent, 'cover'):
                self.item.albumArtURI = self.parent.cover
            res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
            res.duration = self.duration
            res.size = self.size
            self.item.res.append(res)
        return self.item

    def get_path(self):
        # NOTE(review): self.url is assigned here and read by get_item();
        # callers apparently invoke get_path() first — confirm.
        self.url = self.store.urlbase + str(self.storage_id) + "." + MPEG4_EXTENSION
        return self.url

    def get_id(self):
        return self.storage_id
class YouTubeStore(AbstractBackendStore):
    """Coherence backend exposing YouTube gdata feeds as a UPnP MediaServer."""

    logCategory = 'youtube_store'

    implements = ['MediaServer']

    def __init__(self, server, **kwargs):
        AbstractBackendStore.__init__(self, server, **kwargs)

        self.name = kwargs.get('name', 'YouTube')
        # 'userid' is the documented option; 'login' kept for compatibility.
        self.login = kwargs.get('userid', kwargs.get('login', ''))
        self.password = kwargs.get('password', '')
        self.locale = kwargs.get('location', None)
        self.quality = kwargs.get('quality', 'sd')
        self.showStandardFeeds = (kwargs.get('standard_feeds', 'True') in ['Yes', 'yes', 'true', 'True', '1'])
        self.refresh = int(kwargs.get('refresh', 60)) * 60  # minutes -> seconds

        self.proxy_mode = kwargs.get('proxy_mode', 'redirect')
        self.cache_directory = kwargs.get('cache_directory', '/tmp/coherence-cache')
        try:
            if self.proxy_mode != 'redirect':
                os.mkdir(self.cache_directory)
        except OSError:
            # Directory already exists or cannot be created; previously a
            # bare 'except:' — narrowed to the failure actually expected.
            pass
        self.cache_maxsize = kwargs.get('cache_maxsize', 100000000)
        self.buffer_size = kwargs.get('buffer_size', 750000)

        rootItem = Container(None, self.name)
        self.set_root_item(rootItem)

        if self.showStandardFeeds:
            standardfeeds_uri = 'http://gdata.youtube.com/feeds/api/standardfeeds'
            if self.locale is not None:
                standardfeeds_uri += "/%s" % self.locale
            standardfeeds_uri += "/%s"
            self.appendFeed('Most Viewed', standardfeeds_uri % 'most_viewed', rootItem)
            self.appendFeed('Top Rated', standardfeeds_uri % 'top_rated', rootItem)
            self.appendFeed('Recently Featured', standardfeeds_uri % 'recently_featured', rootItem)
            self.appendFeed('Watch On Mobile', standardfeeds_uri % 'watch_on_mobile', rootItem)
            self.appendFeed('Most Discussed', standardfeeds_uri % 'most_discussed', rootItem)
            self.appendFeed('Top Favorites', standardfeeds_uri % 'top_favorites', rootItem)
            self.appendFeed('Most Linked', standardfeeds_uri % 'most_linked', rootItem)
            self.appendFeed('Most Responded', standardfeeds_uri % 'most_responded', rootItem)
            self.appendFeed('Most Recent', standardfeeds_uri % 'most_recent', rootItem)

        # Personal feeds require credentials.
        if len(self.login) > 0:
            userfeeds_uri = 'http://gdata.youtube.com/feeds/api/users/%s/%s'
            self.appendFeed('My Uploads', userfeeds_uri % (self.login, 'uploads'), rootItem)
            self.appendFeed('My Favorites', userfeeds_uri % (self.login, 'favorites'), rootItem)
            playlistsItem = LazyContainer(rootItem, 'My Playlists', None, self.refresh, self.retrievePlaylistFeeds)
            rootItem.add_child(playlistsItem)
            subscriptionsItem = LazyContainer(rootItem, 'My Subscriptions', None, self.refresh, self.retrieveSubscriptionFeeds)
            rootItem.add_child(subscriptionsItem)

        self.init_completed()

    def __repr__(self):
        return str(self.__class__).split('.')[-1]

    def appendFeed(self, name, feed_uri, parent):
        """Add a lazily-populated container for *feed_uri* under *parent*."""
        item = LazyContainer(parent, name, None, self.refresh, self.retrieveFeedItems, feed_uri=feed_uri)
        parent.add_child(item, external_id=feed_uri)

    def appendVideoEntry(self, entry, parent):
        """Wrap a gdata video *entry* in a YoutubeVideoItem under *parent*."""
        external_id = entry.id.text.split('/')[-1]
        title = entry.media.title.text
        url = entry.media.player.url
        mimetype = MPEG4_MIMETYPE
        #mimetype = 'video/mpeg'
        item = YoutubeVideoItem(external_id, title, url, mimetype, entry, self)
        item.parent = parent
        parent.add_child(item, external_id=external_id)

    def upnp_init(self):
        """Announce the protocol info and set up the gdata YouTube service."""
        self.current_connection_id = None
        if self.server:
            self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
                                                               ['http-get:*:%s:*' % MPEG4_MIMETYPE],
                                                               default=True)
        self.wmc_mapping = {'15': self.get_root_id()}
        self.yt_service = YouTubeService()
        self.yt_service.client_id = 'ytapi-JeanMichelSizun-youtubebackendpl-ruabstu7-0'
        self.yt_service.developer_key = 'AI39si7dv2WWffH-s3pfvmw8fTND-cPWeqF1DOcZ8rwTgTPi4fheX7jjQXpn7SG61Ido0Zm_9gYR52TcGog9Pt3iG9Sa88-1yg'
        self.yt_service.email = self.login
        self.yt_service.password = self.password
        self.yt_service.source = 'Coherence UPnP backend'
        if len(self.login) > 0:
            # Fire-and-forget login in a worker thread.
            d = threads.deferToThread(self.yt_service.ProgrammaticLogin)

    def retrieveFeedItems(self, parent=None, feed_uri=''):
        """Fetch *feed_uri* in a worker thread and append its video entries."""
        feed = threads.deferToThread(self.yt_service.GetYouTubeVideoFeed, feed_uri)

        def gotFeed(feed):
            if feed is None:
                self.warning("Unable to retrieve feed %s" % feed_uri)
                return
            for entry in feed.entry:
                self.appendVideoEntry(entry, parent)

        def gotError(error):
            self.warning("ERROR: %s" % error)

        feed.addCallbacks(gotFeed, gotError)
        return feed

    def retrievePlaylistFeedItems(self, parent, playlist_id):
        """Fetch the videos of *playlist_id* and append them under *parent*."""
        feed = threads.deferToThread(self.yt_service.GetYouTubePlaylistVideoFeed, playlist_id=playlist_id)

        def gotFeed(feed):
            if feed is None:
                # BUG FIX: this branch previously referenced the undefined
                # name 'feed_uri' and raised NameError; report the playlist
                # id we actually have in scope.
                self.warning("Unable to retrieve playlist items %s" % playlist_id)
                return
            for entry in feed.entry:
                self.appendVideoEntry(entry, parent)

        def gotError(error):
            self.warning("ERROR: %s" % error)

        feed.addCallbacks(gotFeed, gotError)
        return feed

    def retrieveSubscriptionFeedItems(self, parent, uri):
        """Resolve a subscription entry at *uri* and append its feed items."""
        entry = threads.deferToThread(self.yt_service.GetYouTubeSubscriptionEntry, uri)

        def gotEntry(entry):
            if entry is None:
                self.warning("Unable to retrieve subscription items %s" % uri)
                return
            feed_uri = entry.feed_link[0].href
            return self.retrieveFeedItems(parent, feed_uri)

        def gotError(error):
            self.warning("ERROR: %s" % error)

        entry.addCallbacks(gotEntry, gotError)
        return entry

    def retrievePlaylistFeeds(self, parent):
        """List the user's playlists as lazy containers under *parent*."""
        playlists_feed = threads.deferToThread(self.yt_service.GetYouTubePlaylistFeed, username=self.login)

        def gotPlaylists(playlist_video_feed):
            if playlist_video_feed is None:
                self.warning("Unable to retrieve playlists feed")
                return
            for playlist_video_entry in playlist_video_feed.entry:
                title = playlist_video_entry.title.text
                playlist_id = playlist_video_entry.id.text.split("/")[-1]  # FIXME find better way to retrieve the playlist ID
                item = LazyContainer(parent, title, playlist_id, self.refresh, self.retrievePlaylistFeedItems, playlist_id=playlist_id)
                parent.add_child(item, external_id=playlist_id)

        def gotError(error):
            self.warning("ERROR: %s" % error)

        playlists_feed.addCallbacks(gotPlaylists, gotError)
        return playlists_feed

    def retrieveSubscriptionFeeds(self, parent):
        """List the user's subscriptions as lazy containers under *parent*."""
        playlists_feed = threads.deferToThread(self.yt_service.GetYouTubeSubscriptionFeed, username=self.login)

        def gotPlaylists(playlist_video_feed):
            if playlist_video_feed is None:
                self.warning("Unable to retrieve subscriptions feed")
                return
            for entry in playlist_video_feed.entry:
                # renamed from 'type' to avoid shadowing the builtin
                sub_type = entry.GetSubscriptionType()
                title = entry.title.text
                uri = entry.id.text
                name = "[%s] %s" % (sub_type, title)
                item = LazyContainer(parent, name, uri, self.refresh, self.retrieveSubscriptionFeedItems, uri=uri)
                item.parent = parent
                parent.add_child(item, external_id=uri)

        def gotError(error):
            self.warning("ERROR: %s" % error)

        playlists_feed.addCallbacks(gotPlaylists, gotError)
        return playlists_feed
| |
'''
Pubnub Modular Input Script
'''
import sys,logging,os,time,re,threading,hashlib
import xml.dom.minidom
from datetime import datetime
# Set by do_run() to the configured response handler callable.
RESPONSE_HANDLER_INSTANCE = None

# Splunk installation root; splunkd provides this in the environment.
SPLUNK_HOME = os.environ.get("SPLUNK_HOME")
#dynamically load in any eggs
EGG_DIR = SPLUNK_HOME + "/etc/apps/pubnub_ta/bin/"
for filename in os.listdir(EGG_DIR):
    if filename.endswith(".egg"):
        sys.path.append(EGG_DIR + filename)
# Import must come after the eggs above are added to sys.path.
from pubnubsdk import Pubnub
#set up logging
logging.root  # NOTE(review): no-op expression statement; likely unintended
logging.root.setLevel(logging.ERROR)
formatter = logging.Formatter('%(levelname)s %(message)s')
#with zero args , should go to STD ERR
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.root.addHandler(handler)
SCHEME = """<scheme>
<title>Pubnub</title>
<description>Pubnub input for subscribing to Pubnub channels</description>
<use_external_validation>true</use_external_validation>
<streaming_mode>xml</streaming_mode>
<use_single_instance>false</use_single_instance>
<endpoint>
<args>
<arg name="name">
<title>Pubnub input name</title>
<description>Name of this Pubnub input</description>
</arg>
<arg name="activation_key">
<title>Activation Key</title>
<description>Visit http://www.baboonbones.com/#activation to obtain a non-expiring key</description>
<required_on_edit>true</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="key">
<title>Key</title>
<description>Subscribe key</description>
<required_on_edit>true</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="channel">
<title>Channel</title>
<description>Pubnub channel</description>
<required_on_edit>true</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="response_handler">
<title>Response Handler</title>
<description>Python classname of custom response handler</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_handler_args">
<title>Response Handler Arguments</title>
<description>Response Handler arguments string , key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
</args>
</endpoint>
</scheme>
"""
def do_validate():
    """External validation entry point (--validate-arguments).

    Currently a stub: reads the validation config from stdin but performs
    no checks, so every stanza passes.
    """
    config = get_validation_config()
    #TODO
    #if error , print_validation_error & sys.exit(2)
def _callback(message, channel):
    """Pubnub subscribe callback: forward each received message to Splunk."""
    handle_output(message)
def _error(message):
    """Pubnub error callback: route errors through the same output handler."""
    handle_output(message)
def do_run(config):
    """Main runtime entry point: verify the activation key, instantiate the
    response handler and subscribe to the configured Pubnub channel.

    NOTE: this script is Python 2 only (uses long() and 'except X,e').
    """
    activation_key = config.get("activation_key").strip()
    app_name = "Pubnub Modular Input"
    if len(activation_key) > 32:
        # Trial key layout: first 32 chars are an md5 hex digest, the rest
        # is the issue timestamp stored reversed.
        activation_hash = activation_key[:32]
        activation_ts = activation_key[32:][::-1]
        current_ts = time.time()
        m = hashlib.md5()
        m.update((app_name + activation_ts))
        if not m.hexdigest().upper() == activation_hash.upper():
            logging.error("FATAL Trial Activation key for App '%s' failed. Please ensure that you copy/pasted the key correctly." % app_name)
            sys.exit(2)
        # 604800 seconds = 7 day trial window.
        if ((current_ts - long(activation_ts)) > 604800):
            logging.error("FATAL Trial Activation key for App '%s' has now expired. Please visit http://www.baboonbones.com/#activation to purchase a non expiring key." % app_name)
            sys.exit(2)
    else:
        # Permanent key: md5 of the bare app name.
        m = hashlib.md5()
        m.update((app_name))
        if not m.hexdigest().upper() == activation_key.upper():
            logging.error("FATAL Activation key for App '%s' failed. Please ensure that you copy/pasted the key correctly." % app_name)
            sys.exit(2)
    delimiter = ','
    #params
    key=config.get("key",)
    channel=config.get("channel")
    response_handler_args={}
    response_handler_args_str=config.get("response_handler_args")
    if not response_handler_args_str is None:
        # "k=v,k2=v2" -> {"k": "v", "k2": "v2"}; split('=',1) lets values
        # themselves contain '='.
        response_handler_args = dict((k.strip(), v.strip()) for k,v in
                                     (item.split('=',1) for item in response_handler_args_str.split(delimiter)))
    response_handler=config.get("response_handler","DefaultResponseHandler")
    # Resolve the handler class by name from responsehandlers.py.
    module = __import__("responsehandlers")
    class_ = getattr(module,response_handler)
    global RESPONSE_HANDLER_INSTANCE
    RESPONSE_HANDLER_INSTANCE = class_(**response_handler_args)
    pubnub = Pubnub(subscribe_key=key,publish_key=None)
    try:
        # Blocks in the SDK; messages arrive via _callback / _error.
        pubnub.subscribe(channels=channel, callback=_callback, error=_error)
    except RuntimeError,e:
        logging.error("Looks like an error: %s" % str(e))
        sys.exit(2)
def handle_output(message):
    """Pass a received Pubnub message to the configured response handler,
    which is expected to print the event to stdout for Splunk to index."""
    try:
        RESPONSE_HANDLER_INSTANCE(message)
        sys.stdout.flush()
    except RuntimeError,e:
        logging.error("Looks like an error handle the response output: %s" % str(e))
# prints validation error data to be consumed by Splunk
def print_validation_error(s):
    """Emit an <error> document on stdout for splunkd to display."""
    print "<error><message>%s</message></error>" % encodeXMLText(s)
# prints XML stream
def print_xml_single_instance_mode(s):
    """Wrap *s* in the XML streaming envelope splunkd expects."""
    print "<stream><event><data>%s</data></event></stream>" % encodeXMLText(s)
# prints simple stream
def print_simple(s):
    """Emit *s* followed by a newline (simple streaming mode)."""
    print "%s\n" % s
def encodeXMLText(text):
    """Escape the five XML special characters in *text*.

    '&' is escaped first so the '&' introduced by later replacements is
    never double-escaped.
    """
    escape_pairs = (
        ("&", "&amp;"),
        ("\"", "&quot;"),
        ("'", "&apos;"),
        ("<", "&lt;"),
        (">", "&gt;"),
    )
    for raw, escaped in escape_pairs:
        text = text.replace(raw, escaped)
    return text
def usage():
print "usage: %s [--scheme|--validate-arguments]"
logging.error("Incorrect Program Usage")
sys.exit(2)
def do_scheme():
    """Print the introspection scheme XML for splunkd (--scheme)."""
    print SCHEME
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_input_config():
    """Parse the runtime input configuration splunkd writes to stdin.

    Returns a dict containing "session_key", "server_uri",
    "checkpoint_dir", the stanza "name" and one entry per stanza <param>.
    Raises Exception if nothing usable was received.
    NOTE: Python 2 raise/except syntax is used throughout.
    """
    config = {}
    try:
        # read everything from stdin
        config_str = sys.stdin.read()
        # parse the config XML
        doc = xml.dom.minidom.parseString(config_str)
        root = doc.documentElement
        session_key_node = root.getElementsByTagName("session_key")[0]
        if session_key_node and session_key_node.firstChild and session_key_node.firstChild.nodeType == session_key_node.firstChild.TEXT_NODE:
            data = session_key_node.firstChild.data
            config["session_key"] = data
        server_uri_node = root.getElementsByTagName("server_uri")[0]
        if server_uri_node and server_uri_node.firstChild and server_uri_node.firstChild.nodeType == server_uri_node.firstChild.TEXT_NODE:
            data = server_uri_node.firstChild.data
            config["server_uri"] = data
        conf_node = root.getElementsByTagName("configuration")[0]
        if conf_node:
            logging.debug("XML: found configuration")
            stanza = conf_node.getElementsByTagName("stanza")[0]
            if stanza:
                stanza_name = stanza.getAttribute("name")
                if stanza_name:
                    logging.debug("XML: found stanza " + stanza_name)
                    config["name"] = stanza_name
                    # Every <param> with text content becomes a config entry.
                    params = stanza.getElementsByTagName("param")
                    for param in params:
                        param_name = param.getAttribute("name")
                        logging.debug("XML: found param '%s'" % param_name)
                        if param_name and param.firstChild and \
                           param.firstChild.nodeType == param.firstChild.TEXT_NODE:
                            data = param.firstChild.data
                            config[param_name] = data
                            logging.debug("XML: '%s' -> '%s'" % (param_name, data))
        checkpnt_node = root.getElementsByTagName("checkpoint_dir")[0]
        if checkpnt_node and checkpnt_node.firstChild and \
           checkpnt_node.firstChild.nodeType == checkpnt_node.firstChild.TEXT_NODE:
            config["checkpoint_dir"] = checkpnt_node.firstChild.data
        if not config:
            raise Exception, "Invalid configuration received from Splunk."
    except Exception, e:
        raise Exception, "Error getting Splunk configuration via STDIN: %s" % str(e)
    return config
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_validation_config():
    """Read the validation XML from stdin and flatten it into a dict.

    Returns a dict with key "stanza" (the item name) plus one entry per
    <param> element that carries text content.
    """
    val_data = {}
    # splunkd writes the validation document to stdin
    raw_xml = sys.stdin.read()
    document = xml.dom.minidom.parseString(raw_xml)
    root = document.documentElement
    logging.debug("XML: found items")
    item_node = root.getElementsByTagName("item")[0]
    if item_node:
        logging.debug("XML: found item")
        val_data["stanza"] = item_node.getAttribute("name")
        for param in item_node.getElementsByTagName("param"):
            param_name = param.getAttribute("name")
            logging.debug("Found param %s" % param_name)
            child = param.firstChild
            if param_name and child and child.nodeType == child.TEXT_NODE:
                val_data[param_name] = child.data
    return val_data
if __name__ == '__main__':
    # Dispatch on how splunkd invoked us:
    #   --scheme              print the introspection scheme
    #   --validate-arguments  validate a stanza from stdin
    #   (no arguments)        stream events for a configured input
    if len(sys.argv) > 1:
        if sys.argv[1] == "--scheme":
            do_scheme()
        elif sys.argv[1] == "--validate-arguments":
            do_validate()
        else:
            usage()
    else:
        config = get_input_config()
        do_run(config)
    sys.exit(0)
| |
# VMware vSphere Python SDK
# Copyright (c) 2008-2015 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## @file connect.py
## @brief Connect to a VMOMI ServiceInstance.
##
## Detailed description (for Doxygen goes here)
"""
Connect to a VMOMI ServiceInstance.
Detailed description (for [e]pydoc goes here).
"""
from __future__ import print_function
import sys
import re
from six import reraise
from pyVim.credstore import VICredStore, HostNotFoundException, NoCredentialsFileFound
from pyVmomi import vim, vmodl
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
from xml.parsers.expat import ExpatError
import requests
from requests.auth import HTTPBasicAuth
from pyVmomi import vim, vmodl, SoapStubAdapter, SessionOrientedStub
from pyVmomi.VmomiSupport import versionIdMap, versionMap, IsChildVersion
from pyVmomi.VmomiSupport import GetServiceVersions
"""
Global regular expression for parsing host and port connection
See http://www.ietf.org/rfc/rfc3986.txt sec 3.2.2
"""
_rx = re.compile(r"(^\[.+\]|[^:]+)(:\d+)?$")
_si = None
"""
Global (thread-shared) ServiceInstance
@todo: Get rid of me?
"""
class closing(object):
    """Minimal stand-in for contextlib.closing.

    Yields the wrapped object on entry to a 'with' block and calls its
    close() method unconditionally on exit.
    """

    def __init__(self, obj):
        # Keep a reference to the closable object for the block's duration.
        self.obj = obj

    def __enter__(self):
        return self.obj

    def __exit__(self, *exc_info):
        self.obj.close()
class VimSessionOrientedStub(SessionOrientedStub):
    '''A vim-specific SessionOrientedStub.  See the SessionOrientedStub class
    in pyVmomi/SoapAdapter.py for more information.

    The static factory methods below each build a "loginMethod" callable
    that the session stub invokes to (re-)establish a session.'''

    # The set of exceptions that should trigger a relogin by the session stub.
    SESSION_EXCEPTIONS = (
        vim.fault.NotAuthenticated,
    )

    @staticmethod
    def makeUserLoginMethod(username, password, locale=None):
        '''Return a function that will call the vim.SessionManager.Login() method
        with the given parameters.  The result of this function can be passed as
        the "loginMethod" to a SessionOrientedStub constructor.'''
        def _doLogin(soapStub):
            si = vim.ServiceInstance("ServiceInstance", soapStub)
            sm = si.content.sessionManager
            # Only log in when no session is live, so a stub-triggered
            # relogin does not stack sessions.
            if not sm.currentSession:
                si.content.sessionManager.Login(username, password, locale)
        return _doLogin

    @staticmethod
    def makeExtensionLoginMethod(extensionKey):
        '''Return a function that will call the
        vim.SessionManager.LoginExtensionByCertificate() method for the given
        extension key.  The result of this function can be passed as the
        "loginMethod" to a SessionOrientedStub constructor.'''
        def _doLogin(soapStub):
            si = vim.ServiceInstance("ServiceInstance", soapStub)
            sm = si.content.sessionManager
            if not sm.currentSession:
                si.content.sessionManager.LoginExtensionByCertificate(extensionKey)
        return _doLogin

    @staticmethod
    def makeCertHokTokenLoginMethod(stsUrl, stsCert=None):
        '''Return a function that will call the vim.SessionManager.LoginByToken()
        after obtaining a HoK SAML token from the STS. The result of this function
        can be passed as the "loginMethod" to a SessionOrientedStub constructor.
        @param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server).
        @param stsCert: public key of the STS service.
        '''
        assert(stsUrl)

        def _doLogin(soapStub):
            import sso
            # The client certificate/key configured on the stub are used both
            # to obtain the HoK assertion and to sign the login request.
            cert = soapStub.schemeArgs['cert_file']
            key = soapStub.schemeArgs['key_file']
            authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
                                                 sts_cert=stsCert)
            samlAssertion = authenticator.get_hok_saml_assertion(cert,key)

            def _requestModifier(request):
                return sso.add_saml_context(request, samlAssertion, key)

            si = vim.ServiceInstance("ServiceInstance", soapStub)
            sm = si.content.sessionManager
            if not sm.currentSession:
                # The SAML token is attached only for the duration of the
                # login call, then cleared again.
                with soapStub.requestModifier(_requestModifier):
                    try:
                        soapStub.samlToken = samlAssertion
                        si.content.sessionManager.LoginByToken()
                    finally:
                        soapStub.samlToken = None
        return _doLogin

    @staticmethod
    def makeCredBearerTokenLoginMethod(username,
                                       password,
                                       stsUrl,
                                       stsCert=None):
        '''Return a function that will call the vim.SessionManager.LoginByToken()
        after obtaining a Bearer token from the STS. The result of this function
        can be passed as the "loginMethod" to a SessionOrientedStub constructor.
        @param username: username of the user/service registered with STS.
        @param password: password of the user/service registered with STS.
        @param stsUrl: URL of the SAML Token issueing service. (i.e. SSO server).
        @param stsCert: public key of the STS service.
        '''
        assert(username)
        assert(password)
        assert(stsUrl)

        def _doLogin(soapStub):
            import sso
            cert = soapStub.schemeArgs['cert_file']
            key = soapStub.schemeArgs['key_file']
            authenticator = sso.SsoAuthenticator(sts_url=stsUrl,
                                                 sts_cert=stsCert)
            samlAssertion = authenticator.get_bearer_saml_assertion(username,
                                                                    password,
                                                                    cert,
                                                                    key)
            si = vim.ServiceInstance("ServiceInstance", soapStub)
            sm = si.content.sessionManager
            if not sm.currentSession:
                # Attach the bearer token just for the login call.
                try:
                    soapStub.samlToken = samlAssertion
                    si.content.sessionManager.LoginByToken()
                finally:
                    soapStub.samlToken = None
        return _doLogin
def Connect(host='localhost', port=443, user='root', pwd='',
            service="hostd", adapter="SOAP", namespace=None, path="/sdk",
            version=None, keyFile=None, certFile=None):
    """
    Connect to the specified server, login and return the service
    instance object.

    Throws any exception back to caller. The service instance object is
    also saved in the library for easy access.

    Clients should modify the service parameter only when connecting to
    a VMOMI server other than hostd/vpxd. For both of the latter, the
    default value is fine.

    @param host: Which host to connect to.  May include a ":port" suffix
                 or a bracketed IPv6 literal, which override the port arg.
    @type  host: string
    @param port: Port
    @type  port: int
    @param user: User
    @type  user: string
    @param pwd: Password
    @type  pwd: string
    @param service: Service
    @type  service: string
    @param adapter: Adapter
    @type  adapter: string
    @param namespace: Namespace *** Deprecated: Use version instead ***
    @type  namespace: string
    @param path: Path
    @type  path: string
    @param version: Version
    @type  version: string
    @param keyFile: ssl key file path
    @type  keyFile: string
    @param certFile: ssl cert file path
    @type  certFile: string
    """
    # Check for a credentials store if user and passwords weren't passed via command line.
    # NOTE(review): contrary to the comment above, credentials found in the
    # store unconditionally override the user/pwd arguments — confirm intent.
    try:
        cstore = VICredStore()
        (store_user, store_pwd) = cstore.get_userpwd(host)
        user = store_user
        pwd = store_pwd
    except HostNotFoundException:
        print("Host [" + host + "] was not found on credentials file. You need to enter credentials manually!")
    except NoCredentialsFileFound:
        print("No credentials store file found. You need to enter credentials manually!")

    # Split an optional ":port" (or bracketed IPv6) out of the host string.
    try:
        info = re.match(_rx, host)
        if info is not None:
            host = info.group(1)
            if host[0] == '[':
                host = info.group(1)[1:-1]
            if info.group(2) is not None:
                port = int(info.group(2)[1:])
    except ValueError as ve:
        # Malformed port suffix: fall back to the caller-supplied port.
        pass

    if namespace:
        # 'namespace' is deprecated; exactly one of namespace/version may be given.
        assert(version is None)
        version = versionMap[namespace]
    elif not version:
        version="vim.version.version6"

    si, stub = __Login(host, port, user, pwd, service, adapter, version, path,
                       keyFile, certFile)
    SetSi(si)

    return si
def Disconnect(si):
    """
    Disconnect (logout) service instance
    @param si: Service instance (returned from Connect)
    """
    # Logout
    __Logout(si)
    # Clear the module-level saved instance so GetSi() no longer returns it.
    SetSi(None)
## Method that gets a local ticket for the specified user
def GetLocalTicket(si, user):
    """Acquire a local ticket for *user* and return (userName, password).

    The one-time password is read from the file the host writes for the
    ticket.
    @raise vim.fault.HostConnectFault: wrapping any failure to reach the
           session manager.
    """
    try:
        sessionManager = si.content.sessionManager
    except Exception as e:
        if type(e).__name__ == 'ExpatError':
            msg = 'Malformed response while querying for local ticket: "%s"' % e
            raise vim.fault.HostConnectFault(msg=msg)
        else:
            msg = 'Failed to query for local ticket: "%s"' % e
            raise vim.fault.HostConnectFault(msg=msg)
    localTicket = sessionManager.AcquireLocalTicket(userName=user)
    # BUG FIX: the Python 2-only 'file()' builtin leaked the file handle;
    # open() with a context manager closes it deterministically and works
    # on Python 3 as well.
    with open(localTicket.passwordFilePath) as pwdFile:
        return (localTicket.userName, pwdFile.read())
## Private method that performs the actual Connect and returns a
## connected service instance object.
def __Login(host, port, user, pwd, service, adapter, version, path,
            keyFile, certFile):
    """
    Private method that performs the actual Connect and returns a
    connected service instance object.

    @param host: Which host to connect to.
    @type  host: string
    @param port: Port
    @type  port: int
    @param user: User
    @type  user: string
    @param pwd: Password
    @type  pwd: string
    @param service: Service
    @type  service: string
    @param adapter: Adapter (only "SOAP" is supported)
    @type  adapter: string
    @param version: Version
    @type  version: string
    @param path: Path
    @type  path: string
    @param keyFile: ssl key file path
    @type  keyFile: string
    @param certFile: ssl cert file path
    @type  certFile: string
    @raise vim.fault.HostConnectFault: when the server cannot be reached or
           returns a malformed response.
    """
    # XXX remove the adapter and service arguments once dependent code is fixed
    if adapter != "SOAP":
        raise ValueError(adapter)

    # Create the SOAP stub adapter
    stub = SoapStubAdapter(host, port, version=version, path=path,
                           certKeyFile=keyFile, certFile=certFile)

    # Get Service instance
    si = vim.ServiceInstance("ServiceInstance", stub)
    try:
        content = si.RetrieveContent()
    except vmodl.MethodFault:
        raise
    except Exception as e:
        # NOTE (hartsock): preserve the traceback for diagnostics
        # pulling and preserving the traceback makes diagnosing connection
        # failures easier since the fault will also include where inside the
        # library the fault occurred. Without the traceback we have no idea
        # why the connection failed beyond the message string.
        # (locals renamed: the originals shadowed the builtins type/traceback)
        (excType, excValue, excTraceback) = sys.exc_info()
        if excTraceback:
            fault = vim.fault.HostConnectFault(msg=str(e))
            reraise(vim.fault.HostConnectFault, fault, excTraceback)
        else:
            raise vim.fault.HostConnectFault(msg=str(e))

    # Get a ticket if we're connecting to localhost and password is not specified
    if host == 'localhost' and not pwd:
        try:
            (user, pwd) = GetLocalTicket(si, user)
        except Exception:
            pass  # This is not supported against vCenter, and connecting
                  # with an empty password is fine in debug builds

    # Login.  InvalidLogin and any other fault propagate unchanged to the
    # caller (the previous try/except clauses only re-raised and the bound
    # result was never used, so both were removed).
    content.sessionManager.Login(user, pwd, None)

    return si, stub
## Private method that performs the actual Disonnect
def __Logout(si):
    """
    Disconnect (logout) service instance

    Best-effort: any failure (dead connection, already logged out) is
    silently ignored.
    @param si: Service instance (returned from Connect), may be None
    """
    try:
        if si:
            si.RetrieveContent().sessionManager.Logout()
    except Exception:
        pass
## Get the saved service instance.
def GetSi():
    """ Get the saved service instance (module-global, set by SetSi). """
    return _si
## Set the saved service instance.
def SetSi(si):
    """ Set the saved service instance (pass None to clear it). """
    global _si
    _si = si
## Get the global saved stub
def GetStub():
    """Return the stub of the saved service instance, or None if unset."""
    si = GetSi()
    if si:
        return si._GetStub()
    # (stray C-style semicolon removed from the line below)
    return None
## RAII-style class for managing connections
class Connection(object):
    """Context manager wrapping Connect()/Disconnect().

    The constructor arguments are forwarded verbatim to Connect() on
    entry; the resulting service instance is disconnected on exit.
    """

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        self.si = None

    def __enter__(self):
        self.si = Connect(*self.args, **self.kwargs)
        return self.si

    def __exit__(self, *exc_info):
        if not self.si:
            return
        Disconnect(self.si)
        self.si = None
class SmartConnection(object):
    """Context manager wrapping SmartConnect()/Disconnect().

    Like Connection, but negotiates the API version via SmartConnect()
    on entry.
    """

    def __init__(self, *args, **kwargs):
        self.args = args
        self.kwargs = kwargs
        self.si = None

    def __enter__(self):
        self.si = SmartConnect(*self.args, **self.kwargs)
        return self.si

    def __exit__(self, *exc_info):
        if not self.si:
            return
        Disconnect(self.si)
        self.si = None
## Private method that returns an ElementTree describing the API versions
## supported by the specified server.  The result will be vimServiceVersions.xml
## if it exists, otherwise vimService.wsdl if it exists, otherwise None.
def __GetServiceVersionDescription(protocol, server, port, path):
    """
    Private method that returns a root from an ElementTree describing the API versions
    supported by the specified server.  The result will be vimServiceVersions.xml
    if it exists, otherwise vimService.wsdl if it exists, otherwise None.

    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type  protocol: string
    @param server: Which server to connect to.
    @type  server: string
    @param port: Port
    @type  port: int
    @param path: Path
    @type  path: string
    """
    # The two candidate documents were previously fetched by duplicated
    # blocks; fold them into one loop, preserving the original order and
    # exception behaviour (only ExpatError is swallowed).
    for document in ("vimServiceVersions.xml", "vimService.wsdl"):
        url = "%s://%s:%s/%s/%s" % (protocol, server, port, path, document)
        try:
            # NOTE(review): certificate verification is disabled, matching
            # the original behaviour — confirm this is intentional.
            sock = requests.get(url, verify=False)
            if sock.status_code == 200:
                return ElementTree.fromstring(sock.content)
        except ExpatError:
            # Malformed XML: try the next candidate document.
            pass
    return None
## Private method that returns true if the service version description document
## indicates that the desired version is supported
def __VersionIsSupported(desiredVersion, serviceVersionDescription):
    """
    Private method that returns true if the service version description document
    indicates that the desired version is supported

    @param desiredVersion: The version we want to see if the server supports
                           (eg. vim.version.version2).
    @type  desiredVersion: string
    @param serviceVersionDescription: A root ElementTree for vimServiceVersions.xml
                                      or vimService.wsdl.
    @type  serviceVersionDescription: root ElementTree
    """
    root = serviceVersionDescription
    if root.tag == 'namespaces':
        # serviceVersionDescription appears to be a vimServiceVersions.xml document
        if root.get('version') != '1.0':
            raise RuntimeError('vimServiceVersions.xml has version %s,' \
                               ' which is not understood' % (root.get('version')))
        desiredVersionId = versionIdMap[desiredVersion]
        # (removed an unused 'supportedVersion = None' assignment here)
        for namespace in root.findall('namespace'):
            # A namespace matches either via its current version...
            if namespace.findtext('version') == desiredVersionId:
                return True
            # ...or via one of its listed prior versions.
            # (loop variable renamed: it previously reused 'versionId')
            for prior in namespace.findall('priorVersions/version'):
                if prior.text == desiredVersionId:
                    return True
    else:
        # serviceVersionDescription must be a vimService.wsdl document
        wsdlNS = 'http://schemas.xmlsoap.org/wsdl/'
        importElement = serviceVersionDescription.find('.//{%s}import' % wsdlNS)
        # The namespace attribute looks like "urn:vim25Service"; strip "urn:".
        supportedVersion = versionMap[importElement.get('namespace')[4:]]
        if IsChildVersion(supportedVersion, desiredVersion):
            return True
    return False
## Private method that returns the most preferred API version supported by the
## specified server,
def __FindSupportedVersion(protocol, server, port, path, preferredApiVersions):
    """
    Private method that returns the most preferred API version supported by the
    specified server, or None if the server cannot be described or supports
    none of the candidates.

    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type  protocol: string
    @param server: Which server to connect to.
    @type  server: string
    @param port: Port
    @type  port: int
    @param path: Path
    @type  path: string
    @param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
                                 If a list of versions is specified the versions should
                                 be ordered from most to least preferred.
    @type  preferredApiVersions: string or string list
    """
    description = __GetServiceVersionDescription(protocol, server, port, path)
    if description is None:
        return None

    # Normalize a single version string into a one-element candidate list.
    if isinstance(preferredApiVersions, list):
        candidates = preferredApiVersions
    else:
        candidates = [preferredApiVersions]

    for candidate in candidates:
        if __VersionIsSupported(candidate, description):
            return candidate
    return None
def SmartConnect(protocol='https', host='localhost', port=443, user='root', pwd='',
                 service="hostd", path="/sdk",
                 preferredApiVersions=None):
    """
    Determine the most preferred API version supported by the specified server,
    then connect to the specified server using that API version, login and return
    the service instance object.

    Throws any exception back to caller. The service instance object is
    also saved in the library for easy access.

    Clients should modify the service parameter only when connecting to
    a VMOMI server other than hostd/vpxd. For both of the latter, the
    default value is fine.

    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type  protocol: string
    @param host: Which host to connect to.
    @type  host: string
    @param port: Port
    @type  port: int
    @param user: User
    @type  user: string
    @param pwd: Password
    @type  pwd: string
    @param service: Service
    @type  service: string
    @param path: Path
    @type  path: string
    @param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
                                 If a list of versions is specified the versions should
                                 be ordered from most to least preferred.  If None is
                                 specified, the list of versions support by pyVmomi will
                                 be used.
    @type  preferredApiVersions: string or string list
    """
    if preferredApiVersions is None:
        preferredApiVersions = GetServiceVersions('vim25')

    supportedVersion = __FindSupportedVersion(protocol,
                                              host,
                                              port,
                                              path,
                                              preferredApiVersions)
    if supportedVersion is None:
        raise Exception("%s:%s is not a VIM server" % (host, port))

    # A negative port number tells SoapStubAdapter to use plain HTTP.
    # (Replaces the fragile "cond and a or b" idiom, which also silently
    # misbehaved for a falsy -int(port).)
    portNumber = -int(port) if protocol == "http" else int(port)

    return Connect(host=host,
                   port=portNumber,
                   user=user,
                   pwd=pwd,
                   service=service,
                   adapter='SOAP',
                   version=supportedVersion,
                   path=path)
def OpenUrlWithBasicAuth(url, user='root', pwd=''):
   """
   Fetch the given URL, presenting the supplied credentials to the server
   via HTTP basic authentication.  Certificate verification is disabled.
   Returns the response as a file-like object.
   """
   credentials = HTTPBasicAuth(user, pwd)
   return requests.get(url, auth=credentials, verify=False)
def OpenPathWithStub(path, stub):
   """
   Fetch *path* over HTTP(S) from the host/port associated with *stub*.
   The stub's session cookie, when present, is forwarded with the request.
   Raises vmodl.fault.NotSupported for stubs without a recognised scheme.
   Returns the response as a file-like object.
   """
   import httplib
   if not hasattr(stub, 'scheme'):
      raise vmodl.fault.NotSupported()
   if stub.scheme == httplib.HTTPConnection:
      protocol = 'http'
   elif stub.scheme == httplib.HTTPSConnection:
      protocol = 'https'
   else:
      raise vmodl.fault.NotSupported()
   # Forward the stub's session cookie so the server sees the same session.
   headers = {"Cookie": stub.cookie} if stub.cookie else {}
   url = '%s://%s%s' % (protocol, stub.host, path)
   return requests.get(url, headers=headers, verify=False)
| |
from django.forms import fields, widgets
from django.db.models.fields import NOT_PROVIDED
from django.utils import formats
from django.core.exceptions import ValidationError
class Field(object):
   """
   Base class for API resource fields.

   Subclasses supply a django form field via get_form_field(); this class
   provides the shared property handling, schema generation and the
   serialize/unserialize plumbing built on top of that form field.
   """
   # Class-level defaults; __init__ shadows them per-instance when a
   # caller supplies a real value.
   default = None
   help_text = None
   nullable = True
   required = False

   def __init__(self, *args, **kwargs):
      """Apply any recognised keyword properties to this instance."""
      allowed_properties = ["default", "help_text", "nullable", "required", ]
      # items() instead of the Python-2-only iteritems(); identical
      # behavior on Python 2 and keeps the class portable to Python 3.
      for name, value in kwargs.items():
         if name in allowed_properties:
            current_value = getattr(self, name)
            # Only set when the caller provided a real, different value
            # (Django's NOT_PROVIDED sentinel means "unset").
            if value != current_value and value != NOT_PROVIDED:
               setattr(self, name, value)

   @classmethod
   def instance_from_model_field(cls, field):
      """Build an instance mirroring the given Django model field."""
      default = field.default
      nullable = field.null
      # A field that may be left blank on the model is not required here.
      required = not field.blank
      return cls(default=default, nullable=nullable, required=required)

   def generate_schema(self):
      """Return a JSON-serializable description of this field."""
      return {
         "default": self.default,
         "help_text": self.help_text,
         "nullable": self.nullable,
         "required": self.required,
      }

   def get_form_field(self):
      """Return the django form field used for validation; subclass hook."""
      raise NotImplementedError("You must define this in your field class.")

   def serialize(self, value):
      """Convert *value* to its wire representation via the form widget."""
      form_field = self.get_form_field()
      # NOTE(review): relies on the widget's private _format_value helper;
      # confirm it still exists in the Django version in use.
      if hasattr(form_field.widget, "_format_value"):
         value = form_field.widget._format_value(value)
      return value

   def unserialize(self, value):
      """Validate and convert an incoming value via the form field."""
      form_field = self.get_form_field()
      cleaned = form_field.clean(value)
      return cleaned
class BooleanField(Field):
   """Field holding a boolean value; validated by django's BooleanField."""
   default = False
   help_text = "Boolean data."
   def get_form_field(self):
      """Return the django form field used to validate/clean values."""
      return fields.BooleanField(required=self.required)
class CharField(Field):
   """Field for free-form unicode strings with optional length bounds."""
   help_text = "Unicode string data."
   min_length = None
   max_length = None

   def __init__(self, max_length=None, min_length=None, *args, **kwargs):
      super(CharField, self).__init__(*args, **kwargs)
      self.max_length = max_length
      self.min_length = min_length

   def get_form_field(self):
      """Build the validating django form field with the length bounds."""
      constraints = {
         "min_length": self.min_length,
         "max_length": self.max_length,
         "required": self.required,
      }
      return fields.CharField(**constraints)

   @classmethod
   def instance_from_model_field(cls, field):
      """Mirror a model CharField, copying its max_length."""
      instance = super(CharField, cls).instance_from_model_field(field)
      instance.max_length = field.max_length
      return instance

   def generate_schema(self):
      """Extend the base schema with the length constraints."""
      schema = super(CharField, self).generate_schema()
      schema["min_length"] = self.min_length
      schema["max_length"] = self.max_length
      return schema
class DateField(Field):
   """Field that (un)serializes calendar dates as formatted strings."""
   help_text = "A date as a string."
   auto_now = False
   auto_now_add = False
   # Locale-configured date formats, resolved once at import time.
   input_formats = formats.get_format('DATE_INPUT_FORMATS')

   def __init__(self, auto_now=False, auto_now_add=False, input_formats=None, *args, **kwargs):
      super(DateField, self).__init__(*args, **kwargs)
      self.auto_now_add = auto_now_add
      self.auto_now = auto_now
      if input_formats:
         self.input_formats = input_formats

   def generate_schema(self):
      """Extend the base schema with the accepted input formats."""
      schema = super(DateField, self).generate_schema()
      schema["formats"] = self.input_formats
      return schema

   def get_form_field(self):
      """Return the validating django DateField."""
      return fields.DateField(input_formats=self.input_formats, required=self.required)

   @classmethod
   def instance_from_model_field(cls, model):
      """Mirror a model date field, copying its auto_now flags."""
      # Local renamed from 'field' to 'instance' -- it previously shadowed
      # the meaning of the incoming model field parameter.
      instance = super(DateField, cls).instance_from_model_field(model)
      instance.auto_now = model.auto_now
      instance.auto_now_add = model.auto_now_add
      return instance
class DateTimeField(DateField):
   """Field that (un)serializes combined date-and-time values as strings."""
   help_text = "A date and time as a string."
   # Locale-configured datetime formats, resolved once at import time.
   input_formats = formats.get_format('DATETIME_INPUT_FORMATS')
   def get_form_field(self):
      """Return the validating django DateTimeField."""
      return fields.DateTimeField(input_formats=self.input_formats, required=self.required)
class DecimalField(Field):
   """Field for fixed-precision decimal numbers."""
   default = 0
   help_text = "Decimal data."
   max_digits = None
   decimal_places = None

   def __init__(self, max_digits=None, decimal_places=None, *args, **kwargs):
      super(DecimalField, self).__init__(*args, **kwargs)
      self.decimal_places = decimal_places
      self.max_digits = max_digits

   def generate_schema(self):
      """Extend the base schema with the precision constraints."""
      schema = super(DecimalField, self).generate_schema()
      schema["max_digits"] = self.max_digits
      schema["decimal_places"] = self.decimal_places
      return schema

   def get_form_field(self):
      """Build the validating django form field with the precision bounds."""
      constraints = {
         "max_digits": self.max_digits,
         "decimal_places": self.decimal_places,
         "required": self.required,
      }
      return fields.DecimalField(**constraints)

   @classmethod
   def instance_from_model_field(cls, model):
      """Mirror a model DecimalField, copying its precision settings."""
      instance = super(DecimalField, cls).instance_from_model_field(model)
      instance.max_digits = model.max_digits
      instance.decimal_places = model.decimal_places
      return instance
class EmailField(CharField):
   """CharField variant validated as an email address."""
   help_text = "Email address data."
   def get_form_field(self):
      """Return the validating django EmailField with the length bounds."""
      return fields.EmailField(min_length=self.min_length, max_length=self.max_length, required=self.required)
class IntegerField(Field):
   """Field for integers, optionally constrained to a value range."""
   default = 0
   help_text = "Integer data."
   min_value = None
   max_value = None

   def __init__(self, min_value=None, max_value=None, *args, **kwargs):
      super(IntegerField, self).__init__(*args, **kwargs)
      self.max_value = max_value
      self.min_value = min_value

   def get_form_field(self):
      """Build the validating django form field with the value bounds."""
      bounds = {
         "min_value": self.min_value,
         "max_value": self.max_value,
         "required": self.required,
      }
      return fields.IntegerField(**bounds)

   def generate_schema(self):
      """Extend the base schema with the value bounds."""
      schema = super(IntegerField, self).generate_schema()
      schema["min_value"] = self.min_value
      schema["max_value"] = self.max_value
      return schema
class PositiveIntegerField(IntegerField):
   """IntegerField restricted to values >= 0."""
   help_text = "Integer data which is positive or zero."
   min_value = 0

   def __init__(self, *args, **kwargs):
      # No 'return' here: __init__ must return None; the old 'return
      # super(...)' merely returned None by accident.
      super(PositiveIntegerField, self).__init__(min_value=0, *args, **kwargs)

   def get_form_field(self):
      """Build the validating django form field.

      BUG FIX: django.forms.fields has no PositiveIntegerField (that name
      only exists for *model* fields), so the old code raised
      AttributeError at runtime.  Use IntegerField with the lower bound
      (always 0 for this class) enforced via min_value instead.
      """
      return fields.IntegerField(min_value=self.min_value, max_value=self.max_value, required=self.required)
class TextField(Field):
   """Field for unbounded unicode text (no length constraints)."""
   help_text = "Unicode string data."
   def get_form_field(self):
      """Return a plain django CharField with no length limits."""
      return fields.CharField(required=self.required)
class TimeField(DateField):
   """Field that (un)serializes times of day as formatted strings."""
   help_text = "A time as a string."
   # Locale-configured time formats, resolved once at import time.
   input_formats = formats.get_format('TIME_INPUT_FORMATS')
   def get_form_field(self):
      """Return the validating django TimeField."""
      return fields.TimeField(input_formats=self.input_formats, required=self.required)
class RelatedField(Field):
   """Base for fields that reference other resources rather than raw data."""
   attribute = None
   is_relation = True
   to_resource = None

   def __init__(self, to_resource, attribute, *args, **kwargs):
      super(RelatedField, self).__init__(*args, **kwargs)
      self.to_resource = to_resource
      self.attribute = attribute

   def generate_schema(self):
      """Describe the relation; a literal default makes no sense here."""
      schema = super(RelatedField, self).generate_schema()
      del schema["default"]
      schema["related_uri"] = self._build_resource_uri()
      return schema

   def serialize(self, value):
      """Pass through untouched; subclasses resolve objects to URIs."""
      return value

   def unserialize(self, value):
      """Pass through untouched; subclasses resolve URIs to objects."""
      return value

   def _build_obj_uri(self, obj):
      """URI of one related object, via the target resource's serializer."""
      return self.to_resource().serialize_obj(obj)["resource_uri"]

   def _build_resource_uri(self):
      """URI of the related resource's index view."""
      from django.core.urlresolvers import reverse
      return reverse("%s_index" % (self.to_resource.resource_name, ))
class ToManyField(RelatedField):
   """Relation field exposing a to-many link as a list of resource URIs."""
   help_text = "A link to a list of other related resources."

   def __init__(self, to_resource, attribute, *args, **kwargs):
      super(ToManyField, self).__init__(to_resource=to_resource, attribute=attribute, *args, **kwargs)

   def get_form_field(self):
      """ModelMultipleChoiceField over the target resource's queryset."""
      from django.forms import models as model_fields
      field = model_fields.ModelMultipleChoiceField(queryset=self.to_resource().data_set.queryset, required=self.required)
      field.error_messages["invalid_choice"] = "You did not provide a valid reference for this resource."
      return field

   def serialize(self, resource, value):
      """Return the URIs of every object behind the to-many attribute."""
      obj = resource.data_set.get(pk=value[resource.data_set._primary_key])
      manager = getattr(obj, self.attribute)
      uri_list = []
      for inst in manager.all():
         uri_list.append(self._build_obj_uri(inst))
      return uri_list

   def _pk_from_value(self, value, form_field):
      """Coerce one incoming value (integer pk or resource URI) to a pk.

      Raises ValidationError when the value is neither an integer nor a
      resolvable resource URI.
      """
      from django.core.urlresolvers import resolve
      try:
         return int(value)
      except ValueError:
         try:
            func, args, kwargs = resolve(value)
            return kwargs["pks"]
         except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            raise ValidationError(form_field.error_messages["invalid_choice"])

   def unserialize(self, value):
      """Clean a value or list of values (pks or URIs) into model objects."""
      form_field = self.get_form_field()
      # The duplicated list/scalar branches of the old code now share one
      # coercion helper.
      values = value if isinstance(value, list) else [value]
      pks = [self._pk_from_value(item, form_field) for item in values]
      obj = form_field.clean(pks)
      return obj
class ToResourceField(RelatedField):
   """Relation field exposing a foreign key as a single resource URI."""
   help_text = "A link to a single related resource."

   def get_form_field(self):
      """ModelChoiceField over the target resource's queryset."""
      from django.forms import models as model_fields
      field = model_fields.ModelChoiceField(queryset=self.to_resource().data_set.queryset, required=self.required)
      field.error_messages["invalid_choice"] = "You did not provide a valid reference for this resource."
      return field

   def serialize(self, resource, value):
      """Return the URI of the single object referenced by this field."""
      if self.attribute in value:
         pk = value[self.attribute]
      else:
         # Raw rows expose the foreign key under "<attribute>_id".
         pk = value[self.attribute + "_id"]
      obj_related = self.to_resource().data_set.get(pk=pk)
      uri = self._build_obj_uri(obj_related)
      return uri

   def unserialize(self, value):
      """Clean one incoming value (pk or resource URI) into a model object."""
      from django.core.urlresolvers import resolve
      form_field = self.get_form_field()
      try:
         pk = int(value)
      except ValueError:
         try:
            func, args, kwargs = resolve(value)
            pk = kwargs["pks"]
         except Exception:
            # Narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            raise ValidationError(form_field.error_messages["invalid_choice"])
      obj = form_field.clean(pk)
      return obj
| |
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for run_perf_tests."""
import StringIO
import datetime
import json
import re
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.output_capture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
class MainTest(unittest.TestCase):
    """Tests for PerfTestsRunner: test collection, option parsing and JSON
    upload handling, all against a mock host/filesystem."""

    def create_runner(self, args=None):
        """Return (runner, port) backed by a mock filesystem.

        The signature previously used the mutable default ``args=[]``; it
        is now ``None`` (same behavior via ``args or []``) so a single
        list object is not shared between calls.
        """
        args = args or []
        options, _ = PerfTestsRunner._parse_args(args)
        test_port = TestPort(host=MockHost(), options=options)
        runner = PerfTestsRunner(args=args, port=test_port)
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
        return runner, test_port

    def _add_file(self, runner, dirname, filename, content=True):
        """Create a fake test file below the runner's perf-tests root."""
        dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
        runner._host.filesystem.maybe_make_directory(dirname)
        runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content

    def test_collect_tests(self):
        """A single .html file under inspector/ is collected."""
        runner, _ = self.create_runner()
        self._add_file(runner, 'inspector', 'a_file.html', 'a content')
        tests = runner._collect_tests()
        self.assertEqual(len(tests), 1)

    def _collect_tests_and_sort_test_name(self, runner):
        """Collected test names, sorted for order-insensitive asserts."""
        return sorted([test.test_name() for test in runner._collect_tests()])

    def test_collect_tests_with_multile_files(self):
        # NOTE: "multile" is a historical typo in the test name; kept so
        # anything selecting tests by name keeps working.
        runner, port = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])

        def add_file(filename):
            port.host.filesystem.files[runner._host.filesystem.join(runner._base_path, filename)] = 'some content'
        add_file('test1.html')
        add_file('test2.html')
        add_file('test3.html')
        # chdir to the parent of the perf-tests dir so relative arguments resolve.
        port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
        self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])

    def test_collect_tests_with_skipped_list(self):
        """Tests on the port's skip list (files or whole dirs) are dropped."""
        runner, port = self.create_runner()
        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])

    def test_collect_tests_with_skipped_list_and_files(self):
        """Tests requested explicitly on the command line win over the skip list."""
        runner, port = self.create_runner(args=['Suite/Test1.html', 'Suite/SkippedTest1.html', 'SkippedSuite/Test1.html'])
        self._add_file(runner, 'SkippedSuite', 'Test1.html')
        self._add_file(runner, 'SkippedSuite', 'Test2.html')
        self._add_file(runner, 'Suite', 'Test1.html')
        self._add_file(runner, 'Suite', 'Test2.html')
        self._add_file(runner, 'Suite', 'SkippedTest1.html')
        self._add_file(runner, 'Suite', 'SkippedTest2.html')
        port.skipped_perf_tests = lambda: ['Suite/SkippedTest1.html', 'Suite/SkippedTest1.html', 'SkippedSuite']
        self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner),
                              ['SkippedSuite/Test1.html', 'Suite/SkippedTest1.html', 'Suite/Test1.html'])

    def test_collect_tests_with_ignored_skipped_list(self):
        """--force makes the runner ignore the skip list entirely."""
        runner, port = self.create_runner(args=['--force'])
        self._add_file(runner, 'inspector', 'test1.html')
        self._add_file(runner, 'inspector', 'unsupported_test1.html')
        self._add_file(runner, 'inspector', 'test2.html')
        self._add_file(runner, 'inspector/resources', 'resource_file.html')
        self._add_file(runner, 'unsupported', 'unsupported_test2.html')
        port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
        self.assertItemsEqual(
            self._collect_tests_and_sort_test_name(runner),
            [
                'inspector/test1.html',
                'inspector/test2.html',
                'inspector/unsupported_test1.html',
                'unsupported/unsupported_test2.html'
            ])

    def test_default_args(self):
        """Defaults produced by _parse_args with no arguments."""
        options, _ = PerfTestsRunner._parse_args([])
        self.assertTrue(options.build)
        self.assertEqual(options.time_out_ms, 600 * 1000)
        self.assertTrue(options.generate_results)
        self.assertTrue(options.show_results)
        self.assertTrue(options.use_skipped_list)
        self.assertEqual(options.repeat, 1)
        self.assertEqual(options.test_runner_count, DEFAULT_TEST_RUNNER_COUNT)

    def test_parse_args(self):
        """Every supported command-line flag round-trips into options."""
        options, _ = PerfTestsRunner._parse_args([
            '--build-directory=folder42',
            '--platform=platform42',
            '--builder-name', 'webkit-mac-1',
            '--build-number=56',
            '--time-out-ms=42',
            '--no-show-results',
            '--reset-results',
            '--output-json-path=a/output.json',
            '--slave-config-json-path=a/source.json',
            '--test-results-server=somehost',
            '--additional-driver-flag=--enable-threaded-parser',
            '--additional-driver-flag=--awesomesauce',
            '--repeat=5',
            '--test-runner-count=5',
            '--debug'
        ])
        self.assertTrue(options.build)
        self.assertEqual(options.build_directory, 'folder42')
        self.assertEqual(options.platform, 'platform42')
        self.assertEqual(options.builder_name, 'webkit-mac-1')
        self.assertEqual(options.build_number, '56')
        self.assertEqual(options.time_out_ms, '42')
        self.assertEqual(options.configuration, 'Debug')
        self.assertFalse(options.show_results)
        self.assertTrue(options.reset_results)
        self.assertEqual(options.output_json_path, 'a/output.json')
        self.assertEqual(options.slave_config_json_path, 'a/source.json')
        self.assertEqual(options.test_results_server, 'somehost')
        self.assertEqual(options.additional_driver_flag, ['--enable-threaded-parser', '--awesomesauce'])
        self.assertEqual(options.repeat, 5)
        self.assertEqual(options.test_runner_count, 5)

    def test_upload_json(self):
        """_upload_json succeeds on 'OK'/JSON-OK replies and survives bad
        replies as well as uploader exceptions."""
        runner, port = self.create_runner()
        port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'

        class MockFileUploader:
            """Scripted stand-in for the FileUploader class; records calls."""
            called = []
            upload_single_text_file_throws = False
            upload_single_text_file_return_value = None

            @classmethod
            def reset(cls):
                cls.called = []
                cls.upload_single_text_file_throws = False
                cls.upload_single_text_file_return_value = None

            # 'mock' is used instead of 'self' so the enclosing test's
            # 'self' (the TestCase) stays visible for assertions.
            def __init__(mock, url, timeout):
                self.assertEqual(url, 'https://some.host/some/path')
                self.assertTrue(isinstance(timeout, int) and timeout)
                mock.called.append('FileUploader')

            def upload_single_text_file(mock, filesystem, content_type, filename):
                self.assertEqual(filesystem, port.host.filesystem)
                self.assertEqual(content_type, 'application/json')
                self.assertEqual(filename, 'some.json')
                mock.called.append('upload_single_text_file')
                if mock.upload_single_text_file_throws:
                    raise Exception
                return mock.upload_single_text_file_return_value

        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('OK')
        self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('Some error')
        output = OutputCapture()
        output.capture_output()
        self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        _, _, logs = output.restore_output()
        self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got a bad response:\nSome error\n')

        # Throwing an exception upload_single_text_file shouldn't blow up _upload_json
        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_throws = True
        self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "OK"}')
        self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])

        MockFileUploader.reset()
        MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO(
            '{"status": "SomethingHasFailed", "failureStored": false}')
        output = OutputCapture()
        output.capture_output()
        self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
        _, _, logs = output.restore_output()
        serialized_json = json.dumps({'status': 'SomethingHasFailed', 'failureStored': False}, indent=4)
        self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n' % serialized_json)
class InspectorPassTestData:
    """Canned driver output/expected log for a passing Chromium-style test."""
    text = 'RESULT group_name: test_name= 42 ms'
    output = """Running inspector/pass.html (2 of 2)
RESULT group_name: test_name= 42 ms
Finished: 0.1 s
"""
class EventTargetWrapperTestData:
    """Canned driver output, expected log and expected parsed results for
    the Bindings/event-target-wrapper.html perf test."""
    text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471
Time:
values 1486, 1471, 1510, 1505, 1478, 1490 ms
avg 1490 ms
median 1488 ms
stdev 15.13935 ms
min 1471 ms
max 1510 ms
"""
    output = """Running Bindings/event-target-wrapper.html (1 of 2)
RESULT Bindings: event-target-wrapper: Time= 1490.0 ms
median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms
Finished: 0.1 s
"""
    # Expected parsed-results dict; the value list is repeated 4 times to
    # match DEFAULT_TEST_RUNNER_COUNT runs.
    results = {
        'url': ('https://chromium.googlesource.com/chromium/src/+/master/third_party/WebKit'
                '/PerformanceTests/Bindings/event-target-wrapper.html'),
        'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}
    }
class SomeParserTestData:
    """Canned driver output and expected log for Parser/some-parser.html."""
    text = """Running 20 times
Ignoring warm-up run (1115)
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50861 ms
min 1080 ms
max 1120 ms
"""
    output = """Running Parser/some-parser.html (2 of 2)
RESULT Parser: some-parser: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
Finished: 0.1 s
"""
class MemoryTestData:
    """Canned driver output with Time, JS Heap and Malloc sections, plus
    the expected log and per-metric parsed results."""
    text = """Running 20 times
Ignoring warm-up run (1115)
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50861 ms
min 1080 ms
max 1120 ms
JS Heap:
values 825000, 811000, 848000, 837000, 829000 bytes
avg 830000 bytes
median 829000 bytes
stdev 13784.04875 bytes
min 811000 bytes
max 848000 bytes
Malloc:
values 529000, 511000, 548000, 536000, 521000 bytes
avg 529000 bytes
median 529000 bytes
stdev 14124.44689 bytes
min 511000 bytes
max 548000 bytes
"""
    output = """Running 1 tests
Running Parser/memory-test.html (1 of 1)
RESULT Parser: memory-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
RESULT Parser: memory-test: JSHeap= 830000.0 bytes
median= 829000.0 bytes, stdev= 12649.11064 bytes, min= 811000.0 bytes, max= 848000.0 bytes
RESULT Parser: memory-test: Malloc= 529000.0 bytes
median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes
Finished: 0.1 s
"""
    # Value lists repeated 4 times to match DEFAULT_TEST_RUNNER_COUNT runs.
    results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4}
    js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4}
    malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4}
class TestDriver:
    """Stub driver that returns canned output keyed off the test's name."""

    def run_test(self, driver_input, stop_when_done):
        """Return a DriverOutput whose contents/flags depend on the name suffix."""
        name = driver_input.test_name
        crash = name.endswith('crash.html')
        timeout = name.endswith('timeout.html')
        if name.endswith('failed.html'):
            text = None
        elif name.endswith('pass.html'):
            text = InspectorPassTestData.text
        elif name.endswith('tonguey.html'):
            text = 'we are not expecting an output from perf tests but RESULT blablabla'
        elif name.endswith('event-target-wrapper.html'):
            text = EventTargetWrapperTestData.text
        elif name.endswith('some-parser.html'):
            text = SomeParserTestData.text
        elif name.endswith('memory-test.html'):
            text = MemoryTestData.text
        else:
            text = ''
        return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)

    def start(self):
        """do nothing"""

    def stop(self):
        """do nothing"""
class IntegrationTest(unittest.TestCase):
def _normalize_output(self, log):
return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log))
def _load_output_json(self, runner):
json_content = runner._host.filesystem.read_text_file(runner._output_json_path())
return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content))
    def create_runner(self, args=None, driver_class=TestDriver):
        """Return (runner, port) whose port manufactures driver_class drivers."""
        args = args or []
        options, _ = PerfTestsRunner._parse_args(args)
        test_port = TestPort(host=MockHost(), options=options)
        # Every worker gets a fresh fake driver regardless of its arguments.
        test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
        runner = PerfTestsRunner(args=args, port=test_port)
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
        runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
        return runner, test_port
def run_test(self, test_name):
runner, port = self.create_runner()
tests = [ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name))]
return runner._run_tests_set(tests) == 0
    # Each of the following drives run_test() against one canned TestDriver
    # outcome; only 'pass.html' produces valid perf-test output.
    def test_run_passing_test(self):
        self.assertTrue(self.run_test('pass.html'))
    def test_run_silent_test(self):
        self.assertFalse(self.run_test('silent.html'))
    def test_run_failed_test(self):
        self.assertFalse(self.run_test('failed.html'))
    def test_run_tonguey_test(self):
        self.assertFalse(self.run_test('tonguey.html'))
    def test_run_timeout_test(self):
        self.assertFalse(self.run_test('timeout.html'))
    def test_run_crash_test(self):
        self.assertFalse(self.run_test('crash.html'))
def _tests_for_runner(self, runner, test_names):
filesystem = runner._host.filesystem
tests = []
for test in test_names:
path = filesystem.join(runner._base_path, test)
if test.startswith('inspector/'):
tests.append(ChromiumStylePerfTest(runner._port, test, path))
else:
tests.append(PerfTest(runner._port, test, path))
return tests
    def test_run_test_set(self):
        """_run_tests_set counts every non-passing outcome as unexpected."""
        runner, _ = self.create_runner()
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
                                               'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests)
        finally:
            _, _, log = output.restore_output()
        # All but the single passing test are unexpected results.
        self.assertEqual(unexpected_result_count, len(tests) - 1)
        self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
    def test_run_test_set_kills_drt_per_run(self):
        """The driver is stopped once per test, even for failing tests."""
        class TestDriverWithStopCount(TestDriver):
            # Class-level counter shared across the fake drivers.
            stop_count = 0
            def stop(self):
                TestDriverWithStopCount.stop_count += 1
        runner, _ = self.create_runner(driver_class=TestDriverWithStopCount)
        tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
                                               'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
        runner._run_tests_set(tests)
        self.assertEqual(TestDriverWithStopCount.stop_count, 6)
    def test_run_test_set_for_parser_tests(self):
        """Non-inspector perf tests log the summarized RESULT lines."""
        runner, _ = self.create_runner()
        tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner._run_tests_set(tests)
        finally:
            _, _, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output)
    def test_run_memory_test(self):
        """Time, JSHeap and Malloc metrics all end up in the output JSON."""
        runner, port = self.create_runner_and_setup_results_template()
        runner._timestamp = 123456789
        port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')
        output = OutputCapture()
        output.capture_output()
        try:
            unexpected_result_count = runner.run()
        finally:
            _, _, log = output.restore_output()
        self.assertEqual(unexpected_result_count, 0)
        self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
        parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
        self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results)
        self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
        self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)
    def _test_run_with_json_output(
            self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True):
        """Drive runner.run() end to end with _upload_json mocked out.

        Creates two canned tests, checks the exit code, optionally compares
        the normalized log against the expected per-repeat output, and
        verifies whether an upload happened.  Returns the captured logs.
        """
        filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
        filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
        uploaded = [False]
        def mock_upload_json(hostname, json_path, host_path=None):
            # FIXME: Get rid of the hard-coded perf.webkit.org once we've completed the transition.
            self.assertIn(hostname, ['some.host'])
            self.assertIn(json_path, ['/mock-checkout/output.json'])
            self.assertIn(host_path, [None, '/api/report'])
            uploaded[0] = upload_succeeds
            return upload_succeeds
        runner._upload_json = mock_upload_json
        # Fixed timestamps so output JSON content is deterministic.
        runner._timestamp = 123456789
        runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000)
        output_capture = OutputCapture()
        output_capture.capture_output()
        try:
            self.assertEqual(runner.run(), expected_exit_code)
        finally:
            _, _, logs = output_capture.restore_output()
        if not expected_exit_code and compare_logs:
            expected_logs = ''
            for i in xrange(repeat):  # xrange: this module targets Python 2.
                runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
                expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + InspectorPassTestData.output
            if results_shown:
                expected_logs += 'MOCK: user.open_url: file://...\n'
            self.assertEqual(self._normalize_output(logs), expected_logs)
        self.assertEqual(uploaded[0], upload_succeeds)
        return logs
_event_target_wrapper_and_inspector_results = {
'Bindings': {
'url': 'https://chromium.googlesource.com/chromium/src/+/master/third_party/WebKit/PerformanceTests/Bindings',
'tests': {'event-target-wrapper': EventTargetWrapperTestData.results}
}
}
    def test_run_with_json_output(self):
        """A successful run writes the JSON results and the HTML page."""
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
                                                                           '--test-results-server=some.host'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            'buildTime': '2013-02-08T15:19:37.460000', 'tests': self._event_target_wrapper_and_inspector_results,
            'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}}])
        filesystem = port.host.filesystem
        self.assertTrue(filesystem.isfile(runner._output_json_path()))
        self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))
    def test_run_with_description(self):
        """--description is embedded verbatim in the output JSON."""
        runner, port = self.create_runner_and_setup_results_template(
            args=['--output-json-path=/mock-checkout/output.json',
                  '--test-results-server=some.host', '--description', 'some description'])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            'buildTime': '2013-02-08T15:19:37.460000', 'description': 'some description',
            'tests': self._event_target_wrapper_and_inspector_results,
            'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}}])
    def create_runner_and_setup_results_template(self, args=None):
        """Like create_runner(), but also installs the mock results-template
        HTML and the jquery resource the results page expects."""
        args = args or []
        runner, port = self.create_runner(args)
        filesystem = port.host.filesystem
        filesystem.write_text_file(
            runner._base_path + '/resources/results-template.html',  # pylint: disable=protected-access
            ('BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
             '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END'))
        filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
        return runner, port
def test_run_respects_no_results(self):
runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--no-results'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
    def test_run_generates_json_by_default(self):
        """Even without --output-json-path, a run writes the JSON results file
        and the companion .html results page at the default location."""
        runner, port = self.create_runner_and_setup_results_template()
        filesystem = port.host.filesystem
        output_json_path = runner._output_json_path()
        results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
        # Neither artifact exists before the run.
        self.assertFalse(filesystem.isfile(output_json_path))
        self.assertFalse(filesystem.isfile(results_page_path))
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(
            self._load_output_json(runner),
            [
                {
                    'buildTime': '2013-02-08T15:19:37.460000',
                    'tests': self._event_target_wrapper_and_inspector_results,
                    'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}
                }
            ])
        self.assertTrue(filesystem.isfile(output_json_path))
        self.assertTrue(filesystem.isfile(results_page_path))
    def test_run_merges_output_by_default(self):
        """A pre-existing output JSON list gets the new entry appended, not replaced."""
        runner, port = self.create_runner_and_setup_results_template()
        filesystem = port.host.filesystem
        output_json_path = runner._output_json_path()
        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
        self._test_run_with_json_output(runner, port.host.filesystem)
        self.assertEqual(self._load_output_json(runner), [{'previous': 'results'}, {
            'buildTime': '2013-02-08T15:19:37.460000', 'tests': self._event_target_wrapper_and_inspector_results,
            'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}}])
        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
    def test_run_respects_reset_results(self):
        """--reset-results discards the pre-existing output JSON instead of merging."""
        runner, port = self.create_runner_and_setup_results_template(args=['--reset-results'])
        filesystem = port.host.filesystem
        output_json_path = runner._output_json_path()
        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
        self._test_run_with_json_output(runner, port.host.filesystem)
        # Only the new entry survives; the "previous" entry is gone.
        self.assertEqual(self._load_output_json(runner), [{
            'buildTime': '2013-02-08T15:19:37.460000', 'tests': self._event_target_wrapper_and_inspector_results,
            'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}}])
        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
    def test_run_generates_and_show_results_page(self):
        """The results page is generated from the template (placeholders expanded,
        JSON inlined) and shown; a second run appends and regenerates it."""
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = lambda path: page_shown.append(path)
        filesystem = port.host.filesystem
        self._test_run_with_json_output(runner, filesystem, results_shown=False)
        expected_entry = {'buildTime': '2013-02-08T15:19:37.460000',
                          'tests': self._event_target_wrapper_and_inspector_results,
                          'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}}
        self.maxDiff = None
        self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
        self.assertEqual(self._load_output_json(runner), [expected_entry])
        # %AbsolutePathToWebKitTrunk% is expanded and the JSON is inlined in the page.
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
                         'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
                         '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')
        # Second run: results merge and the page is regenerated with both entries.
        self._test_run_with_json_output(runner, filesystem, results_shown=False)
        self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
        self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry])
        self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
                         'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
                         '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
    def test_run_respects_no_show_results(self):
        """The results page is shown by default and suppressed by --no-show-results."""
        # Closure appends to whichever list `page_shown` is bound to at call time.
        show_results_html_file = lambda path: page_shown.append(path)
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
        page_shown = []
        port.show_results_html_file = show_results_html_file
        self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
        self.assertEqual(page_shown[0], '/mock-checkout/output.html')
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
                                                                           '--no-show-results'])
        page_shown = []
        port.show_results_html_file = show_results_html_file
        self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
        self.assertEqual(page_shown, [])
def test_run_with_bad_output_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
    def test_run_with_slave_config_json(self):
        """Keys from the slave-config JSON appear in the output prefixed 'builder'
        and capitalized (e.g. "key" -> "builderKey")."""
        runner, port = self.create_runner_and_setup_results_template(
            args=['--output-json-path=/mock-checkout/output.json',
                  '--slave-config-json-path=/mock-checkout/slave-config.json',
                  '--test-results-server=some.host'])
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            'buildTime': '2013-02-08T15:19:37.460000', 'tests': self._event_target_wrapper_and_inspector_results,
            'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}, 'builderKey': 'value'}])
    def test_run_with_bad_slave_config_json(self):
        """Missing, unparsable, or non-dict slave-config files all exit with
        EXIT_CODE_BAD_SOURCE_JSON; the missing-file case also logs a message."""
        runner, port = self.create_runner_and_setup_results_template(
            args=['--output-json-path=/mock-checkout/output.json',
                  '--slave-config-json-path=/mock-checkout/slave-config.json',
                  '--test-results-server=some.host'])
        logs = self._test_run_with_json_output(runner, port.host.filesystem,
                                               expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
        self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
    def test_run_with_multiple_repositories(self):
        """A port reporting an extra repository path still produces the expected
        revisions dict (here only 'chromium' appears in the output)."""
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
                                                                           '--test-results-server=some.host'])
        # Point the port's repository at the mock checkout root.
        port.repository_path = lambda: '/mock-checkout'
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        self.assertEqual(self._load_output_json(runner), [{
            'buildTime': '2013-02-08T15:19:37.460000', 'tests': self._event_target_wrapper_and_inspector_results,
            'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}}])
    def test_run_with_upload_json(self):
        """Platform/builder/build-number flags end up in the JSON; a failed
        upload exits with EXIT_CODE_FAILED_UPLOADING."""
        runner, port = self.create_runner_and_setup_results_template(args=[
            '--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host',
            '--platform', 'platform1',
            '--builder-name', 'builder1',
            '--build-number', '123'
        ])
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertEqual(generated_json[0]['platform'], 'platform1')
        self.assertEqual(generated_json[0]['builderName'], 'builder1')
        # --build-number is parsed to an int.
        self.assertEqual(generated_json[0]['buildNumber'], 123)
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False,
                                        expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
    def test_run_with_upload_json_should_generate_perf_webkit_json(self):
        """End-to-end check of the perf-webkit JSON layout: one list entry with
        build metadata, revisions, and a tests tree keyed by suite/test name.

        NOTE(review): the ``.keys() == [...]`` comparisons rely on Python 2
        ``dict.keys()`` returning a list; under Python 3 they would compare a
        view against a list and fail.
        """
        runner, port = self.create_runner_and_setup_results_template(args=[
            '--output-json-path=/mock-checkout/output.json',
            '--test-results-server', 'some.host',
            '--platform', 'platform1',
            '--builder-name', 'builder1',
            '--build-number', '123',
            '--slave-config-json-path=/mock-checkout/slave-config.json'
        ])
        port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')
        self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertTrue(isinstance(generated_json, list))
        self.assertEqual(len(generated_json), 1)
        output = generated_json[0]
        self.maxDiff = None
        self.assertEqual(output['platform'], 'platform1')
        self.assertEqual(output['buildNumber'], 123)
        self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000')
        self.assertEqual(output['builderName'], 'builder1')
        self.assertEqual(output['builderKey'], 'value1')
        self.assertEqual(output['revisions'], {'chromium': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}})
        self.assertEqual(output['tests'].keys(), ['Bindings'])
        self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url'])
        self.assertEqual(output['tests']['Bindings']['url'],
                         'https://chromium.googlesource.com/chromium/src/+/master/third_party/WebKit/PerformanceTests/Bindings')
        self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper'])
        self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
            'url': ('https://chromium.googlesource.com/chromium/src/+/master/third_party/WebKit'
                    '/PerformanceTests/Bindings/event-target-wrapper.html'),
            'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}
        })
def test_run_with_repeat(self):
self.maxDiff = None
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--repeat', '5'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
self.assertEqual(self._load_output_json(runner), [
{'buildTime': '2013-02-08T15:19:37.460000',
'tests': self._event_target_wrapper_and_inspector_results,
'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}},
{'buildTime': '2013-02-08T15:19:37.460000',
'tests': self._event_target_wrapper_and_inspector_results,
'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}},
{'buildTime': '2013-02-08T15:19:37.460000',
'tests': self._event_target_wrapper_and_inspector_results,
'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}},
{'buildTime': '2013-02-08T15:19:37.460000',
'tests': self._event_target_wrapper_and_inspector_results,
'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}},
{'buildTime': '2013-02-08T15:19:37.460000',
'tests': self._event_target_wrapper_and_inspector_results,
'revisions': {'chromium': {'timestamp': '2013-02-01 08:48:05 +0000', 'revision': '5678'}}}])
    def test_run_with_test_runner_count(self):
        """--test-runner-count=3 yields three (identical, in this mock) metric
        sample lists under metrics.Time.current."""
        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
                                                                           '--test-runner-count=3'])
        self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False)
        generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
        self.assertTrue(isinstance(generated_json, list))
        self.assertEqual(len(generated_json), 1)
        output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current']
        self.assertEqual(len(output), 3)
        expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0]
        for metrics in output:
            self.assertEqual(metrics, expectedMetrics)
| |
from django.core.cache.backends.base import BaseCache, InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
try:
import redis
except ImportError:
raise InvalidCacheBackendError(
"Redis cache backend requires the 'redis-py' library"
)
from redis.connection import DefaultParser
from redis_cache.compat import DEFAULT_TIMEOUT
from redis_cache.connection import pool
from redis_cache.utils import (
CacheKey, get_servers, parse_connection_kwargs, import_class
)
from functools import wraps
def get_client(write=False):
    """Decorator factory for cache-API methods.

    The decorated method is called with the redis client selected for the
    key (read or write client depending on ``write``) and the fully
    versioned cache key in place of the raw key.  Any ``version`` keyword
    argument is consumed here and used to build the versioned key.
    """
    def decorator(method):
        @wraps(method)
        def inner(self, key, *args, **kwargs):
            version = kwargs.pop('version', None)
            chosen_client = self.get_client(key, write=write)
            versioned_key = self.make_key(key, version=version)
            return method(self, chosen_client, versioned_key, *args, **kwargs)
        return inner
    return decorator
class BaseRedisCache(BaseCache):
    """Common machinery for the concrete Redis cache backends.

    Parses backend OPTIONS, builds redis clients with shared connection
    pools, and (de)serializes / (de)compresses cached values.  Methods
    whose behaviour depends on how keys are distributed across servers
    (``get_many``, ``set_many``, ``clear``, ``delete_pattern``, ...) raise
    ``NotImplementedError`` here and are supplied by subclasses.
    """
    def __init__(self, server, params):
        """
        Connect to Redis, and set up cache backend.
        """
        super(BaseRedisCache, self).__init__(params)
        self.server = server
        self.servers = get_servers(server)
        self.params = params or {}
        self.options = params.get('OPTIONS', {})
        self.clients = {}
        self.client_list = []
        self.db = self.get_db()
        self.password = self.get_password()
        self.parser_class = self.get_parser_class()
        self.pickle_version = self.get_pickle_version()
        self.socket_timeout = self.get_socket_timeout()
        self.socket_connect_timeout = self.get_socket_connect_timeout()
        self.connection_pool_class = self.get_connection_pool_class()
        self.connection_pool_class_kwargs = (
            self.get_connection_pool_class_kwargs()
        )
        # Serializer
        self.serializer_class = self.get_serializer_class()
        self.serializer_class_kwargs = self.get_serializer_class_kwargs()
        self.serializer = self.serializer_class(
            **self.serializer_class_kwargs
        )
        # Compressor
        self.compressor_class = self.get_compressor_class()
        self.compressor_class_kwargs = self.get_compressor_class_kwargs()
        self.compressor = self.compressor_class(
            **self.compressor_class_kwargs
        )
    def __getstate__(self):
        # Pickle only the constructor arguments; live connections cannot be
        # pickled and are re-created on unpickling.
        return {'params': self.params, 'server': self.server}
    def __setstate__(self, state):
        # Rebuild the backend (and its connections) from the pickled args.
        self.__init__(**state)
    def get_db(self):
        """Return the redis database number as an int (default 1)."""
        _db = self.params.get('db', self.options.get('DB', 1))
        try:
            return int(_db)
        except (ValueError, TypeError):
            raise ImproperlyConfigured("db value must be an integer")
    def get_password(self):
        """Return the redis password, or None when unauthenticated."""
        return self.params.get('password', self.options.get('PASSWORD', None))
    def get_parser_class(self):
        """Return the response-parser class (dotted path in PARSER_CLASS)."""
        parser_class = self.options.get('PARSER_CLASS', None)
        if parser_class is None:
            return DefaultParser
        return import_class(parser_class)
    def get_pickle_version(self):
        """
        Get the pickle version from the settings and save it for future use
        """
        _pickle_version = self.options.get('PICKLE_VERSION', -1)
        try:
            return int(_pickle_version)
        except (ValueError, TypeError):
            raise ImproperlyConfigured(
                "pickle version value must be an integer"
            )
    def get_socket_timeout(self):
        """Return SOCKET_TIMEOUT in seconds, or None for no timeout."""
        return self.options.get('SOCKET_TIMEOUT', None)
    def get_socket_connect_timeout(self):
        """Return SOCKET_CONNECT_TIMEOUT in seconds, or None."""
        return self.options.get('SOCKET_CONNECT_TIMEOUT', None)
    def get_connection_pool_class(self):
        """Return the connection-pool class (default redis.ConnectionPool)."""
        pool_class = self.options.get(
            'CONNECTION_POOL_CLASS',
            'redis.ConnectionPool'
        )
        return import_class(pool_class)
    def get_connection_pool_class_kwargs(self):
        """Extra kwargs forwarded to the connection-pool constructor."""
        return self.options.get('CONNECTION_POOL_CLASS_KWARGS', {})
    def get_serializer_class(self):
        """Return the value-serializer class (default PickleSerializer)."""
        serializer_class = self.options.get(
            'SERIALIZER_CLASS',
            'redis_cache.serializers.PickleSerializer'
        )
        return import_class(serializer_class)
    def get_serializer_class_kwargs(self):
        """Extra kwargs forwarded to the serializer constructor."""
        return self.options.get('SERIALIZER_CLASS_KWARGS', {})
    def get_compressor_class(self):
        """Return the value-compressor class (default NoopCompressor)."""
        compressor_class = self.options.get(
            'COMPRESSOR_CLASS',
            'redis_cache.compressors.NoopCompressor'
        )
        return import_class(compressor_class)
    def get_compressor_class_kwargs(self):
        """Extra kwargs forwarded to the compressor constructor."""
        return self.options.get('COMPRESSOR_CLASS_KWARGS', {})
    def get_master_client(self):
        """
        Get the write server:port of the master cache
        """
        cache = self.options.get('MASTER_CACHE', None)
        # Without an explicit MASTER_CACHE option, fall back to the first
        # configured client.
        if cache is None:
            return next(iter(self.client_list))
        kwargs = parse_connection_kwargs(cache, db=self.db)
        return self.clients[(
            kwargs['host'],
            kwargs['port'],
            kwargs['db'],
            kwargs['unix_socket_path'],
        )]
    def create_client(self, server):
        """Build a redis client for ``server``, attaching a (possibly shared)
        connection pool from the module-level pool registry."""
        kwargs = parse_connection_kwargs(
            server,
            db=self.db,
            password=self.password,
            socket_timeout=self.socket_timeout,
            socket_connect_timeout=self.socket_connect_timeout,
        )
        client = redis.Redis(**kwargs)
        kwargs.update(
            parser_class=self.parser_class,
            connection_pool_class=self.connection_pool_class,
            connection_pool_class_kwargs=self.connection_pool_class_kwargs,
        )
        connection_pool = pool.get_connection_pool(client, **kwargs)
        client.connection_pool = connection_pool
        return client
    def serialize(self, value):
        """Serialize ``value`` with the configured serializer."""
        return self.serializer.serialize(value)
    def deserialize(self, value):
        """Inverse of :meth:`serialize`."""
        return self.serializer.deserialize(value)
    def compress(self, value):
        """Compress serialized bytes with the configured compressor."""
        return self.compressor.compress(value)
    def decompress(self, value):
        """Inverse of :meth:`compress`."""
        return self.compressor.decompress(value)
    def get_value(self, original):
        """Turn a raw redis value back into a Python object.

        Integers are stored natively (see :meth:`prep_value`), so try an int
        conversion first; anything else is decompressed then deserialized.
        """
        try:
            value = int(original)
        except (ValueError, TypeError):
            value = self.decompress(original)
            value = self.deserialize(value)
        return value
    def prep_value(self, value):
        """Prepare a Python object for storage in redis.

        Plain ints are stored natively so redis INCR/DECR keep working;
        bools are excluded because they are int subclasses but must
        round-trip as bools.
        """
        if isinstance(value, int) and not isinstance(value, bool):
            return value
        value = self.serialize(value)
        return self.compress(value)
    def make_key(self, key, version=None):
        """Version the key once; CacheKey marks already-versioned keys so they
        are not versioned again on re-entry."""
        if not isinstance(key, CacheKey):
            versioned_key = super(BaseRedisCache, self).make_key(key, version)
            return CacheKey(key, versioned_key)
        return key
    def make_keys(self, keys, version=None):
        """Vectorized :meth:`make_key`."""
        return [self.make_key(key, version=version) for key in keys]
    def get_timeout(self, timeout):
        """Normalize a timeout: resolve DEFAULT_TIMEOUT and coerce to int
        (None means "no expiry")."""
        if timeout is DEFAULT_TIMEOUT:
            timeout = self.default_timeout
        if timeout is not None:
            timeout = int(timeout)
        return timeout
    ####################
    # Django cache api #
    ####################
    @get_client(write=True)
    def add(self, client, key, value, timeout=DEFAULT_TIMEOUT):
        """Add a value to the cache, failing if the key already exists.
        Returns ``True`` if the object was added, ``False`` if not.
        """
        timeout = self.get_timeout(timeout)
        return self._set(client, key, self.prep_value(value), timeout, _add_only=True)
    @get_client()
    def get(self, client, key, default=None):
        """Retrieve a value from the cache.
        Returns deserialized value if key is found, the default if not.
        """
        value = client.get(key)
        if value is None:
            return default
        value = self.get_value(value)
        return value
    def _set(self, client, key, value, timeout, _add_only=False):
        # timeout None/0 -> persist forever; negative -> no-op (False).
        if timeout is None or timeout == 0:
            if _add_only:
                return client.setnx(key, value)
            return client.set(key, value)
        elif timeout > 0:
            if _add_only:
                added = client.setnx(key, value)
                if added:
                    client.expire(key, timeout)
                return added
            # NOTE(review): argument order (key, value, time) matches the
            # legacy ``redis.Redis`` setex signature; StrictRedis/redis-py 3+
            # use (key, time, value) — confirm against the pinned redis-py.
            return client.setex(key, value, timeout)
        else:
            return False
    @get_client(write=True)
    def set(self, client, key, value, timeout=DEFAULT_TIMEOUT):
        """Persist a value to the cache, and set an optional expiration time.
        """
        timeout = self.get_timeout(timeout)
        result = self._set(client, key, self.prep_value(value), timeout, _add_only=False)
        return result
    @get_client(write=True)
    def delete(self, client, key):
        """Remove a key from the cache."""
        return client.delete(key)
    def _delete_many(self, client, keys):
        """Delete ``keys`` on one client with a single DEL command."""
        return client.delete(*keys)
    def delete_many(self, keys, version=None):
        """
        Remove multiple keys at once.
        """
        raise NotImplementedError
    def _clear(self, client):
        """Flush the entire database on one client."""
        return client.flushdb()
    def clear(self, version=None):
        """Flush cache keys.
        If version is specified, all keys belonging the version's key
        namespace will be deleted.  Otherwise, all keys will be deleted.
        """
        raise NotImplementedError
    def _get_many(self, client, original_keys, versioned_keys):
        """MGET ``versioned_keys`` from one client, returning a dict keyed by
        the corresponding original (unversioned) keys; misses are omitted."""
        recovered_data = {}
        map_keys = dict(zip(versioned_keys, original_keys))
        results = client.mget(versioned_keys)
        for key, value in zip(versioned_keys, results):
            if value is None:
                continue
            recovered_data[map_keys[key]] = self.get_value(value)
        return recovered_data
    def get_many(self, keys, version=None):
        """Retrieve many keys."""
        raise NotImplementedError
    def _set_many(self, client, data):
        """MSET ``data`` (already prepped and versioned) on one client."""
        return client.mset(data)
    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        """Set a bunch of values in the cache at once from a dict of key/value
        pairs.  This is much more efficient than calling set() multiple times.
        If timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.
        """
        raise NotImplementedError
    @get_client(write=True)
    def incr(self, client, key, delta=1):
        """Add delta to value in the cache. If the key does not exist, raise a
        `ValueError` exception.
        """
        exists = client.exists(key)
        if not exists:
            raise ValueError("Key '%s' not found" % key)
        try:
            value = client.incr(key, delta)
        except redis.ResponseError:
            # Value was stored serialized (not as a raw int): fall back to a
            # non-atomic read-modify-write through the public get/set API.
            key = key._original_key
            value = self.get(key) + delta
            self.set(key, value, timeout=None)
        return value
    def _incr_version(self, client, old, new, delta, version):
        """RENAME the old versioned key to the new one; missing keys raise
        ValueError (RENAME fails on non-existent source)."""
        try:
            client.rename(old, new)
        except redis.ResponseError:
            raise ValueError("Key '%s' not found" % old._original_key)
        return version + delta
    def incr_version(self, key, delta=1, version=None):
        """Adds delta to the cache version for the supplied key. Returns the
        new version.
        """
        raise NotImplementedError
    #####################
    # Extra api methods #
    #####################
    @get_client()
    def has_key(self, client, key):
        """Returns True if the key is in the cache and has not expired."""
        return client.exists(key)
    @get_client()
    def ttl(self, client, key):
        """Returns the 'time-to-live' of a key.  If the key is not volitile,
        i.e. it has not set expiration, then the value returned is None.
        Otherwise, the value is the number of seconds remaining.  If the key
        does not exist, 0 is returned.
        """
        if client.exists(key):
            return client.ttl(key)
        return 0
    def _delete_pattern(self, client, pattern):
        """Delete every key matching ``pattern`` on one client.

        Uses KEYS, which blocks the server on large databases.
        """
        keys = client.keys(pattern)
        if len(keys):
            client.delete(*keys)
    def delete_pattern(self, pattern, version=None):
        """Remove all keys matching a glob-style pattern."""
        raise NotImplementedError
    @get_client(write=True)
    def get_or_set(self, client, key, func, timeout=DEFAULT_TIMEOUT):
        """Return the cached value for ``key``, computing and storing
        ``func()`` under a dogpile lock when the lock key is absent.

        NOTE(review): the lock is first written via ``self.set`` (which
        re-versions the lock key through ``make_key``) and then re-written
        via ``_set`` with the unversioned key — these may address two
        different redis keys; confirm which is intended.
        """
        if not callable(func):
            raise Exception("Must pass in a callable")
        timeout = self.get_timeout(timeout)
        dogpile_lock_key = "_lock" + key._versioned_key
        dogpile_lock = client.get(dogpile_lock_key)
        if dogpile_lock is None:
            self.set(dogpile_lock_key, 0, None)
            value = func()
            # Store the real value without expiry; only the lock key expires.
            self._set(client, key, self.prep_value(value), None)
            self._set(client, dogpile_lock_key, 0, timeout)
        else:
            value = self.get(key._original_key)
        return value
    def _reinsert_keys(self, client):
        # Rewrite only keys with no expiry (ttl None) so values are stored
        # with the currently configured pickle protocol.
        keys = client.keys('*')
        for key in keys:
            timeout = client.ttl(key)
            value = self.get_value(client.get(key))
            if timeout is None:
                client.set(key, self.prep_value(value))
    def reinsert_keys(self):
        """
        Reinsert cache entries using the current pickle protocol version.
        """
        raise NotImplementedError
    @get_client(write=True)
    def persist(self, client, key):
        """Remove the timeout on a key.
        Equivalent to setting a timeout of None in a set command.
        Returns True if successful and False if not.
        """
        return client.persist(key)
    @get_client(write=True)
    def expire(self, client, key, timeout):
        """
        Set the expire time on a key
        returns True if successful and False if not.
        """
        return client.expire(key, timeout)
| |
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from matplotlib import dates
import os
import pickle
from datetime import datetime
from pprint import pprint
import sys
import math
import traceback
import time
import mysql.connector
# --- setup: working directory, database connection, input files ---
data_dir = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/size_distrs/'
os.chdir(data_dir)
#database connection
# NOTE(review): credentials are hard-coded in source; prefer a config file or
# environment variables.  The cursor is created but never used below.
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
#fileFR = 'AD_corr - size distr - FRlessFT - FR.sdbinpickl'
#fileBB = 'AD_corr - size distr - FRlessFT - BB.sdbinpickl'
#fileCont = 'AD_corr - size distr - FRlessFT - Cont.sdbinpickl'
#fileNPac = 'AD_corr - size distr - FRlessFT - NPac.sdbinpickl'
#fileSPac = 'AD_corr - size distr - FRlessFT - SPac.sdbinpickl'
#fileLRT = 'AD_corr - size distr - FRlessFT - LRT.sdbinpickl'
#fileGBPS = 'AD_corr - size distr - FRlessFT - GBPS.sdbinpickl'
# Pickled size-distribution inputs: FR/BB from the FRlessFT set, the
# cluster files from the FT (free troposphere) set.
fileFR = 'AD_corr - size distr - FRlessFT - FR.sdbinpickl'
fileBB = 'AD_corr - size distr - FRlessFT - BB.sdbinpickl'
fileCont = 'AD_corr - size distr - FT - Cont.sdbinpickl'
fileNPac = 'AD_corr - size distr - FT - NPac.sdbinpickl'
fileSPac = 'AD_corr - size distr - FT - SPac.sdbinpickl'
fileLRT = 'AD_corr - size distr - FT - LRT.sdbinpickl'
fileGBPS = 'AD_corr - size distr - FT - GBPS.sdbinpickl'
fileallFT = 'AD_corr - size distr - FT - all_FT.sdbinpickl'
#fileFR = 'AD_corr - size distr - FR.sdbinpickl'
#fileBB = 'AD_corr - size distr - BB.sdbinpickl'
#fileCont = 'AD_corr - size distr - Cont.sdbinpickl'
#fileNPac = 'AD_corr - size distr - NPac.sdbinpickl'
#fileSPac = 'AD_corr - size distr - SPac.sdbinpickl'
#fileLRT = 'AD_corr - size distr - LRT.sdbinpickl'
#fileGBPS = 'AD_corr - size distr - GBPS.sdbinpickl'
# distribution name -> [file_name]; the processing loop appends bins,
# masses, normalized masses, fit values and fit bins to each list.
distributions = {
'FR':[fileFR],
'BB':[fileBB],
'Cont':[fileCont],
'NPac':[fileNPac],
'SPac':[fileSPac],
'LRT':[fileLRT],
'GBPS':[fileGBPS],
'All_FT':[fileallFT],
}
# Diameters (nm) at which the fitted curves are evaluated: every 5 nm from
# 30 to 800, offset by +2 — presumably bin centres; TODO confirm.
fit_bins = []
for x in range (30,800,5):
	fit_bins.append(x+2)
def lognorm(x_vals, A, w, xc):
	"""Lognormal distribution evaluated at x_vals.

	A  -- amplitude/area parameter
	w  -- width parameter (std dev in ln-space)
	xc -- centre (mode) diameter
	"""
	prefactor = A / (np.sqrt(2 * math.pi) * w * x_vals)
	exponent = -(np.log(x_vals / xc)) ** 2 / (2 * w ** 2)
	return prefactor * np.exp(exponent)
for distribution, distribution_data in distributions.iteritems():
file_name = distribution_data[0]
with open(file_name, 'r') as f:
size_distribution_file = pickle.load(f)
bins = np.array([row[0] for row in size_distribution_file])
if file_name =='AD_corr - size distr - FT - GBPS.sdbinpickl':
file_c6 = open(file_name, 'r')
c6_data = pickle.load(file_c6)
file_c6.close()
#combine clusters 4 and 6 (S PAc)
if file_name == 'AD_corr - size distr - FT - SPac.sdbinpickl':
i=0
lognorm_masses_l = []
for row in size_distribution_file:
lognorm_mass_c4 = row[1]
lognorm_mass_c6 = c6_data[i][1]
mean_mass = (lognorm_mass_c4+lognorm_mass_c6)/2
lognorm_masses_l.append(mean_mass)
lognorm_masses = np.array(lognorm_masses_l)
i+=1
#other clusters
else:
lognorm_masses = np.array([row[1] for row in size_distribution_file])
lognorm_masses = np.array([row[1] for row in size_distribution_file])
temp = []
for mass in lognorm_masses:
norm_mass = mass/np.max(lognorm_masses)
temp.append(norm_mass)
lognorm_masses_max1 = np.array(temp)
distribution_data.append(bins)
distribution_data.append(lognorm_masses)
distribution_data.append(lognorm_masses_max1)
mass_bins = distribution_data[1]#[2:]
norm_log_masses = distribution_data[2]#[2:]
norm_1_masses = distribution_data[3]
#print mass_bins
try:
popt, pcov = curve_fit(lognorm, mass_bins, norm_log_masses)
perr = np.sqrt(np.diag(pcov)) #from docs: To compute one standard deviation errors on the parameters use perr = np.sqrt(np.diag(pcov))
err_variables = [popt[0]-perr[0], popt[1]-perr[1], popt[2]-perr[2]]
except:
popt = [np.nan,np.nan,np.nan]
err_variables = [np.nan,np.nan,np.nan]
fit_y_vals = []
for bin in fit_bins:
fit_val = lognorm(bin, popt[0], popt[1], popt[2])
fit_y_vals.append(fit_val)
err_fit_y_vals = []
for bin in fit_bins:
err_fit_val = lognorm(bin, err_variables[0], err_variables[1], err_variables[2])
err_fit_y_vals.append(err_fit_val)
distribution_data.append(fit_y_vals)
distribution_data.append(fit_bins)
max_percent_of_distr_measured = sum(norm_1_masses)*100./sum(err_fit_y_vals)
percent_of_distr_measured = sum(norm_1_masses)*100./sum(fit_y_vals)
print distribution, percent_of_distr_measured,max_percent_of_distr_measured, 2*(max_percent_of_distr_measured-percent_of_distr_measured)
cnx.close()
#plotting
# Figure 1: normalized FT cluster distributions (data points + lognormal fits).
fig = plt.figure()
ax1 = fig.add_subplot(111)
# Indices into each distribution_data list (see processing loop above).
data = 2
fit = 4
# NOTE(review): this rebinds fit_bins (previously the list of bin centres)
# to a plain index; fitting is already done, but the shadowing is confusing.
fit_bins = 5
bins = []
colors = ['k','grey','r','b','g','orange','r']
ticks = [50,60,70,80,100,120,160,200,300,400,600,800]
# Loop only re-reads per-distribution arrays; plotting below addresses the
# distributions dict directly.
for distribution, distribution_data in distributions.iteritems():
	bins = distribution_data[1]
	normed_log_masses = distribution_data[2]
	normed_1_masses = distribution_data[3]
	fit_masses = distribution_data[4]
#ax1.scatter (distributions['FR'][1] ,distributions['FR'][data], color = colors[0], )
#ax1.plot (distributions['FR'][1] ,distributions['FR'][fit], color = colors[0], label = 'FR')
#ax1.scatter (distributions['BB'][1] ,distributions['BB'][data], color = colors[1], )
#ax1.plot (distributions['BB'][1] ,distributions['BB'][fit], color = colors[1], label = 'BB')
ax1.plot (distributions['LRT'][1] ,distributions['LRT'][data], color = 'orange', marker = 's', linewidth=0,label = 'W. Pacific/Asia')
ax1.plot (distributions['LRT'][5] ,distributions['LRT'][fit], color = colors[5], linewidth = 1.5)
ax1.plot (distributions['SPac'][1] ,distributions['SPac'][data], 'og-', linewidth=0,label = 'S. Pacific')
ax1.plot (distributions['SPac'][5] ,distributions['SPac'][fit], color = colors[4], linewidth = 1.5)
ax1.plot (distributions['NPac'][1] ,distributions['NPac'][data], '<b-', linewidth=0,label = 'N. Pacific')
ax1.plot (distributions['NPac'][5] ,distributions['NPac'][fit], color = colors[3], linewidth = 1.5)
ax1.plot (distributions['Cont'][1] ,distributions['Cont'][data], '>r-', linewidth=0,label = 'N. Canada')
ax1.plot (distributions['Cont'][5] ,distributions['Cont'][fit], color = colors[2], linewidth = 1.5)
ax1.plot (distributions['All_FT'][1] ,distributions['All_FT'][data], 'hk-', linewidth=0,label = 'All nighttime data')
ax1.plot (distributions['All_FT'][5] ,distributions['All_FT'][fit], color = colors[0], linewidth = 1.5)
legend = ax1.legend(loc='upper center', numpoints=1, bbox_to_anchor=(0.5, 1.18), ncol=3)
ax1.set_xscale('log')
ax1.set_xlim(60,400)
ax1.set_ylim(0,1.1)
ax1.set_xlabel('VED (nm)')
ax1.set_ylabel('dM/dlogVED')
ax1.xaxis.set_major_formatter(plt.FormatStrFormatter('%d'))
ax1.xaxis.set_major_locator(plt.FixedLocator(ticks))
#plt.text(0.9,0.9, '(b)',transform=ax1.transAxes)
plt.savefig('FT mass distributions - by cluster -t.png', bbox_inches = 'tight',bbox_extra_artists=(legend,))
plt.show()
sys.exit()
##
# Figure 2: FR/BB distributions.
# NOTE(review): this whole section is unreachable — sys.exit() above
# terminates the script first.  Kept for manual re-enabling.
fig = plt.figure()
ax1 = fig.add_subplot(111)
data = 2
fit = 4
fit_bins = 5
bins = []
colors = ['k','grey','magenta','grey','g','b','r']
ticks = [50,60,70,80,100,120,160,200,300,400,600,800]
for distribution, distribution_data in distributions.iteritems():
	bins = distribution_data[1]
	normed_log_masses = distribution_data[2]
	normed_1_masses = distribution_data[3]
	fit_masses = distribution_data[4]
ax1.scatter (distributions['FR'][1] ,distributions['FR'][data], color = colors[0], )
ax1.plot (distributions['FR'][5] ,distributions['FR'][fit], color = colors[0], label = 'FR')
ax1.scatter (distributions['BB'][1] ,distributions['BB'][data], color = colors[1], )
ax1.plot (distributions['BB'][5] ,distributions['BB'][fit], color = colors[1], linestyle = '--',label = 'BB')
#ax1.scatter (distributions['LRT'][1] ,distributions['LRT'][data], color = colors[5], marker = 'o' , s = 40)
#ax1.plot (distributions['LRT'][5] ,distributions['LRT'][fit], color = colors[5], label = 'LRT', linewidth = 1.5)
#ax1.scatter (distributions['SPac'][1] ,distributions['SPac'][data], color = colors[4], marker = '>' ,)
#ax1.plot (distributions['SPac'][5] ,distributions['SPac'][fit], color = colors[4], label = 'SPac', linewidth = 1.5)
#ax1.scatter (distributions['GBPS'][1] ,distributions['GBPS'][data], color = colors[6], marker = '*' ,)
#ax1.plot (distributions['GBPS'][5] ,distributions['GBPS'][fit], color = colors[6], label = 'GBPS', linewidth = 1.5)
#ax1.scatter (distributions['NPac'][1] ,distributions['NPac'][data], color = colors[3], marker = 's' ,)
#ax1.plot (distributions['NPac'][5] ,distributions['NPac'][fit], color = colors[3], label = 'NPac', linewidth = 1.5)
#ax1.scatter (distributions['Cont'][1] ,distributions['Cont'][data], color = colors[2], marker = '<' ,)
#ax1.plot (distributions['Cont'][5] ,distributions['Cont'][fit], color = colors[2], label = 'Cont', linewidth = 1.5)
plt.legend(numpoints=1)
ax1.set_xscale('log')
ax1.set_xlim(40,750)
ax1.set_ylim(0,130)
ax1.set_xlabel('VED (nm)')
ax1.set_ylabel('dM/dlogVED')
ax1.xaxis.set_major_formatter(plt.FormatStrFormatter('%d'))
ax1.xaxis.set_major_locator(plt.FixedLocator(ticks))
plt.text(0.9,0.9, '(a)',transform=ax1.transAxes)
#plt.savefig('FR and BB mass distributions.png', bbox_inches = 'tight')
plt.show()
| |
# Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
from glslc_test_framework import inside_glslc_testsuite
from placeholder import SpecializedString
from environment import File, Directory
@inside_glslc_testsuite('Include')
class VerifyIncludeOneSibling(expect.StdoutMatch):
    """Tests #including a sibling file."""
    # Files materialized on disk by the test framework before the run.
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "b"\n'),
        File('b', 'content b\n')])
    # -E: preprocess only; stdout must equal expected_stdout exactly.
    glslc_args = ['-E', 'a.vert']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
content a
#line 0 "b"
content b
#line 3 "a.vert"
"""
@inside_glslc_testsuite('Include')
class VerifyIncludeNotFound(expect.ErrorMessage):
    """Tests #including a not existing sibling file."""
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "b"\n')])
    # Preprocessing must fail with exactly this diagnostic text.
    glslc_args = ['-E', 'a.vert']
    expected_error = [
        "a.vert:3: error: '#include' : Cannot find or open include file.\n",
        '1 error generated.\n'
    ]
@inside_glslc_testsuite('Include')
class VerifyCompileIncludeOneSibling(expect.ValidObjectFile):
    """Tests #including a sibling file via full compilation."""
    environment = Directory('.', [
        File('a.vert', '#version 140\nvoid foo(){}\n#include "b"\n'),
        File('b', 'void main(){foo();}\n')])
    # No -E: run the full compile; per expect.ValidObjectFile it must succeed.
    glslc_args = ['a.vert']
@inside_glslc_testsuite('Include')
class VerifyIncludeWithoutNewline(expect.StdoutMatch):
    """Tests a #include without a newline."""
    # Note: a.vert's last line is the #include with no trailing newline.
    environment = Directory('.', [
        File('a.vert', '#version 140\n#include "b"'),
        File('b', 'content b\n')])
    glslc_args = ['-E', 'a.vert']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
#line 0 "b"
content b
#line 2 "a.vert"
"""
@inside_glslc_testsuite('Include')
class VerifyCompileIncludeWithoutNewline(expect.ValidObjectFile):
    """Tests a #include without a newline via full compilation."""
    # The included file completes the declaration started in a.vert, so the
    # two files only form valid GLSL after include expansion.
    environment = Directory('.', [
        File('a.vert',
             """#version 140
void main
#include "b"
"""),
        File('b',
             """#define PAR ()
PAR{}
""")])
    glslc_args = ['a.vert']
@inside_glslc_testsuite('Include')
class VerifyIncludeTwoSiblings(expect.StdoutMatch):
    """Tests #including two sibling files."""
    environment = Directory('.', [
        File('b.vert', '#version 140\n#include "a"\ncontent b\n#include "c"\n'),
        File('a', 'content a\n'),
        File('c', 'content c\n')])
    # -E: preprocess only; both includes expand with #line bookkeeping.
    glslc_args = ['-E', 'b.vert']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "b.vert"
#line 0 "a"
content a
#line 2 "b.vert"
content b
#line 0 "c"
content c
#line 4 "b.vert"
"""
@inside_glslc_testsuite('Include')
class VerifyCompileIncludeTwoSiblings(expect.ValidObjectFile):
    """Tests #including two sibling files via full compilation."""
    # "a" provides afun/BODY used later; "c" uses BODY defined in "a", so
    # expansion order matters for the compile to succeed.
    environment = Directory('.', [
        File('b.vert',
             """#version 140
#include "a"
void bfun(){afun();}
#include "c"
"""),
        File('a',
             """void afun(){}
#define BODY {}
"""),
        File('c', 'void main() BODY\n')])
    glslc_args = ['b.vert']
@inside_glslc_testsuite('Include')
class VerifyNestedIncludeAmongSiblings(expect.StdoutMatch):
    """Tests #include inside #included sibling files."""
    # a.vert includes b, which itself includes c (one nesting level).
    environment = Directory('.', [
        File('a.vert', '#version 140\n#include "b"\ncontent a\n'),
        File('b', 'content b\n#include "c"\n'),
        File('c', 'content c\n')])
    glslc_args = ['-E', 'a.vert']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
#line 0 "b"
content b
#line 0 "c"
content c
#line 2 "b"
#line 2 "a.vert"
content a
"""
@inside_glslc_testsuite('Include')
class VerifyCompileNestedIncludeAmongSiblings(expect.ValidObjectFile):
    """Tests #include inside #included sibling files via full compilation."""
    # Macros defined in a.vert are visible inside "b"; functions defined in
    # nested "c" are visible back in a.vert after expansion.
    environment = Directory('.', [
        File('a.vert',
             """#version 140
#define BODY {}
#include "b"
void main(){cfun();}
"""),
        File('b',
             """void bfun() BODY
#include "c"
"""),
        File('c',
             """#define BF bfun()
void cfun(){BF;}
""")])
    glslc_args = ['a.vert']
@inside_glslc_testsuite('Include')
class VerifyIncludeSubdir(expect.StdoutMatch):
    """Tests #including a file from a subdirectory."""
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a1\n#include "subdir/a"\ncontent a2\n'),
        Directory('subdir', [File('a', 'content suba\n')])])
    # The #line directives must carry the subdirectory-qualified name.
    glslc_args = ['-E', 'a.vert']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
content a1
#line 0 "subdir/a"
content suba
#line 3 "a.vert"
content a2
"""
@inside_glslc_testsuite('Include')
class VerifyCompileIncludeSubdir(expect.ValidObjectFile):
    """Tests #including a file from a subdirectory via full compilation."""
    environment = Directory('.', [
        File('a.vert',
             """#version 140
#define BODY {}
#include "subdir/a"
void afun()BODY
"""),
        Directory('subdir', [File('a', 'void main() BODY\n')])])
    # Full compile; the BODY macro must be visible inside subdir/a.
    glslc_args = ['a.vert']
@inside_glslc_testsuite('Include')
class VerifyIncludeDeepSubdir(expect.StdoutMatch):
    """Tests #including a file from a subdirectory nested a few levels down."""
    # Three directory levels deep; the full relative path must round-trip
    # into the emitted #line directives.
    environment = Directory('.', [
        File('a.vert',
             '#version 140\ncontent a1\n#include "dir/subdir/subsubdir/a"\ncontent a2\n'),
        Directory('dir', [
            Directory('subdir', [
                Directory('subsubdir', [File('a', 'content incl\n')])])])])
    glslc_args = ['-E', 'a.vert']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
content a1
#line 0 "dir/subdir/subsubdir/a"
content incl
#line 3 "a.vert"
content a2
"""
@inside_glslc_testsuite('Include')
class VerifyCompileIncludeDeepSubdir(expect.ValidObjectFile):
    """Tests #including a file from a subdirectory nested a few levels down
    via full compilation."""
    environment = Directory('.', [
        File('a.vert',
             """#version 140
#define BODY {}
#include "dir/subdir/subsubdir/a"
void afun()BODY
"""),
        Directory('dir', [
            Directory('subdir', [
                Directory('subsubdir', [File('a', 'void main() BODY\n')])])])])
    # Full compile of the deeply nested include.
    glslc_args = ['a.vert']
@inside_glslc_testsuite('Include')
class TestWrongPoundVersionInIncludingFile(expect.ValidObjectFileWithWarning):
    """Tests that warning message for #version directive in the including file
    has the correct filename."""
    environment = Directory('.', [
        File('a.vert', '#version 100000000\n#include "b.glsl"\n'),
        File('b.glsl', 'void main() {}\n')])
    # -c: compile to an object file; the bogus version must only warn, and
    # the warning must name a.vert (not the included file).
    glslc_args = ['-c', 'a.vert']
    expected_warning = [
        'a.vert: warning: version 100000000 is unknown.\n',
        '1 warning generated.\n'
    ]
# TODO(antiagainst): now #version in included files results in an error.
# Fix this after #version in included files are supported.
# TODO(dneto): I'm not sure what the expected result should be.
@inside_glslc_testsuite('Include')
class TestWrongPoundVersionInIncludedFile(expect.ErrorMessage):
    """Tests that warning message for #version directive in the included file
    has the correct filename."""
    # Currently #version inside an included file is a hard error (see the
    # TODO notes above this class); the error must name b.glsl.
    environment = Directory('.', [
        File('a.vert', '#version 140\n#include "b.glsl"\nvoid main() {}'),
        File('b.glsl', '#version 10000000\n')])
    glslc_args = ['-E', 'a.vert']
    expected_error = [
        "b.glsl:1: error: '#version' : must occur first in shader\n",
        '1 error generated.\n'
    ]
@inside_glslc_testsuite('Include')
class VerifyRelativeInclude(expect.StdoutMatch):
    """Tests #including a relative sibling."""
    # b.glsl includes c.glsl relative to itself (both live in foo/).
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "foo/b.glsl"\n'),
        Directory('foo', [
            File('b.glsl', '#include "c.glsl"\ncontent b\n'),
            File('c.glsl', 'content c\n')
        ])])
    glslc_args = ['-E', 'a.vert']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
content a
#line 0 "foo/b.glsl"
#line 0 "foo/c.glsl"
content c
#line 1 "foo/b.glsl"
content b
#line 3 "a.vert"
"""
@inside_glslc_testsuite('Include')
class VerifyNestedRelativeInclude(expect.StdoutMatch):
    """Tests #including a relative child file."""
    # The inner include path "bar/c.glsl" is resolved relative to foo/.
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "foo/b.glsl"\n'),
        Directory('foo', [
            File('b.glsl', '#include "bar/c.glsl"\ncontent b\n'),
            Directory('bar', [
                File('c.glsl', 'content c\n')
            ])
        ])
    ])
    glslc_args = ['-E', 'a.vert']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
content a
#line 0 "foo/b.glsl"
#line 0 "foo/bar/c.glsl"
content c
#line 1 "foo/b.glsl"
content b
#line 3 "a.vert"
"""
@inside_glslc_testsuite('Include')
class VerifyRelativeIncludeWithDashI(expect.StdoutMatch):
    """Tests #including a relative file from a -I directory."""
    # "bar/b.glsl" is found via -Ifoo; its own include "c.glsl" then
    # resolves relative to foo/bar/ first, falling back to foo/.
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "bar/b.glsl"\n'),
        Directory('foo', [
            Directory('bar', [
                File('b.glsl', '#include "c.glsl"\ncontent b\n'),
            ]),
            File('c.glsl', 'content c\n')
        ])
    ])
    glslc_args = ['-E', 'a.vert', '-Ifoo']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
content a
#line 0 "foo/bar/b.glsl"
#line 0 "foo/c.glsl"
content c
#line 1 "foo/bar/b.glsl"
content b
#line 3 "a.vert"
"""
@inside_glslc_testsuite('Include')
class VerifyRelativeOverridesDashI(expect.StdoutMatch):
    """Tests that relative includes override -I parameters."""
    # Both ./b.glsl and foo/b.glsl exist; the sibling (relative) file must
    # win over the -Ifoo search path, so "content base_b" is expected.
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "b.glsl"\n'),
        File('b.glsl', 'content base_b\n'),
        Directory('foo', [
            File('b.glsl', '#include "c.glsl"\ncontent b\n'),
            File('c.glsl', 'content c\n')
        ])
    ])
    glslc_args = ['-E', 'a.vert', '-Ifoo']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
content a
#line 0 "b.glsl"
content base_b
#line 3 "a.vert"
"""
@inside_glslc_testsuite('Include')
class VerifyRelativeParent(expect.StdoutMatch):
    """Tests #including a parent file."""
    # foo/b.glsl (found via -Ifoo) reaches back up with "../c.glsl"; the
    # un-normalized path "foo/../c.glsl" appears in the #line output.
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "b.glsl"\n'),
        File('c.glsl', 'content c\n'),
        Directory('foo', [
            File('b.glsl', '#include "../c.glsl"\ncontent b\n')
        ])
    ])
    glslc_args = ['-E', 'a.vert', '-Ifoo']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
content a
#line 0 "foo/b.glsl"
#line 0 "foo/../c.glsl"
content c
#line 1 "foo/b.glsl"
content b
#line 3 "a.vert"
"""
@inside_glslc_testsuite('Include')
class VerifyRelativeNeighbourDirectory(expect.StdoutMatch):
    """Tests #including a relative file in a neighbour directory."""
    # foo/b.glsl includes "../bar/c.glsl" relative to itself.
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "foo/b.glsl"\n'),
        Directory('foo', [
            File('b.glsl', '#include "../bar/c.glsl"\ncontent b\n')
        ]),
        Directory('bar', [
            File('c.glsl', 'content c\n')
        ])
    ])
    glslc_args = ['-E', 'a.vert']
    expected_stdout = \
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
content a
#line 0 "foo/b.glsl"
#line 0 "foo/../bar/c.glsl"
content c
#line 1 "foo/b.glsl"
content b
#line 3 "a.vert"
"""
@inside_glslc_testsuite('Include')
class VerifyRelativeOnlyToSelf(expect.ErrorMessage):
    """Tests that a relative includes are only relative to yourself."""
    # foo/b.glsl includes "c.glsl"; c.glsl exists only next to a.vert, not
    # next to b.glsl, so the lookup must fail (no fallback to the
    # including root).
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "foo/b.glsl"\n'),
        File('c.glsl', 'content c\n'),
        Directory('foo', [
            File('b.glsl', '#include "c.glsl"\ncontent b\n')
        ]),
    ])
    glslc_args = ['-E', 'a.vert']
    expected_error = [
        "foo/b.glsl:1: error: '#include' : Cannot find or open include file.\n",
        '1 error generated.\n'
    ]
@inside_glslc_testsuite('Include')
class VerifyRelativeFromAbsolutePath(expect.StdoutMatch):
    """Tests that absolute files can relatively include."""
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "b.glsl"\n'),
        File('b.glsl', 'content b\n')
    ])
    # $directory is substituted by SpecializedString with the test's
    # absolute directory path, both in the args and the expected output.
    glslc_args = ['-E', SpecializedString('$directory/a.vert')]
    expected_stdout = SpecializedString(
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "$directory/a.vert"
content a
#line 0 "$directory/b.glsl"
content b
#line 3 "$directory/a.vert"
""")
@inside_glslc_testsuite('Include')
class VerifyDashIAbsolutePath(expect.StdoutMatch):
    """Tests that -I parameters can be absolute paths."""
    # Fix: the subdirectory contents were declared with a set literal
    # ({...}) while every other test in this file uses a list.  A list
    # preserves declaration order and does not require File to be hashable.
    environment = Directory('.', [
        File('a.vert', '#version 140\ncontent a\n#include "b.glsl"\n'),
        Directory('foo', [
            File('b.glsl', 'content b\n')
        ])
    ])
    # $directory is substituted by SpecializedString with the test's
    # absolute directory path.
    glslc_args = ['-E', 'a.vert', '-I', SpecializedString('$directory/foo')]
    expected_stdout = SpecializedString(
"""#version 140
#extension GL_GOOGLE_include_directive : enable
#line 0 "a.vert"
content a
#line 0 "$directory/foo/b.glsl"
content b
#line 3 "a.vert"
""")
| |
from datetime import datetime
import time
from .queue import Queue
from . import context
from .redishelpers import redis_zaddbyscore, redis_zpopbyscore, redis_key, redis_group_command
from past.utils import old_div
from future.builtins import range
class QueueRaw(Queue):
    """Redis-only queue holding raw job parameters (not yet in MongoDB).

    Depending on the configured queue type, the backing Redis structure is
    a LIST (default), a SET ("set" in the type), a ZSET ("_sorted" id
    suffix) or a time-scored ZSET ("_timed" id suffix).
    """

    # Marks this queue class as "raw" for the rest of the framework.
    is_raw = True

    def __init__(self, queue_id, **kwargs):
        Queue.__init__(self, queue_id, **kwargs)
        queue_type = Queue.get_queue_type(queue_id)
        # Select the Redis data structure from the queue type / id suffix.
        # (is_set / is_timed / is_sorted presumably default to False on the
        # base Queue class -- only the True cases are set here; confirm.)
        if "set" in queue_type:
            self.is_set = True
        if "_timed" in self.id:
            self.is_timed = True
            self.is_sorted = True  # timed queues are score-sorted ZSETs
        elif "_sorted" in self.id:
            self.is_sorted = True
        self.has_subqueues = bool(self.get_config().get("has_subqueues"))
        # NOTE(review): current_config is never used below -- confirm the
        # call has no needed side effect before removing it.
        current_config = context.get_current_config()
        # redis key used to store the known subqueues of this raw queue.
        self.redis_key_known_subqueues = redis_key("known_subqueues", self)
        # redis key used to store this queue.
        self.redis_key = redis_key("queue", self)
        # global redis key used to store started job ids
        self.redis_key_started = redis_key("started_jobs")

    def empty(self):
        """ Empty a queue. """
        # Delete the queue and its subqueue registry in one transaction.
        with context.connections.redis.pipeline(transaction=True) as pipe:
            pipe.delete(self.redis_key)
            pipe.delete(self.redis_key_known_subqueues)
            pipe.execute()

    def get_known_subqueues(self):
        """ Returns all known subqueues """
        if not self.has_subqueues:
            return set()
        return set(context.connections.redis.smembers(self.redis_key_known_subqueues))

    def size(self):
        """ Returns the total number of queued jobs on the queue """
        # A trailing "/" denotes a subqueue prefix: sum over all subqueues.
        if self.id.endswith("/"):
            return sum(Queue(q).size() for q in self.get_known_subqueues())
        # ZSET
        if self.is_sorted:
            return context.connections.redis.zcard(self.redis_key)
        # SET
        elif self.is_set:
            return context.connections.redis.scard(self.redis_key)
        # LIST
        else:
            return context.connections.redis.llen(self.redis_key)

    def enqueue_raw_jobs(self, params_list):
        """ Add Jobs to this queue with raw parameters. They are not yet in MongoDB. """
        if len(params_list) == 0:
            return
        # Register this subqueue so prefix queues can discover it.
        # (is_subqueue is presumably provided by the base Queue class.)
        if self.is_subqueue:
            context.connections.redis.sadd(self.redis_key_known_subqueues, self.id)
        # ZSET
        if self.is_sorted:
            # For timed queues, default every member's score to "now" unless
            # the caller already supplied a {member: score} mapping.
            if not isinstance(params_list, dict) and self.is_timed:
                now = time.time()
                params_list = {x: now for x in params_list}
            # NOTE(review): zadd(key, **mapping) is the redis-py 2.x calling
            # convention; redis-py >= 3.0 expects zadd(key, mapping) -- verify
            # against the pinned client version.
            context.connections.redis.zadd(self.redis_key, **params_list)
        # SET
        elif self.is_set:
            context.connections.redis.sadd(self.redis_key, *params_list)
        # LIST
        else:
            context.connections.redis.rpush(self.redis_key, *params_list)
        context.metric("queues.%s.enqueued" % self.id, len(params_list))
        context.metric("queues.all.enqueued", len(params_list))

    def remove_raw_jobs(self, params_list):
        """ Remove jobs from a raw queue with their raw params. """
        if len(params_list) == 0:
            return
        # ZSET
        if self.is_sorted:
            context.connections.redis.zrem(self.redis_key, *iter(params_list))
        # SET
        elif self.is_set:
            context.connections.redis.srem(self.redis_key, *params_list)
        else:
            # O(n)! Use with caution.
            for k in params_list:
                context.connections.redis.lrem(self.redis_key, 1, k)
        context.metric("queues.%s.removed" % self.id, len(params_list))
        context.metric("queues.all.removed", len(params_list))

    def list_raw_jobs(self, skip=0, limit=20):
        """ Return up to `limit` raw params currently stored on the queue. """
        return self._get_queue_content(skip, limit)

    def _get_queue_content(self, skip, limit):
        # Non-destructive read of the queue's members.
        # ZSET
        if self.is_sorted:
            return context.connections.redis.zrange(
                self.redis_key,
                skip,
                skip + limit - 1)
        # SET
        elif self.is_set:
            # Sets are unordered: `skip` is not applicable, members are
            # randomly sampled instead.
            return context.connections.redis.srandmember(self.redis_key, limit)
        # LIST
        else:
            return context.connections.redis.lrange(
                self.redis_key,
                skip,
                skip + limit - 1)

    def get_retry_queue(self):
        """ Return the name of the queue where retried jobs will be queued """
        return self.get_config().get("retry_queue") or "default"

    def count_jobs_to_dequeue(self):
        """ Returns the number of jobs that can be dequeued right now from the queue. """
        # timed ZSET: only members whose score (a timestamp) is due count.
        if self.is_timed:
            return context.connections.redis.zcount(
                self.redis_key,
                "-inf",
                time.time())
        # In all other cases, it's the same as .size()
        else:
            return self.size()

    def dequeue_jobs(self, max_jobs=1, job_class=None, worker=None):
        """ Generator: pop up to max_jobs raw params from Redis, turn them
        into job documents via the configured job_factory, insert them with
        job_class.insert() and yield the created jobs. """
        queue_config = self.get_config()
        statuses_no_storage = queue_config.get("statuses_no_storage")
        job_factory = queue_config.get("job_factory")
        if not job_factory:
            raise Exception("No job_factory configured for raw queue %s" % self.id)
        retry_queue = self.get_retry_queue()
        params = []
        # ZSET with times
        if self.is_timed:
            current_time = time.time()
            # When we have a pushback_seconds argument, we never pop items from
            # the queue, instead we push them back by an amount of time so
            # that they don't get dequeued again until
            # the task finishes.
            pushback_time = current_time + float(queue_config.get("pushback_seconds") or 0)
            if pushback_time > current_time:
                # Lua script: re-score due members instead of removing them.
                params = redis_zaddbyscore()(
                    keys=[self.redis_key],
                    args=[
                        "-inf", current_time, 0, max_jobs, pushback_time
                    ])
            else:
                # Lua script: atomically pop the due members.
                params = redis_zpopbyscore()(
                    keys=[self.redis_key],
                    args=[
                        "-inf", current_time, 0, max_jobs
                    ])
        # ZSET
        elif self.is_sorted:
            # TODO Lua?
            # Read + delete the head of the ZSET in one transaction.
            with context.connections.redis.pipeline(transaction=True) as pipe:
                pipe.zrange(self.redis_key, 0, max_jobs - 1)
                pipe.zremrangebyrank(self.redis_key, 0, max_jobs - 1)
                params = pipe.execute()[0]
        # SET
        elif self.is_set:
            params = redis_group_command("spop", max_jobs, self.redis_key)
        # LIST
        else:
            params = redis_group_command("lpop", max_jobs, self.redis_key)
        if len(params) == 0:
            # We didn't dequeue anything. Does this mean the queue is empty?
            # TODO LUA this with the above
            if self.is_subqueue and self.size() == 0:
                context.connections.redis.srem(self.redis_key_known_subqueues, self.id)
            return
        if worker:
            worker.status = "spawn"
        # Build the MongoDB job documents from the raw params.
        job_data = [job_factory(p) for p in params]
        for j in job_data:
            j["status"] = "started"
            j["queue"] = retry_queue
            j["datequeued"] = datetime.now()
            j["raw_queue"] = self.id
            if worker:
                j["worker"] = worker.id
        for job in job_class.insert(job_data, statuses_no_storage=statuses_no_storage):
            yield job

    def get_sorted_graph(
            self,
            start=0,
            stop=100,
            slices=100,
            include_inf=False,
            exact=False):
        """ Returns a graph of the distribution of jobs in a sorted set """
        if not self.is_sorted:
            raise Exception("Not a sorted queue")
        with context.connections.redis.pipeline(transaction=exact) as pipe:
            # Count members in `slices` half-open score buckets [lo, hi).
            interval = old_div(float(stop - start), slices)
            for i in range(0, slices):
                pipe.zcount(self.redis_key,
                            (start + i * interval),
                            "(%s" % (start + (i + 1) * interval))
            if include_inf:
                pipe.zcount(self.redis_key, stop, "+inf")
                pipe.zcount(self.redis_key, "-inf", "(%s" % start)
            data = pipe.execute()
        # Move the below-range bucket to the front so the order becomes:
        # [-inf, start), the `slices` buckets, then [stop, +inf).
        if include_inf:
            return data[-1:] + data[:-1]
        return data
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
### @brief creates swagger json files from doc headers of rest files
###
### find files in
### arangod/RestHandler/*.cpp
### js/actions/api-*.js
###
### @usage generateSwagger.py < RestXXXX.cpp > restSwagger.json
###
### @file
###
### DISCLAIMER
###
### Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
###
### Copyright holder is triAGENS GmbH, Cologne, Germany
###
### @author Dr. Frank Celler
### @author Thomas Richter
### @author Copyright 2014, triAGENS GmbH, Cologne, Germany
################################################################################
import sys, re, json, string, os, cgi

# Shorthand aliases used by the markup helpers below.
rc = re.compile
MS = re.M | re.S  # multi-line matching, with '.' also matching newlines
################################################################################
### @brief swagger
################################################################################

# Top-level Swagger 1.1 document; restheader() appends one entry per parsed
# API to 'apis', and the finished structure is what this tool emits.
swagger = {
    'apiVersion': '0.1',
    'swaggerVersion': '1.1',
    'basePath': '/',
    'apis': []
}

################################################################################
### @brief operation
################################################################################

# The operation currently being filled in by the state handlers; rebound by
# restheader() each time a new @RESTHEADER block starts.
operation = {}

################################################################################
### @brief C_FILE
################################################################################

# True while the input is a C/C++ source (doc lines prefixed with slashes);
# affects comment stripping and end-of-comment detection.
C_FILE = False

################################################################################
### @brief DEBUG
################################################################################

# When True, state handlers trace their names to stderr.
DEBUG = False
################################################################################
### @brief trim_text
################################################################################
def trim_text(txt):
    """Strip trailing spaces and tabs from the end of txt."""
    return re.sub(r"""[ \t]+$""", "", txt)
################################################################################
### @brief parameters
###
### find the first '{'
### find the last '}'
### return everything in between
################################################################################
def parameters(line):
    """Return the text between the first '{' and the last '}' of line,
    with back-ticked words wrapped in literal braces."""
    _, _, tail = line.partition('{')
    inner, _, _ = tail.rpartition('}')
    return BackTicks(inner, wordboundary = ['{','}'])
################################################################################
### @brief BackTicks
###
### `word` -> <em>word</em> (with the default wordboundary)
################################################################################
def BackTicks(txt, wordboundary = ['<em>','</em>']):
    """Wrap each back-ticked span of txt in the given boundary tags."""
    pattern = re.compile(r"""([\(\s'/">]|^|.)\`(.*?)\`([<\s\.\),:;'"?!/-]|$)""", re.M | re.S)
    opener, closer = wordboundary[0], wordboundary[1]
    return pattern.sub('\\1' + opener + '\\2' + closer + '\\3', txt)
################################################################################
### @brief AsteriskItalic
###
### *word* -> <em>word</em> (with the default wordboundary)
################################################################################
def AsteriskItalic(txt, wordboundary = ['<em>','</em>']):
    """Wrap each single-asterisk span of txt in the given boundary tags."""
    pattern = re.compile(r"""([\(\s'/">]|^|.)\*(.*?)\*([<\s\.\),:;'"?!/-]|$)""", re.M | re.S)
    opener, closer = wordboundary[0], wordboundary[1]
    return pattern.sub('\\1' + opener + '\\2' + closer + '\\3', txt)
################################################################################
### @brief AsteriskBold
###
### **word** -> <em>word</em> (note: the default wordboundary is <em>, not <b>)
################################################################################
def AsteriskBold(txt, wordboundary = ['<em>','</em>']):
    """Wrap each double-asterisk span of txt in the given boundary tags."""
    pattern = re.compile(r"""([\(\s'/">]|^|.)\*\*(.*?)\*\*([<\s\.\),:;'"?!/-]|$)""", re.M | re.S)
    opener, closer = wordboundary[0], wordboundary[1]
    return pattern.sub('\\1' + opener + '\\2' + closer + '\\3', txt)
################################################################################
### @brief FA
###
### @FA{word} -> <b>word</b>
################################################################################
def FA(txt, wordboundary = ['<b>','</b>']):
    """Wrap each @FA{word} argument marker of txt in the boundary tags."""
    pattern = re.compile(r"""([\(\s'/">]|^|.)@FA\{(.*?)\}([<\s\.\),:;'"?!/-]|$)""", re.M | re.S)
    opener, closer = wordboundary[0], wordboundary[1]
    return pattern.sub('\\1' + opener + '\\2' + closer + '\\3', txt)
################################################################################
### @brief FN
###
### @FN{word} -> <b>word</b>
################################################################################
def FN(txt, wordboundary = ['<b>','</b>']):
    """Wrap each @FN{word} function marker of txt in the boundary tags.

    Unlike FA/LIT, the trailing delimiter class here has no end-of-string
    alternative, so a marker at the very end of txt is left untouched.
    """
    pattern = re.compile(r"""([\(\s'/">]|^|.)@FN\{(.*?)\}([<\s\.\),:;'"?!/-])""", re.M | re.S)
    opener, closer = wordboundary[0], wordboundary[1]
    return pattern.sub('\\1' + opener + '\\2' + closer + '\\3', txt)
################################################################################
### @brief LIT
###
### @LIT{word} -> <b>word</b>
################################################################################
def LIT(txt, wordboundary = ['<b>','</b>']):
    """Wrap each @LIT{word} literal marker of txt in the boundary tags.

    The leading delimiter class is stricter than in the other helpers (no
    catch-all '.'), so the marker must follow whitespace, a quote, '(',
    '/', '>' or the start of a line.
    """
    pattern = re.compile(r"""([\(\s'/">]|^)@LIT\{(.*?)\}([<\s\.\),:;'"?!/-])""", re.M | re.S)
    opener, closer = wordboundary[0], wordboundary[1]
    return pattern.sub('\\1' + opener + '\\2' + closer + '\\3', txt)
################################################################################
### @brief Typegraphy
################################################################################
def Typography(txt):
    """Convert one doc-comment line into HTML-ish Swagger text.

    Strips the comment prefix (4 leading characters -- presumably '/// ' --
    for C sources) plus the trailing newline, then applies the inline markup
    substitutions.  Bold runs before italic so that '**' pairs are not
    consumed by the single-'*' rule first.
    """
    if C_FILE:
        # drop the comment prefix and the trailing newline
        txt = txt[4:-1]
    else:
        # drop only the trailing newline
        txt = txt[0:-1]
    txt = BackTicks(txt)
    txt = AsteriskBold(txt)
    txt = AsteriskItalic(txt)
    txt = FN(txt)
    txt = LIT(txt)
    txt = FA(txt)
    # no way to find out the correct link for Swagger,
    # so replace all @ref elements with just "the manual"
    r = rc(r"""@ref [a-zA-Z0-9]+""", MS)
    txt = r.sub("the manual", txt)
    # the @endDocuBlock terminator carries no content
    txt = re.sub(r"@endDocuBlock", "", txt)
    return txt
################################################################################
### @brief InitializationError
################################################################################
class InitializationError(Exception): pass
################################################################################
### @brief StateMachine
################################################################################
class StateMachine:
    """Minimal driver for handler-function state machines.

    Each handler receives a ``cargo`` value and returns a
    ``(next_handler, cargo)`` pair.  ``run`` dispatches handlers until it
    reaches one registered as an end state, which is then invoked once
    with the final cargo.
    """

    def __init__(self):
        self.handlers = []       # all registered handlers
        self.startState = None   # entry handler, set via set_start()
        self.endStates = []      # handlers that terminate the run

    def add_state(self, handler, end_state=0):
        """Register a handler; mark it terminal when end_state is truthy."""
        self.handlers.append(handler)
        if end_state:
            self.endStates.append(handler)

    def set_start(self, handler):
        """Select the handler that run() starts with."""
        self.startState = handler

    def run(self, cargo=None):
        """Dispatch handlers until an end state is reached.

        Raises InitializationError when no start state or no end state has
        been configured, and RuntimeError when a handler returns a target
        that was never registered.
        """
        # Fix: exception-instance raise syntax replaces the Python-2-only
        # "raise Class, message" form; this works on Python 2.6+ and 3.
        if not self.startState:
            raise InitializationError("must call .set_start() before .run()")
        if not self.endStates:
            raise InitializationError("at least one state must be an end_state")
        handler = self.startState
        while 1:
            (newState, cargo) = handler(cargo)
            if newState in self.endStates:
                # End states are called once with the final cargo, then stop.
                newState(cargo)
                break
            elif newState not in self.handlers:
                raise RuntimeError("Invalid target %s" % newState)
            else:
                handler = newState
################################################################################
### @brief Regexen
################################################################################
class Regexen:
    """Bundle of pre-compiled regexes recognizing documentation markers.

    Instances are used as default arguments of the state handlers, so the
    patterns are compiled once per handler definition, not once per line.
    """

    def __init__(self):
        # Fix: raw strings for every pattern -- sequences like '\s' in
        # plain string literals are invalid escapes (DeprecationWarning on
        # Python 3); the compiled patterns are unchanged.
        self.DESCRIPTION_LI = re.compile(r'^-\s.*$')
        self.DESCRIPTION_SP = re.compile(r'^\s\s.*$')
        self.DESCRIPTION_BL = re.compile(r'^\s*$')
        self.EMPTY_LINE = re.compile(r'^\s*$')
        self.END_EXAMPLE_ARANGOSH_RUN = re.compile(r'.*@END_EXAMPLE_ARANGOSH_RUN')
        self.EXAMPLES = re.compile(r'.*@EXAMPLES')
        self.EXAMPLE_ARANGOSH_RUN = re.compile(r'.*@EXAMPLE_ARANGOSH_RUN{')
        self.FILE = re.compile(r'.*@file')
        self.RESTBODYPARAM = re.compile(r'.*@RESTBODYPARAM')
        self.RESTDESCRIPTION = re.compile(r'.*@RESTDESCRIPTION')
        self.RESTDONE = re.compile(r'.*@RESTDONE')
        self.RESTHEADER = re.compile(r'.*@RESTHEADER{')
        self.RESTHEADERPARAM = re.compile(r'.*@RESTHEADERPARAM{')
        self.RESTHEADERPARAMETERS = re.compile(r'.*@RESTHEADERPARAMETERS')
        self.RESTQUERYPARAM = re.compile(r'.*@RESTQUERYPARAM{')
        self.RESTQUERYPARAMETERS = re.compile(r'.*@RESTQUERYPARAMETERS')
        self.RESTRETURNCODE = re.compile(r'.*@RESTRETURNCODE{')
        self.RESTRETURNCODES = re.compile(r'.*@RESTRETURNCODES')
        self.RESTURLPARAM = re.compile(r'.*@RESTURLPARAM{')
        self.RESTURLPARAMETERS = re.compile(r'.*@RESTURLPARAMETERS')
        # Matches any line that does not start with '/', i.e. real code.
        self.NON_COMMENT = re.compile(r'^[^/].*')
################################################################################
### @brief checks for end of comment
################################################################################
def check_end_of_comment(line, r):
    """Return a truthy match when line terminates the current doc comment.

    C sources end at the first non-comment line; otherwise the block ends
    at an explicit @RESTDONE marker.
    """
    matcher = r.NON_COMMENT if C_FILE else r.RESTDONE
    return matcher.match(line)
################################################################################
### @brief next_step
################################################################################
def next_step(fp, line, r):
    """Choose the next parser state for `line`.

    Returns (handler, (fp, line)) for a recognized marker, (eof, ...) on
    end of input, or (None, None) when the line carries no marker and the
    caller should keep reading.  The end-of-comment test runs before any
    marker test.
    """
    global operation

    if not line: return eof, (fp, line)
    elif check_end_of_comment(line, r): return skip_code, (fp, line)
    elif r.EXAMPLE_ARANGOSH_RUN.match(line): return example_arangosh_run, (fp, line)
    elif r.RESTBODYPARAM.match(line): return restbodyparam, (fp, line)
    elif r.RESTDESCRIPTION.match(line): return restdescription, (fp, line)
    elif r.RESTHEADER.match(line): return restheader, (fp, line)
    elif r.RESTHEADERPARAM.match(line): return restheaderparam, (fp, line)
    elif r.RESTHEADERPARAMETERS.match(line): return restheaderparameters, (fp, line)
    elif r.RESTQUERYPARAM.match(line): return restqueryparam, (fp, line)
    elif r.RESTQUERYPARAMETERS.match(line): return restqueryparameters, (fp, line)
    elif r.RESTRETURNCODE.match(line): return restreturncode, (fp, line)
    elif r.RESTRETURNCODES.match(line): return restreturncodes, (fp, line)
    elif r.RESTURLPARAM.match(line): return resturlparam, (fp, line)
    elif r.RESTURLPARAMETERS.match(line): return resturlparameters, (fp, line)

    # @EXAMPLES additionally resets the accumulated examples text on the
    # current operation before switching state.
    if r.EXAMPLES.match(line):
        operation['examples'] = ""
        return examples, (fp, line)

    return None, None
################################################################################
### @brief generic handler
################################################################################
def generic_handler(cargo, r, message):
    """Consume lines until next_step recognizes the next marker.

    `message` is only used for debug tracing.  Returns the
    (handler, cargo) pair produced by next_step.
    """
    global DEBUG
    if DEBUG:
        # Fix: sys.stderr.write replaces the Python-2-only
        # "print >> sys.stderr" statement; same output on Python 2 and 3.
        sys.stderr.write("%s\n" % message)
    (fp, last) = cargo
    while 1:
        (next, c) = next_step(fp, fp.readline(), r)
        if next:
            return next, c
################################################################################
### @brief generic handler with description
################################################################################
def generic_handler_desc(cargo, r, message, op, para, name):
    """Accumulate a free-text description into para[name] until the next marker.

    Lines starting with "- " are converted into an HTML <ul>/<li> list and
    blank lines outside a list become "<br><br>".  When `op` is given, the
    finished para dict is appended to operation[op].  Returns the
    (handler, cargo) pair produced by next_step.
    """
    global DEBUG, C_FILE, operation
    if DEBUG:
        # Fix: sys.stderr.write replaces the Python-2-only
        # "print >> sys.stderr" statement; same output on Python 2 and 3.
        sys.stderr.write("%s\n" % message)
    (fp, last) = cargo
    inLI = False   # currently inside a <li> item
    inUL = False   # currently inside the surrounding <ul>
    while 1:
        line = fp.readline()
        (next, c) = next_step(fp, line, r)
        if next:
            # Marker reached: finalize the accumulated description.
            para[name] = trim_text(para[name])
            if op:
                operation[op].append(para)
            return next, c

        # In C sources, skip pure '////' separator lines.
        if C_FILE and line[0:4] == "////":
            continue

        line = Typography(line)

        # "- " opens a list item; indented lines continue it; a blank line
        # inside a list is dropped.
        if r.DESCRIPTION_LI.match(line):
            line = "<li>" + line[2:]
            inLI = True
        elif inLI and r.DESCRIPTION_SP.match(line):
            line = line[2:]
        elif inLI and r.DESCRIPTION_BL.match(line):
            line = ""
        else:
            inLI = False

        # Open/close the surrounding <ul> when list membership changes.
        if not inUL and inLI:
            line = " <ul class=\"swagger-list\">" + line
            inUL = True
        elif inUL and not inLI:
            line = "</ul> " + line
            inUL = False

        # Paragraph break outside of lists.
        if not inLI and r.EMPTY_LINE.match(line):
            line = "<br><br>"

        para[name] += line + ' '
################################################################################
### @brief restheader
################################################################################
def restheader(cargo, r=Regexen()):
    """Parse @RESTHEADER{method path, summary} and open a new operation.

    Rebinds the module-global `operation` dict that subsequent parameter
    handlers fill in, derives a camelCase nickname from the summary, and
    registers the API path in the global swagger structure.
    """
    global swagger, operation
    (fp, last) = cargo
    temp = parameters(last).split(',')
    (method, path) = temp[0].split()
    summary = temp[1]
    summaryList = summary.split()
    # First word keeps its case, the rest are capitalized:
    # "creates a cursor" -> "createsACursor".
    nickname = summaryList[0] + ''.join([word.capitalize() for word in summaryList[1:]])

    operation = {
        'httpMethod': method,
        'nickname': nickname,
        'parameters': [],
        'summary': summary,
        'notes': '',
        'examples': '',
        'errorResponses': []
    }
    api = {
        # Path placeholders @FA{...} become literal {...} segments.
        'path': FA(path, wordboundary = ['{', '}']),
        'operations': [ operation ]
    }
    swagger['apis'].append(api)

    # Fix: the debug trace label previously said "resturlparameters".
    return generic_handler(cargo, r, "restheader")
################################################################################
### @brief resturlparameters
################################################################################
def resturlparameters(cargo, r=Regexen()):
    # Section marker only: nothing to record, scan to the next directive.
    return generic_handler(cargo, r, "resturlparameters")
################################################################################
### @brief resturlparam
################################################################################
def resturlparam(cargo, r=Regexen()):
    """Parse a @RESTURLPARAM directive: "name,type,required|optional".

    Builds a swagger path-parameter dict; following description lines are
    accumulated into its 'description' field by the generic handler.
    """
    (fp, last) = cargo
    fields = parameters(last).split(',')
    para = {
        'name': fields[0],
        'paramType': 'path',
        'description': '',
        'dataType': fields[1].capitalize(),
        'required': fields[2] == 'required'
    }
    return generic_handler_desc(cargo, r, "resturlparam", 'parameters', para, 'description')
################################################################################
### @brief restqueryparameters
################################################################################
def restqueryparameters(cargo, r=Regexen()):
    """Handle a @RESTQUERYPARAMETERS section via the generic section handler."""
    return generic_handler(cargo, r, "restqueryparameters")
################################################################################
### @brief restheaderparameters
################################################################################
def restheaderparameters(cargo, r=Regexen()):
    """Handle a @RESTHEADERPARAMETERS section via the generic section handler."""
    return generic_handler(cargo, r, "restheaderparameters")
################################################################################
### @brief restheaderparam
################################################################################
def restheaderparam(cargo, r=Regexen()):
    """Parse a @RESTHEADERPARAM directive: "name,type" for one HTTP header."""
    (fp, last) = cargo
    fields = parameters(last).split(',')
    # NOTE: key insertion order matters for the emitted JSON; keep it as-is.
    para = {
        'paramType': 'header',
        'dataType': fields[1].capitalize(),
        'name': fields[0],
        'description': ''
    }
    return generic_handler_desc(cargo, r, "restheaderparam", 'parameters', para, 'description')
################################################################################
### @brief restbodyparam
################################################################################
def restbodyparam(cargo, r=Regexen()):
    """Parse a @RESTBODYPARAM directive: "name,type,required|optional"."""
    (fp, last) = cargo
    fields = parameters(last).split(',')
    para = {
        'name': fields[0],
        'paramType': 'body',
        'description': '',
        'dataType': fields[1].capitalize(),
        'required': fields[2] == 'required'
    }
    return generic_handler_desc(cargo, r, "restbodyparam", 'parameters', para, 'description')
################################################################################
### @brief restqueryparam
################################################################################
def restqueryparam(cargo, r=Regexen()):
    """Parse a @RESTQUERYPARAM directive: "name,type,required|optional"."""
    (fp, last) = cargo
    fields = parameters(last).split(',')
    para = {
        'name': fields[0],
        'paramType': 'query',
        'description': '',
        'dataType': fields[1].capitalize(),
        'required': fields[2] == 'required'
    }
    return generic_handler_desc(cargo, r, "restqueryparam", 'parameters', para, 'description')
################################################################################
### @brief restdescription
################################################################################
def restdescription(cargo, r=Regexen()):
    """Accumulate description text into operation['notes']."""
    return generic_handler_desc(cargo, r, "restdescription", None, operation, 'notes')
################################################################################
### @brief restreturncodes
################################################################################
def restreturncodes(cargo, r=Regexen()):
    """Handle a @RESTRETURNCODES section via the generic section handler."""
    return generic_handler(cargo, r, "restreturncodes")
################################################################################
### @brief restreturncode
################################################################################
def restreturncode(cargo, r=Regexen()):
    """Parse a @RESTRETURNCODE directive; the reason text follows on later lines."""
    (fp, last) = cargo
    code = parameters(last)
    returncode = {
        'code': code,
        'reason': ''
    }
    return generic_handler_desc(cargo, r, "restreturncode", 'errorResponses', returncode, 'reason')
################################################################################
### @brief examples
################################################################################
def examples(cargo, r=Regexen()):
    """Accumulate example text into operation['examples']."""
    return generic_handler_desc(cargo, r, "examples", None, operation, 'examples')
################################################################################
### @brief example_arangosh_run
################################################################################
def example_arangosh_run(cargo, r=Regexen()):
    """Inline a pre-generated arangosh example file into operation['examples'].

    Reads ../Examples/<name>.generated, HTML-escapes it into a <pre> block,
    then skips input lines up to the matching END_EXAMPLE_ARANGOSH_RUN marker.
    """
    global DEBUG, C_FILE
    if DEBUG: print >> sys.stderr, "example_arangosh_run"
    fp, last = cargo
    # new examples code TODO should include for each example own object in json file
    # BUG FIX: the example file was previously opened and never closed (fd leak);
    # a with-statement guarantees it is closed even if escaping fails.
    examplefile = open(os.path.join(os.path.dirname(__file__), '../Examples/' + parameters(last) + '.generated'))
    try:
        operation['examples'] += '<br><br><pre><code class="json">'
        for line in examplefile:
            operation['examples'] += cgi.escape(line)
        operation['examples'] += '</code></pre><br>'
    finally:
        examplefile.close()
    line = ""
    while not r.END_EXAMPLE_ARANGOSH_RUN.match(line):
        line = fp.readline()
        if not line:
            return eof, (fp, line)
    return examples, (fp, line)
################################################################################
### @brief eof
################################################################################
def eof(cargo):
    """Terminal state: dump the accumulated swagger structure as JSON to stdout."""
    global DEBUG, C_FILE
    if DEBUG: print >> sys.stderr, "eof"
    print json.dumps(swagger, indent=4, separators=(',',': '))
################################################################################
### @brief error
################################################################################
def error(cargo):
    """Terminal state: report a line that no directive regex matched."""
    global DEBUG, C_FILE
    if DEBUG: print >> sys.stderr, "error"
    sys.stderr.write('Unidentifiable line:\n' + cargo)
################################################################################
### @brief comment
################################################################################
def comment(cargo, r=Regexen()):
    """Scan comment lines until a directive selects the next state.

    Sets the module-global C_FILE flag when a @file marker is seen.
    Returns the (next_state, cargo) pair from next_step(), or the eof
    state when input is exhausted.
    """
    global DEBUG, C_FILE
    if DEBUG: print >> sys.stderr, "comment"
    (fp, last) = cargo
    while True:
        line = fp.readline()
        if not line:
            return eof, (fp, line)
        if r.FILE.match(line):
            C_FILE = True
        # renamed from 'next' to avoid shadowing the builtin next()
        next_state, c = next_step(fp, line, r)
        if next_state:
            return next_state, c
################################################################################
### @brief skip_code
###
### skip all non comment lines
################################################################################
def skip_code(cargo, r=Regexen()):
    """Skip all non-comment lines; delegate comment lines to comment()."""
    global DEBUG, C_FILE
    if DEBUG: print >> sys.stderr, "skip_code"
    (fp, last) = cargo
    # Non-C inputs contain only comments, so hand off immediately.
    if not C_FILE:
        return comment((fp, last), r)
    while 1:
        line = fp.readline()
        if not line:
            return eof, (fp, line)
        if not r.NON_COMMENT.match(line):
            return comment((fp, line), r)
################################################################################
### @brief main
################################################################################
if __name__ == "__main__":
    # Build the documentation-parsing state machine and feed it stdin.
    automat = StateMachine()
    automat.add_state(comment)
    # terminal states
    automat.add_state(eof, end_state=1)
    automat.add_state(error, end_state=1)
    # regular directive-handling states (same registration order as before)
    for handler in (example_arangosh_run, examples, skip_code,
                    restbodyparam, restdescription, restheader,
                    restheaderparam, restheaderparameters,
                    restqueryparam, restqueryparameters,
                    restreturncode, restreturncodes,
                    resturlparam, resturlparameters):
        automat.add_state(handler)
    automat.set_start(skip_code)
    automat.run((sys.stdin, ''))
## -----------------------------------------------------------------------------
## --SECTION-- END-OF-FILE
## -----------------------------------------------------------------------------
## Local Variables:
## mode: outline-minor
## outline-regexp: "^\\(### @brief\\|## --SECTION--\\|# -\\*- \\)"
## End:
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| |
# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_db import exception as db_exc
from oslotest import base
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions
from neutron_lib.callbacks import manager
from neutron_lib.callbacks import priority_group
from neutron_lib.callbacks import resources
# Callback priorities used by the ordering tests; a lower numeric value
# means the callback runs earlier.
PRI_HIGH = 0
PRI_MED = 5000
PRI_LOW = 10000
class ObjectWithCallback(object):
    """Helper whose bound ``callback`` method counts its invocations."""

    def __init__(self):
        # number of times callback() has been invoked
        self.counter = 0

    def callback(self, *args, **kwargs):
        """Ignore all arguments and bump the invocation counter."""
        self.counter = self.counter + 1
class GloriousObjectWithCallback(ObjectWithCallback):
    """Distinct subclass so tests can verify the manager does not treat
    callbacks from different classes as equivalent."""
    pass
def callback_1(*args, **kwargs):
    # Module-level callback; tests reset and inspect its 'counter' attribute.
    callback_1.counter += 1
callback_id_1 = manager._get_id(callback_1)  # id used in _index assertions
def callback_2(*args, **kwargs):
    # Second independent counting callback.
    callback_2.counter += 1
callback_id_2 = manager._get_id(callback_2)  # id used in _index assertions
def callback_raise(*args, **kwargs):
    # Always fails; used to exercise the manager's error-handling paths.
    raise Exception()
def callback_raise_retriable(*args, **kwargs):
    # Raises a retriable DB error; the manager should convert it to RetryRequest.
    raise db_exc.DBDeadlock()
def callback_3(resource, event, trigger, payload):
    # Counting callback with the explicit publish signature.
    callback_3.counter += 1
class CallBacksManagerTestCase(base.BaseTestCase):
    """Exercise subscribe/unsubscribe/publish behaviour of CallbacksManager."""

    def setUp(self):
        super(CallBacksManagerTestCase, self).setUp()
        self.manager = manager.CallbacksManager()
        self.event_payload = events.EventPayload(object())
        # reset the module-level counters mutated by the callbacks
        callback_1.counter = 0
        callback_2.counter = 0
        callback_3.counter = 0

    def test_subscribe(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.assertIsNotNone(
            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
        self.assertIn(callback_id_1, self.manager._index)
        self.assertEqual(self.__module__ + '.callback_1-%s' %
                         hash(callback_1), callback_id_1)

    def test_subscribe_unknown(self):
        # subscribing to a resource/event the library does not predefine works
        self.manager.subscribe(
            callback_1, 'my_resource', 'my-event')
        self.assertIsNotNone(
            self.manager._callbacks['my_resource']['my-event'])
        self.assertIn(callback_id_1, self.manager._index)

    def test_subscribe_is_idempotent(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.assertEqual(
            1,
            len(self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]))
        callbacks = self.manager._index[callback_id_1][resources.PORT]
        self.assertEqual(1, len(callbacks))

    def test_subscribe_multiple_callbacks(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_2, resources.PORT, events.BEFORE_CREATE)
        self.assertEqual(2, len(self.manager._index))
        self.assertEqual(
            1,
            len(self.manager._callbacks[resources.PORT][events.BEFORE_CREATE]))
        self.assertEqual(
            2,
            len(self.manager._callbacks
                [resources.PORT][events.BEFORE_CREATE][0][1]))

    def test_unsubscribe_during_iteration(self):
        # a callback that unsubscribes itself while being notified
        def unsub(r, e, *a, **k):
            return self.manager.unsubscribe(unsub, r, e)
        self.manager.subscribe(unsub, resources.PORT,
                               events.BEFORE_CREATE)
        self.manager.publish(resources.PORT, events.BEFORE_CREATE, mock.ANY,
                             payload=self.event_payload)
        self.assertNotIn(unsub, self.manager._index)

    def test_unsubscribe(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.unsubscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.assertNotIn(
            callback_id_1,
            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
        self.assertNotIn(callback_id_1, self.manager._index)

    def test_unsubscribe_unknown_callback(self):
        self.manager.subscribe(
            callback_2, resources.PORT, events.BEFORE_CREATE)
        self.manager.unsubscribe(callback_1, mock.ANY, mock.ANY)
        self.assertEqual(1, len(self.manager._index))

    def test_fail_to_unsubscribe(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        # both resource and event are required for unsubscribe
        self.assertRaises(exceptions.Invalid,
                          self.manager.unsubscribe,
                          callback_1, resources.PORT, None)
        self.assertRaises(exceptions.Invalid,
                          self.manager.unsubscribe,
                          callback_1, None, events.BEFORE_CREATE)

    def test_unsubscribe_is_idempotent(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.unsubscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.unsubscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.assertNotIn(callback_id_1, self.manager._index)
        self.assertNotIn(callback_id_1,
                         self.manager._callbacks[resources.PORT]
                         [events.BEFORE_CREATE])

    def test_unsubscribe_by_resource(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_DELETE)
        self.manager.subscribe(
            callback_2, resources.PORT, events.BEFORE_DELETE)
        self.manager.unsubscribe_by_resource(callback_1, resources.PORT)
        self.assertEqual(
            0,
            len(self.manager._callbacks
                [resources.PORT][events.BEFORE_CREATE]))
        self.assertEqual(
            1,
            len(self.manager._callbacks[resources.PORT][events.BEFORE_DELETE]))
        self.assertIn(
            callback_id_2,
            self.manager._callbacks
            [resources.PORT][events.BEFORE_DELETE][0][1])
        self.assertNotIn(callback_id_1, self.manager._index)

    def test_unsubscribe_all(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_DELETE)
        self.manager.subscribe(
            callback_1, resources.ROUTER, events.BEFORE_CREATE)
        self.manager.unsubscribe_all(callback_1)
        self.assertNotIn(
            callback_id_1,
            self.manager._callbacks[resources.PORT][events.BEFORE_CREATE])
        self.assertNotIn(callback_id_1, self.manager._index)

    def test_publish_none(self):
        # publishing with no subscribers must invoke nothing
        self.manager.publish(resources.PORT, events.BEFORE_CREATE, mock.ANY,
                             payload=self.event_payload)
        self.assertEqual(0, callback_1.counter)
        self.assertEqual(0, callback_2.counter)

    def test_feebly_referenced_callback(self):
        # a lambda with no other strong references must still be callable
        self.manager.subscribe(lambda *x, **y: None, resources.PORT,
                               events.BEFORE_CREATE)
        self.manager.publish(resources.PORT, events.BEFORE_CREATE, mock.ANY,
                             payload=self.event_payload)

    def test_publish_with_exception(self):
        with mock.patch.object(self.manager, '_notify_loop') as n:
            n.return_value = ['error']
            self.assertRaises(exceptions.CallbackFailure,
                              self.manager.publish,
                              mock.ANY, events.BEFORE_CREATE, mock.ANY,
                              payload=self.event_payload)
            # a failed BEFORE_* event triggers the matching ABORT_* event
            expected_calls = [
                mock.call(mock.ANY, 'before_create', mock.ANY,
                          self.event_payload),
                mock.call(mock.ANY, 'abort_create', mock.ANY,
                          self.event_payload)
            ]
            n.assert_has_calls(expected_calls)

    def test_publish_with_precommit_exception(self):
        with mock.patch.object(self.manager, '_notify_loop') as n:
            n.return_value = ['error']
            self.assertRaises(exceptions.CallbackFailure,
                              self.manager.publish,
                              mock.ANY, events.PRECOMMIT_UPDATE, mock.ANY,
                              payload=self.event_payload)
            expected_calls = [
                mock.call(mock.ANY, 'precommit_update', mock.ANY,
                          self.event_payload),
            ]
            n.assert_has_calls(expected_calls)

    def test_publish_handle_exception(self):
        self.manager.subscribe(
            callback_raise, resources.PORT, events.BEFORE_CREATE)
        e = self.assertRaises(exceptions.CallbackFailure, self.manager.publish,
                              resources.PORT, events.BEFORE_CREATE, self,
                              payload=self.event_payload)
        self.assertIsInstance(e.errors[0], exceptions.NotificationError)

    def test_publish_handle_retriable_exception(self):
        self.manager.subscribe(
            callback_raise_retriable, resources.PORT, events.BEFORE_CREATE)
        self.assertRaises(db_exc.RetryRequest, self.manager.publish,
                          resources.PORT, events.BEFORE_CREATE, self,
                          payload=self.event_payload)

    def test_publish_called_once_with_no_failures(self):
        with mock.patch.object(self.manager, '_notify_loop') as n:
            n.return_value = False
            self.manager.publish(resources.PORT, events.BEFORE_CREATE,
                                 mock.ANY,
                                 payload=self.event_payload)
            n.assert_called_once_with(
                resources.PORT, events.BEFORE_CREATE, mock.ANY,
                self.event_payload)

    def test__notify_loop_single_event(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_2, resources.PORT, events.BEFORE_CREATE)
        self.manager._notify_loop(
            resources.PORT, events.BEFORE_CREATE, mock.ANY,
            payload=mock.ANY)
        self.assertEqual(1, callback_1.counter)
        self.assertEqual(1, callback_2.counter)

    def test__notify_loop_multiple_events(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_1, resources.ROUTER, events.BEFORE_DELETE)
        self.manager.subscribe(
            callback_2, resources.PORT, events.BEFORE_CREATE)
        self.manager._notify_loop(
            resources.PORT, events.BEFORE_CREATE, mock.ANY,
            payload=mock.ANY)
        self.manager._notify_loop(
            resources.ROUTER, events.BEFORE_DELETE, mock.ANY,
            payload=mock.ANY)
        self.assertEqual(2, callback_1.counter)
        self.assertEqual(1, callback_2.counter)

    def test_clearing_subscribers(self):
        self.manager.subscribe(
            callback_1, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_2, resources.PORT, events.AFTER_CREATE)
        self.assertEqual(2, len(self.manager._callbacks[resources.PORT]))
        self.assertEqual(2, len(self.manager._index))
        self.manager.clear()
        self.assertEqual(0, len(self.manager._callbacks))
        self.assertEqual(0, len(self.manager._index))

    def test_callback_priority(self):
        pri_first = priority_group.PRIORITY_DEFAULT - 100
        pri_last = priority_group.PRIORITY_DEFAULT + 100
        # lowest priority value should be first in the _callbacks
        self.manager.subscribe(callback_1, 'my-resource', 'my-event')
        self.manager.subscribe(callback_2, 'my-resource',
                               'my-event', pri_last)
        self.manager.subscribe(callback_3, 'my-resource',
                               'my-event', pri_first)
        callbacks = self.manager._callbacks['my-resource']['my-event']
        # callbacks should be sorted based on priority for resource and event
        self.assertEqual(3, len(callbacks))
        self.assertEqual(pri_first, callbacks[0][0])
        self.assertEqual(priority_group.PRIORITY_DEFAULT, callbacks[1][0])
        self.assertEqual(pri_last, callbacks[2][0])

    @mock.patch('neutron_lib.callbacks.manager.CallbacksManager._del_callback')
    def test_del_callback_called_on_unsubscribe(self, mock_cb):
        self.manager.subscribe(callback_1, 'my-resource', 'my-event')
        callback_id = self.manager._find(callback_1)
        callbacks = self.manager._callbacks['my-resource']['my-event']
        self.assertEqual(1, len(callbacks))
        self.manager.unsubscribe(callback_1, 'my-resource', 'my-event')
        mock_cb.assert_called_once_with(callbacks, callback_id)

    @mock.patch("neutron_lib.callbacks.manager.LOG")
    def test_callback_order(self, _logger):
        self.manager.subscribe(callback_1, 'my-resource', 'my-event', PRI_MED)
        self.manager.subscribe(callback_2, 'my-resource', 'my-event', PRI_HIGH)
        self.manager.subscribe(callback_3, 'my-resource', 'my-event', PRI_LOW)
        self.assertEqual(
            3, len(self.manager._callbacks['my-resource']['my-event']))
        self.manager.unsubscribe(callback_3, 'my-resource', 'my-event')
        self.manager.publish('my-resource', 'my-event', mock.ANY,
                             payload=self.event_payload)
        # callback_3 should be deleted and not executed
        self.assertEqual(
            2, len(self.manager._callbacks['my-resource']['my-event']))
        self.assertEqual(0, callback_3.counter)
        # executed callbacks should have counter incremented
        self.assertEqual(1, callback_2.counter)
        self.assertEqual(1, callback_1.counter)
        # NOTE(review): index [4][1][1] picks the callback-id argument out of
        # the fifth LOG.debug call — fragile against logging changes; confirm
        # against the manager's _notify_loop logging if this starts failing.
        callback_ids = _logger.debug.mock_calls[4][1][1]
        # callback_2 should be first in exceution as it has higher priority
        self.assertEqual(callback_id_2, callback_ids[0])
        self.assertEqual(callback_id_1, callback_ids[1])

    @mock.patch("neutron_lib.callbacks.manager.LOG")
    def test__notify_loop_skip_log_errors(self, _logger):
        self.manager.subscribe(
            callback_raise, resources.PORT, events.BEFORE_CREATE)
        self.manager.subscribe(
            callback_raise, resources.PORT, events.PRECOMMIT_CREATE)
        self.manager._notify_loop(
            resources.PORT, events.BEFORE_CREATE, mock.ANY, payload=mock.ANY)
        self.manager._notify_loop(
            resources.PORT, events.PRECOMMIT_CREATE, mock.ANY,
            payload=mock.ANY)
        self.assertFalse(_logger.exception.call_count)
        self.assertTrue(_logger.debug.call_count)

    def test_object_instances_as_subscribers(self):
        """Ensures that the manager doesn't think these are equivalent."""
        a = GloriousObjectWithCallback()
        b = ObjectWithCallback()
        c = ObjectWithCallback()
        for o in (a, b, c):
            self.manager.subscribe(
                o.callback, resources.PORT, events.BEFORE_CREATE)
            # ensure idempotency remains for a single object
            self.manager.subscribe(
                o.callback, resources.PORT, events.BEFORE_CREATE)
        self.manager.publish(resources.PORT, events.BEFORE_CREATE, mock.ANY,
                             payload=events.EventPayload(object()))
        self.assertEqual(1, a.counter)
        self.assertEqual(1, b.counter)
        self.assertEqual(1, c.counter)

    def test_publish_invalid_payload(self):
        # payload must be an events.EventPayload instance
        self.assertRaises(exceptions.Invalid, self.manager.publish,
                          resources.PORT, events.AFTER_DELETE, self,
                          payload=object())

    def test_publish_empty_payload(self):
        notify_payload = []

        def _memo(resource, event, trigger, payload=None):
            notify_payload.append(payload)
        self.manager.subscribe(_memo, 'x', 'y')
        self.manager.publish('x', 'y', self)
        self.assertIsNone(notify_payload[0])

    def test_publish_payload(self):
        notify_payload = []

        def _memo(resource, event, trigger, payload=None):
            notify_payload.append(payload)
        self.manager.subscribe(_memo, 'x', 'y')
        self.manager.publish('x', 'y', self, payload=self.event_payload)
        self.assertEqual(self.event_payload, notify_payload[0])
| |
from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.core.management import call_command
from django.db import connection, connections
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import modify_settings
from django.utils.six import StringIO
from ..test_data import TEST_DATA
if HAS_GDAL:
from django.contrib.gis.gdal import Driver, GDALException, GDAL_VERSION
from django.contrib.gis.utils.ogrinspect import ogrinspect
from .models import AllOGRFields
@skipUnless(HAS_GDAL, "InspectDbTests needs GDAL support")
class InspectDbTests(TestCase):
    """Tests for the geo-enabled ``inspectdb`` management command."""

    @skipUnlessDBFeature("gis_enabled")
    def test_geom_columns(self):
        """
        Test the geo-enabled inspectdb command.
        """
        out = StringIO()
        call_command(
            'inspectdb',
            table_name_filter=lambda tn: tn == 'inspectapp_allogrfields',
            stdout=out
        )
        output = out.getvalue()
        # Specific field classes are only emitted when the backend can
        # introspect geometry columns; otherwise a generic GeometryField.
        if connection.features.supports_geometry_field_introspection:
            self.assertIn('geom = models.PolygonField()', output)
            self.assertIn('point = models.PointField()', output)
        else:
            self.assertIn('geom = models.GeometryField(', output)
            self.assertIn('point = models.GeometryField(', output)
        self.assertIn('objects = models.GeoManager()', output)

    @skipUnlessDBFeature("supports_3d_storage")
    def test_3d_columns(self):
        out = StringIO()
        call_command(
            'inspectdb',
            table_name_filter=lambda tn: tn == 'inspectapp_fields3d',
            stdout=out
        )
        output = out.getvalue()
        if connection.features.supports_geometry_field_introspection:
            self.assertIn('point = models.PointField(dim=3)', output)
            self.assertIn('line = models.LineStringField(dim=3)', output)
            self.assertIn('poly = models.PolygonField(dim=3)', output)
        else:
            self.assertIn('point = models.GeometryField(', output)
            self.assertIn('line = models.GeometryField(', output)
            self.assertIn('poly = models.GeometryField(', output)
        self.assertIn('objects = models.GeoManager()', output)
@skipUnless(HAS_GDAL, "OGRInspectTest needs GDAL support")
@modify_settings(
    INSTALLED_APPS={'append': 'django.contrib.gis'},
)
class OGRInspectTest(TestCase):
    """Tests for ``ogrinspect`` model generation from OGR data sources."""
    maxDiff = 1024

    def test_poly(self):
        shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp')
        model_def = ogrinspect(shp_file, 'MyModel')
        expected = [
            '# This is an auto-generated Django model module created by ogrinspect.',
            'from django.contrib.gis.db import models',
            '',
            'class MyModel(models.Model):',
            '    float = models.FloatField()',
            # GDAL >= 2.0 reports 64-bit integers for OGR integer fields
            '    int = models.{}()'.format('BigIntegerField' if GDAL_VERSION >= (2, 0) else 'FloatField'),
            '    str = models.CharField(max_length=80)',
            '    geom = models.PolygonField(srid=-1)',
            '    objects = models.GeoManager()',
        ]
        self.assertEqual(model_def, '\n'.join(expected))

    def test_poly_multi(self):
        shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp')
        model_def = ogrinspect(shp_file, 'MyModel', multi_geom=True)
        self.assertIn('geom = models.MultiPolygonField(srid=-1)', model_def)
        # Same test with a 25D-type geometry field
        shp_file = os.path.join(TEST_DATA, 'gas_lines', 'gas_leitung.shp')
        model_def = ogrinspect(shp_file, 'MyModel', multi_geom=True)
        self.assertIn('geom = models.MultiLineStringField(srid=-1)', model_def)

    def test_date_field(self):
        shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp')
        model_def = ogrinspect(shp_file, 'City')
        expected = [
            '# This is an auto-generated Django model module created by ogrinspect.',
            'from django.contrib.gis.db import models',
            '',
            'class City(models.Model):',
            '    name = models.CharField(max_length=80)',
            '    population = models.{}()'.format('BigIntegerField' if GDAL_VERSION >= (2, 0) else 'FloatField'),
            '    density = models.FloatField()',
            '    created = models.DateField()',
            '    geom = models.PointField(srid=-1)',
            '    objects = models.GeoManager()',
        ]
        self.assertEqual(model_def, '\n'.join(expected))

    def test_time_field(self):
        # Getting the database identifier used by OGR, if None returned
        # GDAL does not have the support compiled in.
        ogr_db = get_ogr_db_string()
        if not ogr_db:
            self.skipTest("Unable to setup an OGR connection to your database")
        try:
            # Writing shapefiles via GDAL currently does not support writing OGRTime
            # fields, so we need to actually use a database
            model_def = ogrinspect(ogr_db, 'Measurement',
                                   layer_key=AllOGRFields._meta.db_table,
                                   decimal=['f_decimal'])
        except GDALException:
            self.skipTest("Unable to setup an OGR connection to your database")
        self.assertTrue(model_def.startswith(
            '# This is an auto-generated Django model module created by ogrinspect.\n'
            'from django.contrib.gis.db import models\n'
            '\n'
            'class Measurement(models.Model):\n'
        ))
        # The ordering of model fields might vary depending on several factors (version of GDAL, etc.)
        self.assertIn('    f_decimal = models.DecimalField(max_digits=0, decimal_places=0)', model_def)
        self.assertIn('    f_int = models.IntegerField()', model_def)
        self.assertIn('    f_datetime = models.DateTimeField()', model_def)
        self.assertIn('    f_time = models.TimeField()', model_def)
        self.assertIn('    f_float = models.FloatField()', model_def)
        self.assertIn('    f_char = models.CharField(max_length=10)', model_def)
        self.assertIn('    f_date = models.DateField()', model_def)
        self.assertIsNotNone(re.search(
            r'    geom = models.PolygonField\(([^\)])*\)\n'  # Some backends may have srid=-1
            r'    objects = models.GeoManager\(\)', model_def))

    def test_management_command(self):
        shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp')
        out = StringIO()
        call_command('ogrinspect', shp_file, 'City', stdout=out)
        output = out.getvalue()
        self.assertIn('class City(models.Model):', output)
def get_ogr_db_string():
    """
    Construct the DB string that GDAL will use to inspect the database.
    GDAL will create its own connection to the database, so we re-use the
    connection settings from the Django test.

    Returns ``None`` when the backend is unsupported, the GDAL driver is
    unavailable, or the database is an in-memory SQLite database.
    """
    db = connections.databases['default']
    # Map from the django backend into the OGR driver name and database identifier
    # http://www.gdal.org/ogr/ogr_formats.html
    #
    # TODO: Support Oracle (OCI).
    drivers = {
        'django.contrib.gis.db.backends.postgis': ('PostgreSQL', "PG:dbname='%(db_name)s'", ' '),
        'django.contrib.gis.db.backends.mysql': ('MySQL', 'MYSQL:"%(db_name)s"', ','),
        'django.contrib.gis.db.backends.spatialite': ('SQLite', '%(db_name)s', '')
    }
    db_engine = db['ENGINE']
    if db_engine not in drivers:
        return None
    drv_name, db_str, param_sep = drivers[db_engine]
    # Ensure that GDAL library has driver support for the database.
    # BUG FIX: was a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; only driver-lookup failures should yield None.
    try:
        Driver(drv_name)
    except Exception:
        return None
    # SQLite/Spatialite in-memory databases
    if db['NAME'] == ":memory:":
        return None
    # Build the params of the OGR database connection string
    params = [db_str % {'db_name': db['NAME']}]

    def add(key, template):
        value = db.get(key, None)
        # Don't add the parameter if it is not in django's settings
        if value:
            params.append(template % value)
    add('HOST', "host='%s'")
    add('PORT', "port='%s'")
    add('USER', "user='%s'")
    add('PASSWORD', "password='%s'")
    return param_sep.join(params)
| |
"""
Implementation of Harwell-Boeing read/write.
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
# TODO:
# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
# takes a lot of memory. Being faster would require compiled code.
# write is not efficient. Although not a terribly exciting task,
# having reusable facilities to efficiently read/write fortran-formatted files
# would be useful outside this module.
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from ._fortran_format_parser import FortranFormatParser, IntFormat, ExpFormat
__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
"HBMatrixType"]
class MalformedHeader(Exception):
    """Raised when a Harwell-Boeing header does not follow the expected format."""
    pass
class LineOverflow(Warning):
    """Warning emitted when written data overflows a header line field."""
    pass
def _nbytes_full(fmt, nlines):
"""Return the number of bytes to read to get every full lines for the
given parsed fortran format."""
return (fmt.repeat * fmt.width + 1) * (nlines - 1)
class HBInfo:
@classmethod
def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
    """Create a HBInfo instance from an existing sparse matrix.

    Parameters
    ----------
    m : sparse matrix
        the HBInfo instance will derive its parameters from m
    title : str
        Title to put in the HB header
    key : str
        Key
    mxtype : HBMatrixType
        type of the input matrix
    fmt : dict
        not implemented

    Returns
    -------
    hb_info : HBInfo instance
    """
    m = m.tocsc(copy=False)

    pointer = m.indptr
    indices = m.indices
    values = m.data

    nrows, ncols = m.shape
    nnon_zeros = m.nnz

    if fmt is None:
        # +1 because HB use one-based indexing (Fortran), and we will write
        # the indices /pointer as such
        pointer_fmt = IntFormat.from_number(np.max(pointer+1))
        indices_fmt = IntFormat.from_number(np.max(indices+1))

        if values.dtype.kind in np.typecodes["AllFloat"]:
            # negate the max magnitude so the chosen format has room
            # for a leading minus sign
            values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
        elif values.dtype.kind in np.typecodes["AllInteger"]:
            values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
        else:
            raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
    else:
        raise NotImplementedError("fmt argument not supported yet.")

    if mxtype is None:
        if not np.isrealobj(values):
            raise ValueError("Complex values not supported yet")
        if values.dtype.kind in np.typecodes["AllInteger"]:
            tp = "integer"
        elif values.dtype.kind in np.typecodes["AllFloat"]:
            tp = "real"
        else:
            raise NotImplementedError("type %s for values not implemented"
                                      % values.dtype)
        mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
    else:
        raise ValueError("mxtype argument not handled yet.")

    def _nlines(fmt, size):
        # ceil(size / fmt.repeat): number of text lines needed for `size`
        # entries at fmt.repeat entries per line
        nlines = size // fmt.repeat
        if nlines * fmt.repeat != size:
            nlines += 1
        return nlines

    pointer_nlines = _nlines(pointer_fmt, pointer.size)
    indices_nlines = _nlines(indices_fmt, indices.size)
    values_nlines = _nlines(values_fmt, values.size)

    total_nlines = pointer_nlines + indices_nlines + values_nlines

    return cls(title, key,
               total_nlines, pointer_nlines, indices_nlines, values_nlines,
               mxtype, nrows, ncols, nnon_zeros,
               pointer_fmt.fortran_format, indices_fmt.fortran_format,
               values_fmt.fortran_format)
@classmethod
def from_file(cls, fid):
    """Create a HBInfo instance from a file object containing a matrix in the
    HB format.

    Parses the 4-line Harwell-Boeing header (title/key, line counts, matrix
    type + dimensions, Fortran formats) using the fixed column layout of the
    format specification.

    Parameters
    ----------
    fid : file-like
        File or file-like object containing a matrix in the HB format.

    Returns
    -------
    hb_info : HBInfo instance

    Raises
    ------
    ValueError
        If any header line is too short, malformed, or describes a feature
        that is not supported (right-hand sides, non-real/integer values,
        symmetric structure, elemental storage).
    """
    # First line: columns 1-72 are the title, 73-80 the key.
    line = fid.readline().strip("\n")
    if not len(line) > 72:
        raise ValueError("Expected at least 72 characters for first line, "
                         "got: \n%s" % line)
    title = line[:72]
    key = line[72:]
    # Second line: four 14-column integer fields with the line counts of
    # each data section, plus an optional RHS line count in columns 57-72.
    line = fid.readline().strip("\n")
    if not len(line.rstrip()) >= 56:
        raise ValueError("Expected at least 56 characters for second line, "
                         "got: \n%s" % line)
    total_nlines = _expect_int(line[:14])
    pointer_nlines = _expect_int(line[14:28])
    indices_nlines = _expect_int(line[28:42])
    values_nlines = _expect_int(line[42:56])
    # A blank RHS field means "no right-hand sides".
    rhs_nlines = line[56:72].strip()
    if rhs_nlines == '':
        rhs_nlines = 0
    else:
        rhs_nlines = _expect_int(rhs_nlines)
    if not rhs_nlines == 0:
        raise ValueError("Only files without right hand side supported for "
                         "now.")
    # Third line: 3-char matrix type code, 11 blank columns, then the
    # matrix dimensions and non-zero counts in 14-column fields.
    line = fid.readline().strip("\n")
    # NOTE(review): the check is >= 70 but the message says 72 — the last
    # field read below ends at column 70, so 70 appears to be intentional;
    # the message text looks stale.
    if not len(line) >= 70:
        raise ValueError("Expected at least 72 character for third line, got:\n"
                         "%s" % line)
    mxtype_s = line[:3].upper()
    if not len(mxtype_s) == 3:
        raise ValueError("mxtype expected to be 3 characters long")
    mxtype = HBMatrixType.from_fortran(mxtype_s)
    if mxtype.value_type not in ["real", "integer"]:
        raise ValueError("Only real or integer matrices supported for "
                         "now (detected %s)" % mxtype)
    if not mxtype.structure == "unsymmetric":
        raise ValueError("Only unsymmetric matrices supported for "
                         "now (detected %s)" % mxtype)
    if not mxtype.storage == "assembled":
        raise ValueError("Only assembled matrices supported for now")
    # Columns 4-14 must be blank per the HB specification.
    if not line[3:14] == " " * 11:
        raise ValueError("Malformed data for third line: %s" % line)
    nrows = _expect_int(line[14:28])
    ncols = _expect_int(line[28:42])
    nnon_zeros = _expect_int(line[42:56])
    # nltvl: number of elemental matrix entries; only 0 (assembled) allowed.
    nelementals = _expect_int(line[56:70])
    if not nelementals == 0:
        raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
                         % nelementals)
    # Fourth line: the three Fortran formats (pointer, indices, values).
    line = fid.readline().strip("\n")
    ct = line.split()
    if not len(ct) == 3:
        raise ValueError("Expected 3 formats, got %s" % ct)
    return cls(title, key,
               total_nlines, pointer_nlines, indices_nlines, values_nlines,
               mxtype, nrows, ncols, nnon_zeros,
               ct[0], ct[1], ct[2],
               rhs_nlines, nelementals)
def __init__(self, title, key,
             total_nlines, pointer_nlines, indices_nlines, values_nlines,
             mxtype, nrows, ncols, nnon_zeros,
             pointer_format_str, indices_format_str, values_format_str,
             right_hand_sides_nlines=0, nelementals=0):
    """Do not use this directly, but the class ctrs (from_* functions).

    Validates the title/key and the three Fortran format strings, derives
    the numpy dtypes for each section and precomputes the byte counts of
    the fully-populated lines (used by the reader).

    Raises
    ------
    ValueError
        If the title is too long, a format string does not parse to the
        expected format class, or the values format is inconsistent with
        the matrix value type.
    """
    # Apply defaults and validate *before* storing, so that self.title and
    # self.key are never None (dump() calls .ljust() on both).  The
    # original code assigned the attributes first, which left None in
    # place and made the "No Title"/"|No Key" defaults dead.
    if title is None:
        title = "No Title"
    if len(title) > 72:
        raise ValueError("title cannot be > 72 characters")
    if key is None:
        key = "|No Key"
    if len(key) > 8:
        warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)
    self.title = title
    self.key = key

    self.total_nlines = total_nlines
    self.pointer_nlines = pointer_nlines
    self.indices_nlines = indices_nlines
    self.values_nlines = values_nlines

    parser = FortranFormatParser()
    pointer_format = parser.parse(pointer_format_str)
    if not isinstance(pointer_format, IntFormat):
        raise ValueError("Expected int format for pointer format, got %s"
                         % pointer_format)
    indices_format = parser.parse(indices_format_str)
    if not isinstance(indices_format, IntFormat):
        raise ValueError("Expected int format for indices format, got %s" %
                         indices_format)
    values_format = parser.parse(values_format_str)
    if isinstance(values_format, ExpFormat):
        if mxtype.value_type not in ["real", "complex"]:
            raise ValueError("Inconsistency between matrix type %s and "
                             "value type %s" % (mxtype, values_format))
        values_dtype = np.float64
    elif isinstance(values_format, IntFormat):
        if mxtype.value_type not in ["integer"]:
            raise ValueError("Inconsistency between matrix type %s and "
                             "value type %s" % (mxtype, values_format))
        # XXX: fortran int -> dtype association ?
        values_dtype = int
    else:
        raise ValueError("Unsupported format for values %r" % (values_format,))

    self.pointer_format = pointer_format
    self.indices_format = indices_format
    self.values_format = values_format
    self.pointer_dtype = np.int32
    self.indices_dtype = np.int32
    self.values_dtype = values_dtype

    # Byte count of the fully-populated lines of each section; the reader
    # slurps each section with one read() of this size plus one readline().
    self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)
    self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)
    self.values_nbytes_full = _nbytes_full(values_format, values_nlines)

    self.nrows = nrows
    self.ncols = ncols
    self.nnon_zeros = nnon_zeros
    self.nelementals = nelementals
    self.mxtype = mxtype
    # NOTE(review): right_hand_sides_nlines is accepted but never stored,
    # mirroring the original code — confirm whether it should be kept.
def dump(self):
    """Return the 4-line Harwell-Boeing header for this instance as a string."""
    # Line 1: title padded to 72 columns followed by the key.
    title_line = self.title.ljust(72) + self.key.ljust(8)
    # Line 2: line counts of the three data sections.
    counts_line = "%14d%14d%14d%14d" % (self.total_nlines,
                                        self.pointer_nlines,
                                        self.indices_nlines,
                                        self.values_nlines)
    # Line 3: matrix type code, dimensions, nnz, and nltvl (always 0 here).
    shape_line = "%14s%14d%14d%14d%14d" % (self.mxtype.fortran_format.ljust(14),
                                           self.nrows, self.ncols,
                                           self.nnon_zeros, 0)
    # Line 4: the three Fortran formats.
    formats_line = "%16s%16s%20s" % (self.pointer_format.fortran_format.ljust(16),
                                     self.indices_format.fortran_format.ljust(16),
                                     self.values_format.fortran_format.ljust(20))
    return "\n".join([title_line, counts_line, shape_line, formats_line])
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError as e:
if msg is None:
msg = "Expected an int, got %s"
raise ValueError(msg % value) from e
def _read_hb_data(content, header):
# XXX: look at a way to reduce memory here (big string creation)
ptr_string = "".join([content.read(header.pointer_nbytes_full),
content.readline()])
ptr = np.fromstring(ptr_string,
dtype=int, sep=' ')
ind_string = "".join([content.read(header.indices_nbytes_full),
content.readline()])
ind = np.fromstring(ind_string,
dtype=int, sep=' ')
val_string = "".join([content.read(header.values_nbytes_full),
content.readline()])
val = np.fromstring(val_string,
dtype=header.values_dtype, sep=' ')
try:
return csc_matrix((val, ind-1, ptr-1),
shape=(header.nrows, header.ncols))
except ValueError as e:
raise e
def _write_data(m, fid, header):
m = m.tocsc(copy=False)
def write_array(f, ar, nlines, fmt):
# ar_nlines is the number of full lines, n is the number of items per
# line, ffmt the fortran format
pyfmt = fmt.python_format
pyfmt_full = pyfmt * fmt.repeat
# for each array to write, we first write the full lines, and special
# case for partial line
full = ar[:(nlines - 1) * fmt.repeat]
for row in full.reshape((nlines-1, fmt.repeat)):
f.write(pyfmt_full % tuple(row) + "\n")
nremain = ar.size - full.size
if nremain > 0:
f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")
fid.write(header.dump())
fid.write("\n")
# +1 is for Fortran one-based indexing
write_array(fid, m.indptr+1, header.pointer_nlines,
header.pointer_format)
write_array(fid, m.indices+1, header.indices_nlines,
header.indices_format)
write_array(fid, m.data, header.values_nlines,
header.values_format)
class HBMatrixType:
    """Holds the three-component Harwell-Boeing matrix type.

    A type is (value_type, structure, storage), serialized as a 3-character
    Fortran code such as ``"RUA"`` (real, unsymmetric, assembled).
    """
    # q2f* maps: qualified name -> Fortran character.
    _q2f_type = {
        "real": "R",
        "complex": "C",
        "pattern": "P",
        "integer": "I",
    }
    _q2f_structure = {
        "symmetric": "S",
        "unsymmetric": "U",
        "hermitian": "H",
        "skewsymmetric": "Z",
        "rectangular": "R",
    }
    _q2f_storage = {
        "assembled": "A",
        "elemental": "E",
    }
    # Reverse maps: Fortran character -> qualified name.
    _f2q_type = {char: name for name, char in _q2f_type.items()}
    _f2q_structure = {char: name for name, char in _q2f_structure.items()}
    _f2q_storage = {char: name for name, char in _q2f_storage.items()}

    @classmethod
    def from_fortran(cls, fmt):
        """Build an HBMatrixType from its 3-character Fortran code."""
        if len(fmt) != 3:
            raise ValueError("Fortran format for matrix type should be 3 "
                             "characters long")
        try:
            return cls(cls._f2q_type[fmt[0]],
                       cls._f2q_structure[fmt[1]],
                       cls._f2q_storage[fmt[2]])
        except KeyError as e:
            raise ValueError("Unrecognized format %s" % fmt) from e

    def __init__(self, value_type, structure, storage="assembled"):
        # Attributes are assigned before validation, matching the original
        # behavior (an invalid instance raises but after partial setup).
        self.value_type = value_type
        self.structure = structure
        self.storage = storage
        if value_type not in self._q2f_type:
            raise ValueError("Unrecognized type %s" % value_type)
        if structure not in self._q2f_structure:
            raise ValueError("Unrecognized structure %s" % structure)
        if storage not in self._q2f_storage:
            raise ValueError("Unrecognized storage %s" % storage)

    @property
    def fortran_format(self):
        """The 3-character Fortran code for this type."""
        parts = (self._q2f_type[self.value_type],
                 self._q2f_structure[self.structure],
                 self._q2f_storage[self.storage])
        return "".join(parts)

    def __repr__(self):
        return "HBMatrixType(%s, %s, %s)" % \
            (self.value_type, self.structure, self.storage)
class HBFile:
    """Thin wrapper tying a file object to its Harwell-Boeing header."""

    def __init__(self, file, hb_info=None):
        """Create a HBFile instance.

        Parameters
        ----------
        file : file-object
            StringIO works as well.
        hb_info : HBInfo, optional
            Must be given when the instance is used for writing; when
            omitted, the header is parsed from *file* instead.
        """
        self._fid = file
        if hb_info is not None:
            self._hb_info = hb_info
        else:
            # Reading mode: parse the header directly from the file.
            self._hb_info = HBInfo.from_file(file)

    @property
    def title(self):
        return self._hb_info.title

    @property
    def key(self):
        return self._hb_info.key

    @property
    def type(self):
        return self._hb_info.mxtype.value_type

    @property
    def structure(self):
        return self._hb_info.mxtype.structure

    @property
    def storage(self):
        return self._hb_info.mxtype.storage

    def read_matrix(self):
        """Parse and return the matrix stored in the underlying file."""
        return _read_hb_data(self._fid, self._hb_info)

    def write_matrix(self, m):
        """Write sparse matrix *m* to the underlying file."""
        return _write_data(m, self._fid, self._hb_info)
def hb_read(path_or_open_file):
    """Read HB-format file.

    Parameters
    ----------
    path_or_open_file : path-like or file-like
        If a file-like object, it is used as-is. Otherwise, it is opened
        before reading.

    Returns
    -------
    data : scipy.sparse.csc_matrix instance
        The data read from the HB file as a sparse matrix.

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format

    Examples
    --------
    We can read and write a harwell-boeing format file:

    >>> from scipy.io.harwell_boeing import hb_read, hb_write
    >>> from scipy.sparse import csr_matrix, eye
    >>> data = csr_matrix(eye(3))  # create a sparse matrix
    >>> hb_write("data.hb", data)  # write a hb file
    >>> print(hb_read("data.hb"))  # read a hb file
      (0, 0)	1.0
      (1, 1)	1.0
      (2, 2)	1.0
    """
    def _get_matrix(fid):
        # Header parsing happens inside HBFile's constructor.
        return HBFile(fid).read_matrix()

    # File-like objects are used directly; paths are opened and closed here.
    if hasattr(path_or_open_file, 'read'):
        return _get_matrix(path_or_open_file)
    with open(path_or_open_file) as f:
        return _get_matrix(f)
def hb_write(path_or_open_file, m, hb_info=None):
    """Write HB-format file.

    Parameters
    ----------
    path_or_open_file : path-like or file-like
        If a file-like object, it is used as-is. Otherwise, it is opened
        before writing.
    m : sparse-matrix
        the sparse matrix to write
    hb_info : HBInfo
        contains the meta-data for write; derived from *m* when omitted.

    Returns
    -------
    None

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format

    Examples
    --------
    We can read and write a harwell-boeing format file:

    >>> from scipy.io.harwell_boeing import hb_read, hb_write
    >>> from scipy.sparse import csr_matrix, eye
    >>> data = csr_matrix(eye(3))  # create a sparse matrix
    >>> hb_write("data.hb", data)  # write a hb file
    >>> print(hb_read("data.hb"))  # read a hb file
      (0, 0)	1.0
      (1, 1)	1.0
      (2, 2)	1.0
    """
    # The writer works on CSC; convert once up front, without copying when
    # the input already is CSC.
    csc = m.tocsc(copy=False)
    if hb_info is None:
        hb_info = HBInfo.from_data(csc)

    def _set_matrix(fid):
        return HBFile(fid, hb_info).write_matrix(csc)

    if hasattr(path_or_open_file, 'write'):
        return _set_matrix(path_or_open_file)
    with open(path_or_open_file, 'w') as f:
        return _set_matrix(f)
| |
import json
import os
import re
from django.core.urlresolvers import reverse
from django.test import TestCase
from corehq.apps.app_manager.tests.util import add_build
from corehq.apps.app_manager.util import new_careplan_module
from corehq.apps.app_manager.views import AppSummaryView
from corehq.apps.builds.models import BuildSpec
from corehq import toggles
from corehq.apps.users.models import WebUser
from corehq.apps.domain.models import Domain
from corehq.apps.app_manager.models import (
AdvancedModule,
Application,
Module,
ReportModule,
ShadowModule,
)
from .test_form_versioning import BLANK_TEMPLATE
class TestViews(TestCase):
    """Smoke tests for the app_manager views against a real Application doc.

    Class-level fixtures (domain, superuser, build, app) are created once in
    setUpClass and torn down in tearDownClass.
    """
    app = None
    build = None

    @classmethod
    def setUpClass(cls):
        # NOTE(review): no super().setUpClass() call here (mirrors original);
        # confirm whether Django's class-level fixture setup is needed.
        cls.domain = Domain(name='app-manager-testviews-domain', is_active=True)
        cls.domain.save()
        cls.username = 'cornelius'
        cls.password = 'fudge'
        cls.user = WebUser.create(cls.domain.name, cls.username, cls.password, is_active=True)
        cls.user.is_superuser = True
        cls.user.save()
        cls.build = add_build(version='2.7.0', build_number=20655)
        cls.app = Application.new_app(cls.domain.name, "TestApp")
        cls.app.build_spec = BuildSpec.from_string('2.7.0/latest')
        # Enable the custom-properties feature flag for this test domain.
        toggles.CUSTOM_PROPERTIES.set("domain:{domain}".format(domain=cls.domain.name), True)

    def setUp(self):
        # Every test runs as the superuser created in setUpClass.
        self.client.login(username=self.username, password=self.password)

    @classmethod
    def tearDownClass(cls):
        cls.user.delete()
        cls.build.delete()
        if cls.app:
            cls.app.delete()
        cls.domain.delete()

    def test_download_file_bad_xform_404(self):
        '''
        This tests that the `download_file` view returns
        HTTP code 404 for XML that cannot be generated...
        in some sense it does not exist.
        '''
        module = self.app.add_module(Module.new_module("Module0", "en"))

        # These builds are checked in to the repo for use in tests
        build1 = {'version': '1.2.dev', 'build_number': 7106}
        build2 = {'version': '2.7.0', 'build_number': 20655}
        add_build(**build1)
        add_build(**build2)

        with open(os.path.join(os.path.dirname(__file__), "data", "invalid_form.xml")) as f:
            xform_str = f.read()
        self.app.new_form(module.id, name="Form0-0", attachment=xform_str, lang="en")
        self.app.save()

        response = self.client.get(reverse('app_download_file', kwargs=dict(domain=self.domain.name,
                                                                            app_id=self.app.get_id,
                                                                            path='modules-0/forms-0.xml')))
        self.assertEqual(response.status_code, 404)

    def test_edit_commcare_profile(self):
        """POSTed custom properties should be echoed back as 'changed'."""
        app = Application.new_app(self.domain.name, "TestApp")
        app.save()
        data = {
            "custom_properties": {
                "random": "value",
                "another": "value"
            }
        }

        response = self.client.post(reverse('edit_commcare_profile', args=[self.domain.name, app._id]),
                                    json.dumps(data),
                                    content_type='application/json')

        content = json.loads(response.content)
        custom_properties = content["changed"]["custom_properties"]

        self.assertEqual(custom_properties["random"], "value")
        self.assertEqual(custom_properties["another"], "value")

        # A second POST overwrites the previous values.
        data = {
            "custom_properties": {
                "random": "changed",
            }
        }

        response = self.client.post(reverse('edit_commcare_profile', args=[self.domain.name, app._id]),
                                    json.dumps(data),
                                    content_type='application/json')

        content = json.loads(response.content)
        custom_properties = content["changed"]["custom_properties"]

        self.assertEqual(custom_properties["random"], "changed")

    def _test_status_codes(self, names, kwargs, follow=False):
        # Assert that each named view renders successfully with the given
        # URL kwargs.
        for name in names:
            response = self.client.get(reverse(name, kwargs=kwargs), follow=follow)
            self.assertEqual(response.status_code, 200)

    def _json_content_from_get(self, name, kwargs, data=None):
        # GET a view and return its JSON body.  `data` defaults to None
        # instead of `{}`: a mutable default argument would be shared
        # between calls (classic Python pitfall).
        response = self.client.get(reverse(name, kwargs=kwargs), data or {})
        self.assertEqual(response.status_code, 200)
        return json.loads(response.content)

    def test_basic_app(self):
        """End-to-end check of the main app views plus a release build."""
        module = self.app.add_module(Module.new_module("Module0", "en"))
        form = self.app.new_form(module.id, "Form0", "en", attachment=BLANK_TEMPLATE.format(xmlns='xmlns-0.0'))
        self.app.save()

        kwargs = {
            'domain': self.domain.name,
            'app_id': self.app.id,
        }
        self._test_status_codes([
            'view_app',
            'release_manager',
            AppSummaryView.urlname,
        ], kwargs)

        self.build = self.app.make_build()
        self.build.save()

        content = self._json_content_from_get('current_app_version', {
            'domain': self.domain.name,
            'app_id': self.app.id,
        })
        self.assertEqual(content['currentVersion'], 2)

        content = self._json_content_from_get('paginate_releases', {
            'domain': self.domain.name,
            'app_id': self.app.id,
        }, {'limit': 5})
        self.assertEqual(len(content), 1)
        content = content[0]
        self.assertEqual(content['copy_of'], self.app.id)

        kwargs['module_id'] = module.id
        self._test_status_codes(['view_module'], kwargs)

        kwargs['form_id'] = form.id
        self._test_status_codes(['view_form', 'form_source'], kwargs)

    def test_advanced_module(self):
        module = self.app.add_module(AdvancedModule.new_module("Module0", "en"))
        self.app.save()
        self._test_status_codes(['view_module'], {
            'domain': self.domain.name,
            'app_id': self.app.id,
            'module_id': module.id,
        })

    def test_report_module(self):
        module = self.app.add_module(ReportModule.new_module("Module0", "en"))
        self.app.save()
        self._test_status_codes(['view_module'], {
            'domain': self.domain.name,
            'app_id': self.app.id,
            'module_id': module.id,
        })

    def test_shadow_module(self):
        module = self.app.add_module(ShadowModule.new_module("Module0", "en"))
        self.app.save()
        self._test_status_codes(['view_module'], {
            'domain': self.domain.name,
            'app_id': self.app.id,
            'module_id': module.id,
        })

    def test_careplan_module(self):
        # A careplan module targets another module's case type.
        target_module = self.app.add_module(Module.new_module("Module0", "en"))
        target_module.case_type = 'person'
        module = new_careplan_module(self.app, 'Module1', 'en', target_module)
        self.app.save()
        self._test_status_codes(['view_module'], {
            'domain': self.domain.name,
            'app_id': self.app.id,
            'module_id': module.id,
        })

    def test_dashboard(self):
        self._test_status_codes(['dashboard_new_user'], {
            'domain': self.domain.name,
        })
        # This redirects to the dashboard
        self._test_status_codes(['default_app'], {
            'domain': self.domain.name,
        }, True)
class TestTemplateAppViews(TestCase):
    """Tests for creating apps from the built-in templates."""

    @classmethod
    def setUpClass(cls):
        cls.domain = Domain(name='template-app-testviews-domain', is_active=True)
        cls.domain.save()
        cls.username = 'cornelius'
        cls.password = 'fudge'
        cls.user = WebUser.create(cls.domain.name, cls.username, cls.password, is_active=True)
        cls.user.is_superuser = True
        cls.user.save()

    def setUp(self):
        self.client.login(username=self.username, password=self.password)

    @classmethod
    def tearDownClass(cls):
        cls.user.delete()
        cls.domain.delete()

    def _check_response(self, response):
        # The view should redirect into the first form of the new app.
        self.assertEqual(response.status_code, 302)
        redirect_location = response['Location']
        # Pull the 32-hex-digit app id out of the redirect URL.
        [app_id] = re.compile(r'[a-fA-F0-9]{32}').findall(redirect_location)
        self.assertTrue(
            redirect_location.endswith('{}/modules-0/forms-0/'.format(app_id)))
        # Remove the app document created as a side effect of the GET.
        self.addCleanup(lambda: Application.get_db().delete_doc(app_id))

    def _get_template_app(self, slug):
        # Fire the template view without following the redirect.
        return self.client.get(reverse('app_from_template', kwargs={
            'domain': self.domain.name,
            'slug': slug
        }), follow=False)

    def test_case_management_app_from_template(self):
        self._check_response(self._get_template_app('case_management'))

    def test_survey_app_from_template(self):
        self._check_response(self._get_template_app('survey'))

    def test_default_new_app(self):
        response = self.client.get(reverse('default_new_app', kwargs={
            'domain': self.domain.name,
        }), follow=False)
        self._check_response(response)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing topi conv2d operator for VTA"""
import json
import os
import pytest
import numpy as np
from collections import namedtuple
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from tvm.contrib import util
from tvm.contrib.pickle_memoize import memoize
from tvm import topi
import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
# Descriptor for one conv2d workload: input geometry (batch, height, width,
# in_filter), kernel geometry (out_filter, hkernel, wkernel), padding and
# strides in both spatial dimensions.
Workload = namedtuple(
    "Conv2DWorkload",
    "batch height width in_filter out_filter "
    "hkernel wkernel hpad wpad hstride wstride",
)
# Get batch info from env
env = vta.get_env()

# ResNet18 workloads
resnet_wkls = [
    # Workloads of resnet18 on imagenet
    # C1 is commented out: its 3 input channels do not pack into BLOCK_IN.
    # ('resnet-18.C1', Workload(env.BATCH, 224, 224, 3, 64, 7, 7, 3, 3, 2, 2)),
    ("resnet-18.C2", Workload(env.BATCH, 56, 56, 64, 64, 3, 3, 1, 1, 1, 1)),
    ("resnet-18.C3", Workload(env.BATCH, 56, 56, 64, 128, 3, 3, 1, 1, 2, 2)),
    ("resnet-18.C4", Workload(env.BATCH, 56, 56, 64, 128, 1, 1, 0, 0, 2, 2)),
    ("resnet-18.C5", Workload(env.BATCH, 28, 28, 128, 128, 3, 3, 1, 1, 1, 1)),
    ("resnet-18.C6", Workload(env.BATCH, 28, 28, 128, 256, 3, 3, 1, 1, 2, 2)),
    ("resnet-18.C7", Workload(env.BATCH, 28, 28, 128, 256, 1, 1, 0, 0, 2, 2)),
    ("resnet-18.C8", Workload(env.BATCH, 14, 14, 256, 256, 3, 3, 1, 1, 1, 1)),
    ("resnet-18.C9", Workload(env.BATCH, 14, 14, 256, 512, 3, 3, 1, 1, 2, 2)),
    ("resnet-18.C10", Workload(env.BATCH, 14, 14, 256, 512, 1, 1, 0, 0, 2, 2)),
    ("resnet-18.C11", Workload(env.BATCH, 7, 7, 512, 512, 3, 3, 1, 1, 1, 1)),
]
# FIXME: we need a custom clip operator to circumvent a pattern detection limitation
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
    """Unlike topi's current clip, put min and max into two stages."""
    lo = tvm.tir.const(a_min, x.dtype)
    hi = tvm.tir.const(a_max, x.dtype)
    # First stage clamps from above, second from below.
    clipped_above = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), hi), name="clipA")
    clipped = te.compute(clipped_above.shape, lambda *i: tvm.te.max(clipped_above(*i), lo), name="clipB")
    return clipped
def run_conv2d(env, remote, wl, target, check_correctness=True, print_ir=False, samples=4):
    """Build, run, and optionally verify one conv2d workload on `target`.

    Parameters
    ----------
    env : vta environment (provides BATCH/BLOCK_IN/BLOCK_OUT, dtypes, TARGET)
    remote : RPC session used to upload and run the compiled module
    wl : Workload namedtuple describing the conv2d shape
    target : tvm target; "arm_cpu" or "vta" is expected in target.keys
    check_correctness : bool, compare against a numpy reference when True
    print_ir : bool, dump the lowered IR of the schedule when True
    samples : int, number of timing runs for the evaluator

    Returns
    -------
    (correct, cost, stats) : bool, time-evaluator result, simulator stats dict
    """
    # Workload assertions
    assert wl.hpad == wl.wpad

    # Perform packing only if we are targeting the accelerator
    if "arm_cpu" in target.keys:
        data_pack = False
        layout = "NCHW"
        conv2d_fcompute = topi.arm_cpu.conv2d_nchw_spatial_pack
        conv2d_fschedule = topi.arm_cpu.schedule_conv2d_nchw_spatial_pack
    elif "vta" in target.keys:
        data_pack = True
        layout = "NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN)
        conv2d_fcompute = vta.top.conv2d_packed
        conv2d_fschedule = vta.top.schedule_conv2d_packed
    # NOTE(review): any other target falls through with data_pack/layout
    # undefined and raises NameError below — confirm only these two targets
    # are ever passed.

    # Derive shapes depending upon packing
    a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)
    w_shape = (wl.out_filter, wl.in_filter, wl.hkernel, wl.wkernel)
    b_shape = (wl.batch, wl.out_filter, 1, 1)
    if data_pack:
        # Packed layouts tile batch and channel dims into BATCH/BLOCK_* units.
        data_shape = (
            wl.batch // env.BATCH,
            wl.in_filter // env.BLOCK_IN,
            wl.height,
            wl.width,
            env.BATCH,
            env.BLOCK_IN,
        )
        kernel_shape = (
            wl.out_filter // env.BLOCK_OUT,
            wl.in_filter // env.BLOCK_IN,
            wl.hkernel,
            wl.wkernel,
            env.BLOCK_OUT,
            env.BLOCK_IN,
        )
        bias_shape = (
            wl.batch // env.BATCH,
            wl.out_filter // env.BLOCK_OUT,
            1,
            1,
            env.BATCH,
            env.BLOCK_OUT,
        )
    else:
        data_shape = a_shape
        kernel_shape = w_shape
        bias_shape = b_shape
    data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
    bias = te.placeholder(bias_shape, name="bias", dtype=env.acc_dtype)
    padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))

    # Define base computation schedule
    with target:
        if data_pack:
            res = conv2d_fcompute(
                data, kernel, (wl.hstride, wl.wstride), padding, (1, 1), layout, env.acc_dtype
            )
        else:
            res = conv2d_fcompute(
                data, kernel, (wl.hstride, wl.wstride), padding, (1, 1), env.acc_dtype
            )
        # Post-ops: requantize shift, bias add, clip to OUT_WIDTH-1 signed
        # range ((1 << (OUT_WIDTH - 1)) - 1: shift binds tighter than -).
        res = topi.right_shift(res, 8)
        res = topi.add(res, bias)
        res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res = topi.cast(res, env.out_dtype)
        # Derive base schedule
        s = conv2d_fschedule([res])
        if print_ir:
            print(vta.lower(s, [data, kernel, bias, res], simple_mode=True))

    # Derive number of ops
    fout_height = (wl.height + 2 * wl.hpad - wl.hkernel) // wl.hstride + 1
    fout_width = (wl.width + 2 * wl.wpad - wl.wkernel) // wl.wstride + 1
    num_ops = (
        2
        * wl.batch
        * fout_height
        * fout_width
        * wl.hkernel
        * wl.wkernel
        * wl.out_filter
        * wl.in_filter
    )

    # @memoize("vta.tests.test_benchmark_topi.conv2d.verify_nhwc")
    def get_ref_data():
        # derive min max for act, wgt, and bias types (max non inclusive)
        a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
        w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
        b_min, b_max = 0 - 1 << (env.INP_WIDTH + env.WGT_WIDTH - 2), 1 << (
            env.INP_WIDTH + env.WGT_WIDTH - 2
        )
        a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
        w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
        b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
        # Reference result computed in the accumulator dtype.
        r_np = tvm.topi.testing.conv2d_nchw_python(
            a_np.astype(env.acc_dtype),
            w_np.astype(env.acc_dtype),
            (wl.hstride, wl.wstride),
            wl.hpad,
        ).astype(env.acc_dtype)
        return a_np, w_np, b_np, r_np

    # Data in original format
    data_np, kernel_np, bias_np, res_ref = get_ref_data()
    if data_pack:
        # Repack NCHW arrays into the tiled NCHWnc layouts derived above.
        data_np = data_np.reshape(
            wl.batch // env.BATCH,
            env.BATCH,
            wl.in_filter // env.BLOCK_IN,
            env.BLOCK_IN,
            wl.height,
            wl.width,
        ).transpose((0, 2, 4, 5, 1, 3))
        kernel_np = kernel_np.reshape(
            wl.out_filter // env.BLOCK_OUT,
            env.BLOCK_OUT,
            wl.in_filter // env.BLOCK_IN,
            env.BLOCK_IN,
            wl.hkernel,
            wl.wkernel,
        ).transpose((0, 2, 4, 5, 1, 3))
        bias_np = bias_np.reshape(
            wl.batch // env.BATCH, wl.out_filter // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT
        )

    # Build
    if "vta" in target.keys:
        mod = vta.build(
            s, [data, kernel, bias, res], target=target, target_host=env.target_host, name="conv2d"
        )
    else:
        mod = tvm.build(
            s, [data, kernel, bias, res], target=target, target_host=env.target_host, name="conv2d"
        )
    # Ship the compiled module to the remote device and load it back.
    temp = util.tempdir()
    mod.save(temp.relpath("conv2d.o"))
    remote.upload(temp.relpath("conv2d.o"))
    f = remote.load_module("conv2d.o")
    ctx = remote.context(str(target))

    res_np = np.zeros(topi.util.get_const_tuple(res.shape)).astype(res.dtype)
    data_arr = tvm.nd.array(data_np, ctx)
    kernel_arr = tvm.nd.array(kernel_np, ctx)
    bias_arr = tvm.nd.array(bias_np, ctx)
    res_arr = tvm.nd.array(res_np, ctx)
    time_f = f.time_evaluator("conv2d", ctx, number=samples)

    # In vta sim mode, collect simulator runtime statistics
    stats = {}
    cost = None
    if env.TARGET in ["sim", "tsim"]:
        # Check if we're in local RPC mode (allows us to rebuild the
        # runtime on the fly when varying the VTA designs)
        local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
        if local_rpc:
            if env.TARGET == "sim":
                remote.get_function("vta.simulator.profiler_clear")()
            else:
                remote.get_function("vta.tsim.profiler_clear")()
            cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
            if env.TARGET == "sim":
                stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
            else:
                stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
        else:
            simulator.clear_stats()
            cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
            stats = simulator.stats()
    else:
        cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)

    # Check correctness
    correct = False
    if check_correctness:
        res_orig = res_arr.asnumpy()
        if data_pack:
            # Unpack the tiled result/bias back to plain NCHW for comparison.
            res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(
                wl.batch, wl.out_filter, fout_height, fout_width
            )
            bias_np = bias_np.transpose((0, 4, 1, 5, 2, 3)).reshape(wl.batch, wl.out_filter, 1, 1)
        # Apply the same post-ops (shift, bias, clip, cast) to the reference.
        res_ref = res_ref >> env.WGT_WIDTH
        res_ref += bias_np
        res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res_ref = res_ref.astype(env.out_dtype)
        correct = np.allclose(res_orig, res_ref)

    gops = (num_ops / cost.mean) / float(10 ** 9)
    status = "PASSED" if correct else "FAILED"
    if "arm_cpu" in target.keys:
        device = "CPU"
    elif "vta" in target.keys:
        device = "VTA"
    print("%s CONV2D TEST %s: Time cost = %g sec/op, %g GOPS" % (device, status, cost.mean, gops))

    return correct, cost, stats
@pytest.mark.parametrize("device", ["vta", "arm_cpu"])
def test_conv2d(device):
    """Run every ResNet-18 conv2d workload on the requested device."""
    def _impl(env, remote):
        if device == "vta":
            target = env.target
            if env.TARGET not in ["sim", "tsim"]:
                # Hardware runs require an RPC session plus a freshly
                # programmed FPGA and runtime.
                assert tvm.runtime.enabled("rpc")
                program_fpga(remote, bitstream=None)
                reconfig_runtime(remote)
        elif device == "arm_cpu":
            target = env.target_vta_cpu
        with autotvm.tophub.context(target):  # load pre-tuned schedule parameters
            for _, workload in resnet_wkls:
                print(workload)
                run_conv2d(env, remote, workload, target)

    vta.testing.run(_impl)
# Allow running both device variants directly as a script.
if __name__ == "__main__":
    test_conv2d(device="arm_cpu")
    test_conv2d(device="vta")
| |
from django.conf import settings
from django.core.cache import cache
from django.utils.cache import patch_response_headers, add_never_cache_headers, get_max_age
from django.utils.decorators import method_decorator
from django.utils.functional import wraps
from django_cache_middleware.decorators.utils import _combine_functions, _quotify_function
def add_cache_headers(cache_timeout=None, method=False):
    """
    Adds some useful headers to the given HttpResponse object:
    ETag, Last-Modified, Expires and Cache-Control

    Each header is only added if it isn't already set.

    cache_timeout is in seconds.

    The CACHE_MIDDLEWARE_SECONDS setting is used by default.
    """
    # Support both the bare form (@add_cache_headers) and the called form
    # (@add_cache_headers(60)).
    viewfunc = None
    if callable(cache_timeout):
        viewfunc, cache_timeout = cache_timeout, None

    def decorator(viewfunc):
        @wraps(viewfunc)
        def patched_view(request, *args, **kwargs):
            if settings.CACHE_MIDDLEWARE_ANONYMOUS_ONLY:
                # The cache middleware only caches for anonymous users, so
                # we need to know the user's authentication status.  The
                # middleware supports the vary_on_view decorators.
                authenticated = request.user.is_authenticated()
            else:
                # Normal use-case: avoid touching request.user entirely.
                # None means "we did not even check".
                authenticated = None

            if authenticated is False and not hasattr(viewfunc, '_vary_on_view'):
                # Known-anonymous user and the view declares no "vary on
                # view" behavior of its own: cache all anonymous users the
                # same by varying on authentication status.
                response = vary_on_authentication_status(viewfunc)(request, *args, **kwargs)
            else:
                # Either unchecked or authenticated: call the view as-is.
                response = viewfunc(request, *args, **kwargs)

            # Only successful, plain-HTTP GET responses get cache headers;
            # checks run in the same order as before (method, status,
            # scheme) and short-circuit.
            cacheable = (request.method == 'GET'
                         and response.status_code == 200
                         and not request.is_secure())
            if not cacheable:
                return response

            if authenticated is True:
                # Confirmed-authenticated users never get cached responses.
                add_never_cache_headers(response)
            else:
                # Anonymous or unchecked: add the caching headers.
                patch_response_headers(response, cache_timeout=cache_timeout)
            return response
        return patched_view

    if method:
        decorator = method_decorator(decorator)
    return decorator(viewfunc) if viewfunc else decorator
def simple_response_cache(view_func):
    """
    This does not use any site-specific cache keys, so only use it where the
    response does not vary by site. Place this above add_cache_headers
    when decorating a view.
    """
    @wraps(view_func)
    def wrapped_view(request, *args, **kwargs):
        # Build a cache key using only the view function and the arguments;
        # nothing related to the request or current site.
        view_path = '.'.join((view_func.__module__, view_func.__name__))
        # NOTE(review): HashableTuple is not among this module's visible
        # imports — presumably defined/imported elsewhere in the package;
        # verify, otherwise this raises NameError at request time.
        signature = HashableTuple((args, kwargs)).hash
        cache_key = ':'.join((view_path, signature))
        # Retrieve the response from the cache, or generate a new one.
        # A request flagged with _purging bypasses the cache read so the
        # fresh response overwrites the stored one.
        if getattr(request, '_purging', False):
            response = None
        else:
            response = cache.get(cache_key)
        if response is None:
            response = view_func(request, *args, **kwargs)
            # Only store responses that carry an explicit max-age header.
            max_age = get_max_age(response)
            if max_age is not None:
                cache.set(cache_key, response, max_age)
        else:
            response['X-From-Cache'] = True
        # Stop the middleware from caching it too.
        request._cache_update_cache = False
        return response
    return wrapped_view
def cache_upstream(cache_timeout=None):
    """
    Add HTTP cache headers to responses, but do not actually cache them.
    Use this to cache a URL in Varnish but not the page-level cache.
    """
    # Support both bare usage (@cache_upstream) and parameterised usage
    # (@cache_upstream(60)): when called directly with a view function,
    # treat it as having been given no timeout.
    bare_view = cache_timeout if callable(cache_timeout) else None
    if bare_view is not None:
        cache_timeout = None

    def decorator(viewfunc):
        @wraps(viewfunc)
        def patched_view(request, *args, **kwargs):
            response = viewfunc(request, *args, **kwargs)
            # Add the upstream cache headers, then tell the page-level
            # cache middleware not to store the response itself.
            patch_response_headers(response, cache_timeout=cache_timeout)
            request._cache_update_cache = False
            return response
        return patched_view

    return decorator(bare_view) if bare_view is not None else decorator
def vary_on_view(value_func):
    """
    A view decorator that allows the cache middleware to cache responses on
    a per-request basis, using the result of the value_func to generate the
    response's cache key.

    The argument "value_func" must be a view-like function that accepts the
    same arguments as the view that is being decorated. It must return a value
    to be used in the response's cache key.

    This decorator adds a custom response header, which the cache middleware
    will use when generating the response's cache key.

    If the response has Vary:Cookie set in its headers (it probably will if
    vary_on_view is needed), the cache middleware will ignore the cookie value
    when generating the response's cache key. The actual response is not
    affected, which allows the client to do its own caching as usual.
    """
    value_func = _quotify_function(value_func)

    def decorator(func):
        # The decorated view: run the view, then append the computed value
        # to a custom header for the cache middleware to pick up. Only
        # successful responses are marked.
        def inner(request, *args, **kwargs):
            response = func(request, *args, **kwargs)
            if response.status_code == 200:
                header = 'X-Vary-On-View'
                if not response.has_header(header):
                    response[header] = ''
                response[header] += value_func(request, *args, **kwargs)
            return response

        # Attach the value_func to the view for the middleware to use.
        # Nested use of this decorator combines the functions into one.
        inner._vary_on_view = (
            _combine_functions(func._vary_on_view, value_func)
            if hasattr(func, '_vary_on_view')
            else value_func)
        return inner

    return decorator
@vary_on_view
def vary_on_authentication_status(request, *args, **kwargs):
    """
    All authenticated users share a single cached response.
    All anonymous users share a single cached response.
    """
    # The cache key component is just the authentication flag, so at
    # most two variants of the response are ever cached.
    return request.user.is_authenticated()
@vary_on_view
def vary_on_staff_status(request, *args, **kwargs):
    """
    All staff members share a single cached response.
    All other users share a single cached response.
    """
    # The cache key component is just the staff flag, so at most two
    # variants of the response are ever cached.
    return request.user.is_staff
@vary_on_view
def vary_on_user_type(request, *args, **kwargs):
    """
    All staff members share a single cached response.
    All regular users share a single cached response.
    All anonymous users share a single cached response.
    """
    # Three possible key values: 0 = anonymous, 1 = authenticated
    # non-staff, 2 = authenticated staff.
    # NOTE(review): presumes staff users are always authenticated; an
    # unauthenticated user with is_staff set would also score 1 -- confirm
    # that cannot happen in this project's auth backend.
    return int(request.user.is_authenticated()) + int(request.user.is_staff)
@vary_on_view
def vary_on_user_id(request, *args, **kwargs):
    """
    Authenticated users each have their own cached response.
    All anonymous users share a single cached response.

    Avoids caching individual responses for anonymous users when
    your view gives all anonymous users the same response.
    """
    # Anonymous visitors all collapse onto the sentinel key 0; every
    # authenticated user gets a key derived from their unique user id.
    user = request.user
    return user.id if user.is_authenticated() else 0
| |
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Management Interface used by iLO modules."""
from unittest import mock
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as conductor_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import inspect as ilo_inspect
from ironic.drivers.modules.ilo import power as ilo_power
from ironic.drivers.modules import inspect_utils
from ironic.tests.unit.drivers.modules.ilo import test_common
class IloInspectTestCase(test_common.BaseIloTest):
    """Tests for the IloInspect hardware-inspection interface."""

    def test_get_properties(self):
        # The inspect interface must advertise the common iLO driver
        # properties plus the required and optional SNMP properties.
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            properties = ilo_common.REQUIRED_PROPERTIES.copy()
            properties.update(ilo_common.SNMP_PROPERTIES)
            properties.update(ilo_common.SNMP_OPTIONAL_PROPERTIES)
            self.assertEqual(properties,
                             task.driver.inspect.get_properties())

    @mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
                       autospec=True)
    def test_validate(self, driver_info_mock):
        # validate() should simply delegate to parse_driver_info().
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.inspect.validate(task)
            driver_info_mock.assert_called_once_with(task.node)

    @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
                       autospec=True)
    @mock.patch.object(inspect_utils, 'create_ports_if_not_exist',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_inspect_essential_ok(self, get_ilo_object_mock,
                                  power_mock,
                                  get_essential_mock,
                                  create_port_mock,
                                  get_capabilities_mock):
        # Happy path: node already powered on, essential properties are
        # stored on the node and ports are created for the reported MACs.
        ilo_object_mock = get_ilo_object_mock.return_value
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
        capabilities = {}
        result = {'properties': properties, 'macs': macs}
        get_essential_mock.return_value = result
        get_capabilities_mock.return_value = capabilities
        power_mock.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.inspect.inspect_hardware(task)
            self.assertEqual(properties, task.node.properties)
            power_mock.assert_called_once_with(mock.ANY, task)
            get_essential_mock.assert_called_once_with(task.node,
                                                       ilo_object_mock)
            get_capabilities_mock.assert_called_once_with(task.node,
                                                          ilo_object_mock)
            create_port_mock.assert_called_once_with(
                task, ['aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb'])

    @mock.patch.object(ilo_inspect.LOG, 'warning',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
                       autospec=True)
    @mock.patch.object(inspect_utils, 'create_ports_if_not_exist',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_inspect_essential_ok_local_gb_zero(self, get_ilo_object_mock,
                                                power_mock,
                                                get_essential_mock,
                                                create_port_mock,
                                                get_capabilities_mock,
                                                log_mock):
        # When the server reports local_gb as 0, inspection must keep the
        # pre-existing local_gb value on the node and log a warning.
        ilo_object_mock = get_ilo_object_mock.return_value
        properties = {'memory_mb': '512', 'local_gb': 0,
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
        capabilities = {}
        result = {'properties': properties, 'macs': macs}
        get_essential_mock.return_value = result
        get_capabilities_mock.return_value = capabilities
        power_mock.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            # Seed the node with a non-zero local_gb that must survive.
            properties = task.node.properties
            properties['local_gb'] = 10
            task.node.properties = properties
            task.node.save()
            expected_properties = {'memory_mb': '512', 'local_gb': 10,
                                   'cpus': '1', 'cpu_arch': 'x86_64'}
            task.driver.inspect.inspect_hardware(task)
            self.assertEqual(expected_properties, task.node.properties)
            power_mock.assert_called_once_with(mock.ANY, task)
            get_essential_mock.assert_called_once_with(task.node,
                                                       ilo_object_mock)
            self.assertTrue(log_mock.called)
            get_capabilities_mock.assert_called_once_with(task.node,
                                                          ilo_object_mock)
            create_port_mock.assert_called_once_with(
                task, ['aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb'])

    @mock.patch.object(ilo_inspect.LOG, 'warning',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
                       autospec=True)
    @mock.patch.object(inspect_utils, 'create_ports_if_not_exist',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_inspect_ok_gen8(self, get_ilo_object_mock,
                             power_mock,
                             get_essential_mock,
                             create_port_mock,
                             get_capabilities_mock,
                             log_mock):
        # Gen8 servers: the server_model capability is stored on the node
        # and a warning is logged (Gen8-specific behaviour).
        ilo_object_mock = get_ilo_object_mock.return_value
        properties = {'memory_mb': '512', 'local_gb': 10,
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
        capabilities = {'server_model': 'Gen8'}
        result = {'properties': properties, 'macs': macs}
        get_essential_mock.return_value = result
        get_capabilities_mock.return_value = capabilities
        power_mock.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            expected_properties = {'memory_mb': '512', 'local_gb': 10,
                                   'cpus': '1', 'cpu_arch': 'x86_64',
                                   'capabilities': 'server_model:Gen8'}
            task.driver.inspect.inspect_hardware(task)
            self.assertEqual(expected_properties, task.node.properties)
            power_mock.assert_called_once_with(mock.ANY, task)
            get_essential_mock.assert_called_once_with(task.node,
                                                       ilo_object_mock)
            self.assertTrue(log_mock.called)
            get_capabilities_mock.assert_called_once_with(task.node,
                                                          ilo_object_mock)
            create_port_mock.assert_called_once_with(
                task, ['aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb'])

    @mock.patch.object(ilo_inspect.LOG, 'warning',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_security_parameters',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
                       autospec=True)
    @mock.patch.object(inspect_utils, 'create_ports_if_not_exist',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_inspect_ok_gen10(self, get_ilo_object_mock,
                              power_mock,
                              get_essential_mock,
                              create_port_mock,
                              get_capabilities_mock,
                              get_security_params_mock,
                              log_mock):
        # Gen10 servers additionally report security parameters, which are
        # stored on the node; no warning is expected on this path.
        ilo_object_mock = get_ilo_object_mock.return_value
        properties = {'memory_mb': '512', 'local_gb': 10,
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        macs = {'NIC.LOM.1.1': 'aa:aa:aa:aa:aa:aa'}
        capabilities = {'server_model': 'Gen10'}
        security_params = (
            {'security_parameters': {'Password Complexity': 'ok'}})
        result = {'properties': properties, 'macs': macs}
        get_essential_mock.return_value = result
        get_capabilities_mock.return_value = capabilities
        get_security_params_mock.return_value = security_params
        power_mock.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            expected_properties = {
                'memory_mb': '512', 'local_gb': 10, 'cpus': '1',
                'cpu_arch': 'x86_64', 'capabilities': 'server_model:Gen10',
                'security_parameters': {'Password Complexity': 'ok'}}
            task.driver.inspect.inspect_hardware(task)
            self.assertEqual(expected_properties, task.node.properties)
            power_mock.assert_called_once_with(mock.ANY, task)
            get_essential_mock.assert_called_once_with(task.node,
                                                       ilo_object_mock)
            self.assertFalse(log_mock.called)
            get_capabilities_mock.assert_called_once_with(task.node,
                                                          ilo_object_mock)
            create_port_mock.assert_called_once_with(
                task, ['aa:aa:aa:aa:aa:aa'])

    @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
                       autospec=True)
    @mock.patch.object(inspect_utils, 'create_ports_if_not_exist',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
                       autospec=True)
    @mock.patch.object(conductor_utils, 'node_power_action', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_inspect_essential_ok_power_off(self, get_ilo_object_mock,
                                            power_mock,
                                            set_power_mock,
                                            get_essential_mock,
                                            create_port_mock,
                                            get_capabilities_mock):
        # If the node is powered off, inspection must power it on first
        # (via node_power_action) before gathering properties.
        ilo_object_mock = get_ilo_object_mock.return_value
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
        capabilities = {}
        result = {'properties': properties, 'macs': macs}
        get_essential_mock.return_value = result
        get_capabilities_mock.return_value = capabilities
        power_mock.return_value = states.POWER_OFF
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.inspect.inspect_hardware(task)
            self.assertEqual(properties, task.node.properties)
            power_mock.assert_called_once_with(mock.ANY, task)
            set_power_mock.assert_any_call(task, states.POWER_ON)
            get_essential_mock.assert_called_once_with(task.node,
                                                       ilo_object_mock)
            get_capabilities_mock.assert_called_once_with(task.node,
                                                          ilo_object_mock)
            create_port_mock.assert_called_once_with(
                task, ['aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb'])

    @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
                       autospec=True)
    @mock.patch.object(inspect_utils, 'create_ports_if_not_exist',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_inspect_essential_capabilities_ok(self, get_ilo_object_mock,
                                               power_mock,
                                               get_essential_mock,
                                               create_port_mock,
                                               get_capabilities_mock):
        # Discovered capabilities are serialised into the node's
        # 'capabilities' property as a key:value string.
        ilo_object_mock = get_ilo_object_mock.return_value
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
        capability_str = 'sriov_enabled:true'
        capabilities = {'sriov_enabled': 'true'}
        result = {'properties': properties, 'macs': macs}
        get_essential_mock.return_value = result
        get_capabilities_mock.return_value = capabilities
        power_mock.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.inspect.inspect_hardware(task)
            expected_properties = {'memory_mb': '512', 'local_gb': '10',
                                   'cpus': '1', 'cpu_arch': 'x86_64',
                                   'capabilities': capability_str}
            self.assertEqual(expected_properties, task.node.properties)
            power_mock.assert_called_once_with(mock.ANY, task)
            get_essential_mock.assert_called_once_with(task.node,
                                                       ilo_object_mock)
            get_capabilities_mock.assert_called_once_with(task.node,
                                                          ilo_object_mock)
            create_port_mock.assert_called_once_with(
                task, ['aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb'])

    @mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
                       autospec=True)
    @mock.patch.object(inspect_utils, 'create_ports_if_not_exist',
                       spec_set=True, autospec=True)
    @mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
                       autospec=True)
    @mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
                       autospec=True)
    def test_inspect_essential_capabilities_exist_ok(self, get_ilo_object_mock,
                                                     power_mock,
                                                     get_essential_mock,
                                                     create_port_mock,
                                                     get_capabilities_mock):
        # Pre-existing capabilities on the node are merged with (not
        # replaced by) the newly discovered ones; order is not guaranteed,
        # hence the set comparison.
        ilo_object_mock = get_ilo_object_mock.return_value
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1', 'cpu_arch': 'x86_64',
                      'somekey': 'somevalue'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
        result = {'properties': properties, 'macs': macs}
        capabilities = {'sriov_enabled': 'true'}
        get_essential_mock.return_value = result
        get_capabilities_mock.return_value = capabilities
        power_mock.return_value = states.POWER_ON
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.node.properties = {'capabilities': 'boot_mode:uefi'}
            expected_capabilities = ('sriov_enabled:true,'
                                     'boot_mode:uefi')
            set1 = set(expected_capabilities.split(','))
            task.driver.inspect.inspect_hardware(task)
            end_capabilities = task.node.properties['capabilities']
            set2 = set(end_capabilities.split(','))
            self.assertEqual(set1, set2)
            expected_properties = {'memory_mb': '512', 'local_gb': '10',
                                   'cpus': '1', 'cpu_arch': 'x86_64',
                                   'capabilities': end_capabilities}
            power_mock.assert_called_once_with(mock.ANY, task)
            self.assertEqual(task.node.properties, expected_properties)
            get_essential_mock.assert_called_once_with(task.node,
                                                       ilo_object_mock)
            get_capabilities_mock.assert_called_once_with(task.node,
                                                          ilo_object_mock)
            create_port_mock.assert_called_once_with(
                task, ['aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb'])
class TestInspectPrivateMethods(test_common.BaseIloTest):
    """Tests for the module-private helpers of ilo_inspect."""

    def test__get_essential_properties_ok(self):
        # The helper passes through the dict returned by the iLO client
        # when it is well-formed.
        ilo_mock = mock.MagicMock(spec=['get_essential_properties'])
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
        result = {'properties': properties, 'macs': macs}
        ilo_mock.get_essential_properties.return_value = result
        actual_result = ilo_inspect._get_essential_properties(self.node,
                                                              ilo_mock)
        self.assertEqual(result, actual_result)

    def test__get_essential_properties_fail(self):
        # A missing essential key must raise HardwareInspectionFailure
        # naming the missing key(s).
        ilo_mock = mock.MagicMock(
            spec=['get_additional_capabilities', 'get_essential_properties'])
        # Missing key: cpu_arch
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
        result = {'properties': properties, 'macs': macs}
        ilo_mock.get_essential_properties.return_value = result
        result = self.assertRaises(exception.HardwareInspectionFailure,
                                   ilo_inspect._get_essential_properties,
                                   self.node,
                                   ilo_mock)
        self.assertEqual(
            str(result),
            ("Failed to inspect hardware. Reason: Server didn't return the "
             "key(s): cpu_arch"))

    def test__get_essential_properties_fail_invalid_format(self):
        # Properties returned as a list instead of a dict is an error.
        ilo_mock = mock.MagicMock(
            spec=['get_additional_capabilities', 'get_essential_properties'])
        # Not a dict
        properties = ['memory_mb', '512', 'local_gb', '10',
                      'cpus', '1']
        macs = ['aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb']
        capabilities = ''
        result = {'properties': properties, 'macs': macs}
        ilo_mock.get_essential_properties.return_value = result
        ilo_mock.get_additional_capabilities.return_value = capabilities
        self.assertRaises(exception.HardwareInspectionFailure,
                          ilo_inspect._get_essential_properties,
                          self.node, ilo_mock)

    def test__get_essential_properties_fail_mac_invalid_format(self):
        # MACs returned as a bare string instead of a dict is an error.
        ilo_mock = mock.MagicMock(spec=['get_essential_properties'])
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        # Not a dict
        macs = 'aa:aa:aa:aa:aa:aa'
        result = {'properties': properties, 'macs': macs}
        ilo_mock.get_essential_properties.return_value = result
        self.assertRaises(exception.HardwareInspectionFailure,
                          ilo_inspect._get_essential_properties,
                          self.node, ilo_mock)

    def test__get_essential_properties_hardware_port_empty(self):
        # No MAC information at all (None) must also fail inspection.
        ilo_mock = mock.MagicMock(
            spec=['get_additional_capabilities', 'get_essential_properties'])
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        # Not a dictionary
        macs = None
        result = {'properties': properties, 'macs': macs}
        capabilities = ''
        ilo_mock.get_essential_properties.return_value = result
        ilo_mock.get_additional_capabilities.return_value = capabilities
        self.assertRaises(exception.HardwareInspectionFailure,
                          ilo_inspect._get_essential_properties,
                          self.node, ilo_mock)

    def test__get_essential_properties_hardware_port_not_dict(self):
        # A single MAC string (not keyed by port) must fail inspection.
        ilo_mock = mock.MagicMock(spec=['get_essential_properties'])
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1', 'cpu_arch': 'x86_64'}
        # Not a dict
        macs = 'aa:bb:cc:dd:ee:ff'
        result = {'properties': properties, 'macs': macs}
        ilo_mock.get_essential_properties.return_value = result
        result = self.assertRaises(
            exception.HardwareInspectionFailure,
            ilo_inspect._get_essential_properties, self.node, ilo_mock)

    @mock.patch.object(utils, 'get_updated_capabilities', spec_set=True,
                       autospec=True)
    def test__get_capabilities_ok(self, capability_mock):
        # _get_capabilities returns whatever the iLO client reports.
        ilo_mock = mock.MagicMock(spec=['get_server_capabilities'])
        capabilities = {'ilo_firmware_version': 'xyz'}
        ilo_mock.get_server_capabilities.return_value = capabilities
        cap = ilo_inspect._get_capabilities(self.node, ilo_mock)
        self.assertEqual(cap, capabilities)

    def test__validate_ok(self):
        # Data containing exactly the essential property keys validates.
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '2', 'cpu_arch': 'x86_arch'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa'}
        data = {'properties': properties, 'macs': macs}
        valid_keys = ilo_inspect.IloInspect.ESSENTIAL_PROPERTIES
        ilo_inspect._validate(self.node, data)
        self.assertEqual(sorted(set(properties)), sorted(valid_keys))

    def test__validate_essential_keys_fail_missing_key(self):
        # Data missing an essential key (cpu_arch) must fail validation.
        properties = {'memory_mb': '512', 'local_gb': '10',
                      'cpus': '1'}
        macs = {'Port 1': 'aa:aa:aa:aa:aa:aa'}
        data = {'properties': properties, 'macs': macs}
        self.assertRaises(exception.HardwareInspectionFailure,
                          ilo_inspect._validate, self.node, data)

    def test___create_supported_capabilities_dict(self):
        # Only keys listed in CAPABILITIES_KEYS survive filtering;
        # unknown keys are dropped.
        capabilities = {}
        expected = {}
        for key in ilo_inspect.CAPABILITIES_KEYS:
            capabilities.update({key: 'true'})
            expected.update({key: 'true'})
        capabilities.update({'unknown_property': 'true'})
        cap = ilo_inspect._create_supported_capabilities_dict(capabilities)
        self.assertEqual(expected, cap)

    def test___create_supported_capabilities_dict_excluded_capability(self):
        # A supported key absent from the input simply stays absent
        # from the output.
        capabilities = {}
        expected = {}
        for key in ilo_inspect.CAPABILITIES_KEYS - {'has_ssd'}:
            capabilities.update({key: 'true'})
            expected.update({key: 'true'})
        cap = ilo_inspect._create_supported_capabilities_dict(capabilities)
        self.assertEqual(expected, cap)

    def test___create_supported_capabilities_dict_gpu_capabilities(self):
        # Dynamic gpu_* capability keys are kept alongside the
        # statically listed CAPABILITIES_KEYS.
        capabilities = {'gpu_Nvidia_count': 1, 'gpu_Nvidia_Tesla_M10_count': 1,
                        'gpu_Nvidia_Tesla_M10': True}
        expected = {}
        expected.update(capabilities)
        for key in ilo_inspect.CAPABILITIES_KEYS:
            capabilities.update({key: 'true'})
            expected.update({key: 'true'})
        capabilities.update({'unknown_property': 'true'})
        cap = ilo_inspect._create_supported_capabilities_dict(capabilities)
        self.assertEqual(expected, cap)
| |
# -*- coding: utf-8 -*-
"""
S3 Adobe PDF codec
@copyright: 2011 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3RL_PDF"]
# Import the necessary libraries
from gluon import *
from gluon import current
from gluon.storage import Storage
from gluon.contenttype import contenttype
from gluon.languages import lazyT
from ..s3rest import S3Request
from ..s3codec import S3Codec
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from copy import deepcopy
# Import the specialist libraries
try:
from PIL import Image
from PIL import ImageOps
from PIL import ImageStat
PILImported = True
except(ImportError):
try:
import Image
import ImageOps
import ImageStat
PILImported = True
except(ImportError):
PILImported = False
try:
from reportlab.lib.enums import TA_CENTER, TA_RIGHT
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import canvas
from reportlab.lib.fonts import tt2ps
from reportlab.rl_config import canvas_basefontname as _baseFontName
from reportlab.platypus import BaseDocTemplate, SimpleDocTemplate, PageTemplate
from reportlab.platypus.frames import Frame
from reportlab.platypus import Spacer, PageBreak, FrameBreak, Paragraph
from reportlab.platypus import Table, TableStyle
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from reportlab.lib.units import cm
from reportlab.lib import colors
from reportlab.lib.colors import Color
from reportlab.lib.pagesizes import A4, LETTER, landscape, portrait
from reportlab.platypus.flowables import Flowable
reportLabImported = True
except ImportError:
reportLabImported = False
BaseDocTemplate = object
inch = 72.0
canvas = Storage()
canvas.Canvas = None
PDF_WIDTH = 0
PDF_HEIGHT = 1
# =============================================================================
class S3RL_PDF(S3Codec):
"""
Simple Report Labs PDF format codec
"""
# -------------------------------------------------------------------------
def __init__(self):
"""
Constructor
"""
# Error codes
T = current.T
self.ERROR = Storage(
PIL_ERROR = T("PIL (Python Image Library) not installed, images cannot be embedded in the PDF report"),
RL_ERROR = T("Reportlab not installed")
)
# -------------------------------------------------------------------------
def encode(self, r, **attr):
"""
Export data as a PDF spreadsheet
@param r: the S3Request object
@param attr: dictionary of parameters:
* pdf_callback: callback to be used rather than r
* list_fields: Fields to include in list views
* pdf_componentname: The name of the component to use
This should be a component of the resource
* pdf_title: The title of the report
* pdf_filename: The filename of the report
* pdf_header: The header (maybe a callback)
* rHeader: used if pdf_header doesn't exist
* pdf_header_padding: add this amount of space between the header and the body
* pdf_footer: The footer (maybe a callback)
* rFooter: used if pdf_footer doesn't exist
* pdf_footer_padding: add this amount of space between the body and the footer
* use_colour: True to add colour to the cells. default False
* pdf_groupby: How to group the results
* pdf_orderby: How to sort rows (within any level of grouping)
* pdf_hide_comments: don't show the comments in a table
* pdf_table_autogrow: Indicates that a table should grow to
fill the available space. Valid values:
H - Horizontal
V - Vertical
B - Both
* pdf_paper_alignment: Portrait (default) or Landscape
"""
if not PILImported:
current.session.warning = self.ERROR.PIL_ERROR
if not reportLabImported:
current.session.error = self.ERROR.RL_ERROR
redirect(URL(extension=""))
# Environment
request = current.request
response = current.response
self.r = r
self.list_fields = attr.get("list_fields")
self.pdf_groupby = attr.get("pdf_groupby")
self.pdf_orderby = attr.get("pdf_orderby")
self.pdf_hide_comments = attr.get("pdf_hide_comments")
self.table_autogrow = attr.get("pdf_table_autogrow")
self.pdf_header_padding = attr.get("pdf_header_padding",0)
self.pdf_footer_padding = attr.get("pdf_footer_padding",0)
# Get the title & filename
now = request.now.isoformat()[:19].replace("T", " ")
title = attr.get("pdf_title")
if title == None:
title = "Report"
docTitle = "%s %s" % (title, now)
self.filename = attr.get("pdf_filename")
if self.filename == None:
self.filename = "%s_%s.pdf" % (title, now)
# get the pdf document template
paper_size = attr.get("paper_size")
pdf_paper_alignment = attr.get("pdf_paper_alignment","Portrait")
doc = EdenDocTemplate(title=docTitle,
paper_size = paper_size,
paper_alignment = pdf_paper_alignment)
# Get the header
header_flowable = None
header = attr.get("pdf_header")
if not header:
header = attr.get("rheader")
if header:
header_flowable = self.get_html_flowable(header,
doc.printable_width)
if self.pdf_header_padding:
header_flowable.append(Spacer(1,self.pdf_header_padding))
# Get the footer
footer_flowable = None
footer = attr.get("pdf_footer")
if not footer:
footer = attr.get("rFooter")
if footer:
footer_flowable = self.get_html_flowable(footer,
doc.printable_width)
if self.pdf_footer_padding:
footer_flowable.append(Spacer(1, self.pdf_footer_padding))
# Build report template
# Get data for the body of the text
data = None
body_flowable = None
doc.calc_body_size(header_flowable, footer_flowable)
callback = attr.get("pdf_callback")
if callback:
body_flowable = self.get_html_flowable(callback(r),
doc.printable_width)
elif attr.get("pdf_componentname"):
componentname = attr.get("pdf_componentname")
(prefix, component) = componentname.split("_", 1)
resource = current.manager.define_resource(request.controller,
request.function,
components = component,
id = r.id,
)
body_flowable = self.get_resource_flowable(resource.components[component],
doc)
else:
if r.component:
resource = r.component
else:
resource = r.resource
body_flowable = self.get_resource_flowable(resource,
doc)
styleSheet = getSampleStyleSheet()
self.normalstyle = styleSheet["Normal"]
self.normalstyle.fontName = "Helvetica"
self.normalstyle.fontSize = 9
# Build the pdf
doc.build(header_flowable,
body_flowable,
footer_flowable,
)
# return the generated pdf
# Set content type and disposition headers
if response:
disposition = "attachment; filename=\"%s\"" % self.filename
response.headers["Content-Type"] = contenttype(".pdf")
response.headers["Content-disposition"] = disposition
# Return the stream
doc.output.seek(0)
return doc.output.read()
def get_html_flowable(self, rules, printable_width):
"""
Function to convert the rules passed in to a flowable.
the rules (for example) could be a rHeader callback
"""
# let's assume that it's a callable
try:
# switch the representation to html so the callback doesn't barf
repr = self.r.representation
self.r.representation = "html"
html = rules(self.r)
self.r.representation = repr
except:
# okay so maybe it wasn't ... it could be an HTML object
html = rules
parser = S3html2pdf(pageWidth = printable_width,
exclude_class_list=["tabs"])
result = parser.parse(html)
return result
    def get_resource_flowable(self, resource, doc):
        """
            Convert a resource's records into a represented, paginated
            S3PDFTable flowable.

            @param resource: the S3Resource whose records are rendered
            @param doc: the EdenDocTemplate the table will be placed in
                        (used for body_height)

            @return: the flowable(s) built by S3PDFTable.build()
        """
        fields = resource.readable_fields()
        if not fields:
            # NOTE(review): "table" is not defined in this scope -- this
            # fallback would raise a NameError if ever reached; confirm
            fields = [table.id]
        fnames = []
        flabel = []
        fobjs = []
        # get a list of fields, if the list_fields attribute is provided
        # then use that to extract the fields that are required, otherwise
        # use the list of readable fields.
        if not self.list_fields:
            self.list_fields = []
            for field in fields:
                if field.type == "id":
                    continue
                if self.pdf_hide_comments and field.name == "comments":
                    continue
                self.list_fields.append(field.name)
                fnames.append(field.name)
                flabel.append(field.label)
        else:
            for lf in self.list_fields:
                # if the list fields contains the label then use that
                # otherwise look for the label in the list of fields
                if isinstance(lf, (tuple, list)):
                    if lf[1] == "id":
                        continue
                    fnames.append(lf[1])
                    flabel.append(lf[0])
                else:
                    if lf == "id":
                        continue
                    for field in fields:
                        if field.name == lf:
                            fnames.append(field.name)
                            flabel.append(field.label)
                            break
        # from the list of fields get a list of field objects, used to get the
        # represent function for the data returned from the call to sqltable()
        for field_name in fnames:
            found = False
            for field in fields:
                if field.name == field_name:
                    fobjs.append(field)
                    found = True
                    break
            if not found:
                if "$" in field_name:
                    # Selector across a join (e.g. "person_id$first_name")
                    _field = resource.resolve_selector(field_name)["field"]
                    fobjs.append(_field)
                else:
                    # Presumably a virtual field: a bare Field placeholder
                    fobjs.append(Field(field_name))
        # Merge the groupby and order by into a comma separated string
        if self.pdf_groupby:
            if self.pdf_orderby:
                orderby = "%s,%s" % (self.pdf_groupby, self.pdf_orderby)
            else:
                orderby = self.pdf_groupby
        else:
            orderby = self.pdf_orderby
        # Get the data...
        sqltable = resource.sqltable(self.list_fields,
                                     orderby = orderby,
                                     start = None, # needs to be None to get all records
                                     limit = None,
                                     )
        data = []
        # Convert the data into the represent format
        if sqltable:
            for record in sqltable.sqlrows.records:
                row = []
                # NOTE(review): cnt is incremented but never used
                cnt = 0
                for field in fobjs:
                    repr = field.represent
                    fname = field.name
                    try:
                        value = record[field._tablename][fname]
                    except:
                        # The above code doesn't work for virtual fields
                        # So look through the results in the record
                        value = ""
                        tnames = record.keys()
                        for tname in tnames:
                            if fname in record[tname]:
                                value = record[tname][fname]
                                break
                    if repr:
                        try:
                            # Prefer suppressing hyperlinks in PDF output
                            row.append(repr(value, show_link=False))
                        except TypeError:
                            # represent() doesn't accept show_link
                            try:
                                row.append(repr(value))
                            except:
                                row.append(value)
                    else:
                        row.append(value)
                    cnt += 1
                data.append(row)
        # Now generate the PDF table
        pdf_table = S3PDFTable(doc,
                               raw_data = data,
                               list_fields = fnames,
                               labels=flabel,
                               groupby = self.pdf_groupby,
                               autogrow = self.table_autogrow,
                               body_height = doc.body_height,
                               ).build()
        return pdf_table
# -------------------------------------------------------------------------
class EdenDocTemplate(BaseDocTemplate):
    """
        The standard document template for eden reports
        It allows for the following page templates:
        1) First Page
        2) Even Page
        3) Odd Page
        4) Landscape Page
    """
    def __init__(self,
                 title = "Sahana Eden",
                 margin = (0.5*inch, # top
                           0.3*inch, # left
                           0.5*inch, # bottom
                           0.3*inch), # right
                 margin_inside = 0.0*inch, # used for odd even pages
                 paper_size = None,
                 paper_alignment = "Portrait"):
        """
            Set up the standard page templates
        """
        # The rendered PDF bytes accumulate here; read back after build()
        self.output = StringIO()
        self.defaultPage = paper_alignment
        if paper_size:
            self.paper_size = paper_size
        else:
            # Fall back to the deployment-wide paper-size setting
            settings = current.deployment_settings
            if settings.get_paper_size() == "Letter":
                self.paper_size = LETTER
            else:
                self.paper_size = A4
        self.topMargin = margin[0]
        self.leftMargin = margin[1]
        self.bottomMargin = margin[2]
        self.rightMargin = margin[3]
        self.insideMargin = margin_inside
        BaseDocTemplate.__init__(self,
                                 self.output,
                                 title = title,
                                 leftMargin = self.leftMargin,
                                 rightMargin = self.rightMargin,
                                 topMargin = self.topMargin,
                                 bottomMargin = self.bottomMargin,
                                 )
        # Margins may be shrunk to fit wide tables, but never below this
        self.MINIMUM_MARGIN_SIZE = 0.2 * inch
        self.body_flowable = None
        self._calc()
    def get_flowable_size(self, flowable):
        """
            Function to return the size a flowable will require

            @return: (width, height) -- the maximum width and the summed
                     height of the given flowable(s)
        """
        if not flowable:
            return (0,0)
        if not isinstance(flowable, list):
            flowable = [flowable]
        w = 0
        h = 0
        for f in flowable:
            if f:
                # wrap() reports the space the flowable needs
                size = f.wrap(self.printable_width,
                              self.printable_height)
                if size[0] > w:
                    w = size[PDF_WIDTH]
                h += size[PDF_HEIGHT]
        return (w, h)
    def _calc(self):
        """
            Recalculate page size and printable area from the current
            orientation and margins
        """
        if self.defaultPage == "Landscape":
            self.pagesize = landscape(self.paper_size)
        else:
            self.pagesize = portrait(self.paper_size)
        BaseDocTemplate._calc(self)
        # PDF_WIDTH / PDF_HEIGHT are index constants defined elsewhere in
        # this file (presumably 0 and 1 -- confirm)
        self.height = self.pagesize[PDF_HEIGHT]
        self.width = self.pagesize[PDF_WIDTH]
        self.printable_width = self.width - \
                               self.leftMargin - \
                               self.rightMargin - \
                               self.insideMargin
        self.printable_height = self.height - \
                                self.topMargin - \
                                self.bottomMargin
    def calc_body_size(self,
                       header_flowable,
                       footer_flowable,
                       ):
        """
            Helper function to calculate the various sizes of the page
        """
        self._calc() # in case we changed margins sizes etc
        self.height = self.pagesize[PDF_HEIGHT]
        self.width = self.pagesize[PDF_WIDTH]
        self.printable_width = self.width - \
                               self.leftMargin - \
                               self.rightMargin - \
                               self.insideMargin
        self.printable_height = self.height - \
                                self.topMargin - \
                                self.bottomMargin
        header_size = self.get_flowable_size(header_flowable)
        footer_size = self.get_flowable_size(footer_flowable)
        self.header_height = header_size[PDF_HEIGHT]
        self.footer_height = footer_size[PDF_HEIGHT]
        # The body gets whatever vertical space header and footer leave over
        self.body_height = self.printable_height - \
                           self.header_height - \
                           self.footer_height
    def build(self,
              header_flowable,
              body_flowable,
              footer_flowable,
              canvasmaker=canvas.Canvas):
        """
            Build the document using the flowables.

            Set up the page templates that the document can use
        """
        self.header_flowable = header_flowable
        self.body_flowable = body_flowable
        self.footer_flowable = footer_flowable
        self.calc_body_size(header_flowable,
                            footer_flowable,
                            )
        showBoundary = 0 # for debugging set to 1, otherwise 0
        # The body frame sits between the footer and the header
        body_frame = Frame(self.leftMargin,
                           self.bottomMargin + self.footer_height,
                           self.printable_width,
                           self.body_height,
                           leftPadding = 0,
                           bottomPadding = 0,
                           rightPadding = 0,
                           topPadding = 0,
                           id = "body",
                           showBoundary = showBoundary
                           )
        self.body_frame = body_frame
        self.normalPage = PageTemplate (id = 'Normal',
                                        frames = [body_frame,],
                                        onPage = self.add_page_decorators,
                                        pagesize = self.pagesize
                                        )
        # @todo set these page templates up
        # self.evenPage = PageTemplate (id='even',
        #                               frames=frame_list,
        #                               onPage=self.onEvenPage,
        #                               pagesize=self.pagesize
        #                               )
        # self.oddPage = PageTemplate (id='odd',
        #                              frames=frame_list,
        #                              onPage=self.onOddPage,
        #                              pagesize=self.pagesize
        #                              )
        self.landscapePage = PageTemplate (id='Landscape',
                                           frames = [body_frame,],
                                           onPage=self.add_page_decorators,
                                           pagesize=landscape(self.pagesize)
                                           )
        if self.defaultPage == "Landscape":
            self.addPageTemplates(self.landscapePage)
        else:
            self.addPageTemplates(self.normalPage)
        BaseDocTemplate.build(self, self.body_flowable, canvasmaker=canvasmaker)
    def add_page_decorators(self, canvas, doc):
        """
            onPage callback: draw the header flowables from the top of the
            printable area downwards, and the footer flowables from the top
            of the footer area downwards, on every page
        """
        if self.header_flowable:
            top = self.bottomMargin + self.printable_height
            for flow in self.header_flowable:
                height = self.get_flowable_size(flow)[PDF_HEIGHT]
                bottom = top - height
                flow.drawOn(canvas,
                            self.leftMargin,
                            bottom
                            )
                top = bottom
        if self.footer_flowable:
            top = self.bottomMargin + self.footer_height
            for flow in self.footer_flowable:
                height = self.get_flowable_size(flow)[PDF_HEIGHT]
                bottom = top - height
                flow.drawOn(canvas,
                            self.leftMargin,
                            bottom
                            )
                top = bottom
    def addParagraph(self, text, style=None, append=True):
        """
            Method to create a paragraph that may be inserted into the document

            @param text: The text for the paragraph
            @param append: If True then the paragraph will be stored in the
            document flow ready for generating the pdf.

            @return The paragraph

            This method can return the paragraph rather than inserting into the
            document. This is useful if the paragraph needs to be first
            inserted in another flowable, before being added to the document.

            An example of when this is useful is when large amounts of text
            (such as a comment) are added to a cell of a table.
        """
        if text != "":
            if style == None:
                styleSheet = getSampleStyleSheet()
                style = styleSheet["Normal"]
            para = Paragraph(text, style)
            if append and self.body_flowable:
                self.body_flowable.append(para)
            return para
        return ""
    def cellStyle(self, style, cell):
        """
            Add special styles to the text in a cell
        """
        if style == "*GREY":
            return [("TEXTCOLOR",cell, cell, colors.lightgrey)]
        elif style == "*RED":
            return [("TEXTCOLOR",cell, cell, colors.red)]
        return []
    def addCellStyling(self, table, style):
        """
            Add special styles to the text in a table
        """
        row = 0
        for line in table:
            col = 0
            for cell in line:
                try:
                    # Cells starting with a marker ("*GREY text") get the
                    # marker stripped and the matching style applied
                    if cell.startswith("*"):
                        (instruction,sep,text) = cell.partition(" ")
                        style += self.cellStyle(instruction, (col, row))
                        table[row][col] = text
                except:
                    # Not a string (e.g. a Paragraph) -- leave it untouched
                    pass
                col += 1
            row += 1
        return (table, style)
# end of class EdenDocTemplate
# -----------------------------------------------------------------------------
class S3PDFTable(object):
    """
        Class to build a table that can then be placed in a pdf document

        The table will be formatted so that is fits on the page. This class
        doesn't need to be called directly. Rather see S3PDF.addTable()
    """
    def __init__(self,
                 document,
                 raw_data,
                 list_fields = None,
                 labels = None,
                 groupby = None,
                 hide_comments = False,
                 autogrow = False,
                 body_height = 0,
                 ):
        """
            Method to create a table object

            @param document: A S3PDF object
            @param raw_data: A list of rows
            @param list_fields: A list of field names
            @param labels: a list of labels
            @param groupby: A field name that is to be used as a sub-group
                   All the records that share the same pdf_groupby value
                   will be clustered together
            @param hide_comments: Any comment field will be hidden
        """
        settings = current.deployment_settings
        if settings.get_paper_size() == "Letter":
            self.paper_size = LETTER
        else:
            self.paper_size = A4
        self.pdf = document
        self.raw_data = raw_data
        self.list_fields = list_fields
        self.labels = labels
        self.pdf_groupby = groupby
        self.hideComments = hide_comments
        self.autogrow = autogrow
        self.body_height = body_height
        self.data = []
        # Row indices (into self.data) that hold subheading rows, and the
        # nesting level of each such subheading
        self.subheadingList = []
        self.subheadingLevel = {}
        self.pages = []
        self.colWidths = []
        self.newColWidth = [] # @todo: remove this (but see presentation)
        self.rowHeights = []
        self.style = None
        # temp document to test the table size, default to A4 portrait
        # @todo: use custom template
        # @todo: set pagesize for pdf component not whole document
        self.tempDoc = EdenDocTemplate()
        # self.tempDoc.setPageTemplates(self.pdf.pageHeader,
        #                               self.pdf.pageFooter)
        # self.tempDoc.pagesize = portrait(self.paper_size)
        # Set up style constants
        self.headerColour = Color(0.73, 0.76, 1)
        self.oddColour = Color(0.92, 0.92, 1)
        self.evenColour = Color(0.83, 0.84, 1)
        self.MIN_COMMENT_COL_WIDTH = 200
        self.fontsize = 12
    # -------------------------------------------------------------------------
    def build(self):
        """
            Method to build the table.

            @return: A list of Table objects. Normally this will be a list with
                     just one table object, but if the table needs to be split
                     across columns then one object per page will be created.
        """
        if self.pdf_groupby:
            data = self.group_data()
            self.data = [self.labels] + data
        elif self.raw_data != None:
            self.data = [self.labels] + self.raw_data
        if len(self.data) == 0:
            return None
        endCol = len(self.labels) - 1
        rowCnt = len(self.data)
        self.style = self.tableStyle(0, rowCnt, endCol)
        # Lay the table out once on a scratch document to measure it
        tempTable = Table(self.data, repeatRows=1,
                          style=self.style, hAlign="LEFT"
                          )
        self.tempDoc.build(None, [tempTable], None)
        self.newColWidth = [tempTable._colWidths]
        self.rowHeights = [tempTable._rowHeights]
        self.pages.append(self.data)
        if not self.tweakDoc(tempTable):
            # Minor tweaks weren't enough: split across pages/columns
            #print "Need to split the table"
            self.pages = self.splitTable(tempTable)
        return self.presentation()
    def group_data(self):
        """
            Pull the groupby column(s) out of the raw data, inserting a
            subheading row each time the value of a groupby column changes;
            records positions/levels of the inserted rows in
            subheadingList/subheadingLevel.
        """
        groups = self.pdf_groupby.split(",")
        newData = []
        data = self.raw_data
        level = 0
        for field in groups:
            level += 1
            field = field.strip()
            # find the location of field in list_fields
            i = 0
            rowlength = len(self.list_fields)
            while i < rowlength:
                if self.list_fields[i] == field:
                    break
                i += 1
            # Remove the grouped-on column from the fields and labels
            list_fields = self.list_fields[0:i]+self.list_fields[i+1:]
            self.list_fields = list_fields
            labels = self.labels[0:i]+self.labels[i+1:]
            self.labels = labels
            currentGroup = None
            r = 0
            for row in data:
                if r+1 in self.subheadingList:
                    # Subheading inserted by an earlier (outer) grouping pass
                    newData.append(row)
                    r += 1
                else:
                    try:
                        group = row[i]
                        if group != currentGroup:
                            line = [group]
                            newData.append(line)
                            r += 1
                            currentGroup = group
                            self.subheadingList.append(r)
                            self.subheadingLevel[r] = level
                            # all existing subheadings after this point need to
                            # be shuffled down one place.
                            for x in range (len(self.subheadingList)):
                                if self.subheadingList[x] > r:
                                    posn = self.subheadingList[x]
                                    self.subheadingList[x] = posn + 1
                                    oldlevel = self.subheadingLevel[posn]
                                    del self.subheadingLevel[posn]
                                    self.subheadingLevel[posn+1] = oldlevel
                        line = row[0:i]+row[i+1:]
                        newData.append(line)
                        r += 1
                    except:
                        # Short row (e.g. an existing subheading): keep as-is
                        newData.append(row)
                        r += 1
            data = newData
            newData = []
        return data
    # -------------------------------------------------------------------------
    def presentation(self):
        """
            This will convert the S3PDFTABLE object to a format that can be
            used to add to a S3PDF document object.

            This is only used internally but could be used to generate a copy
            of a previously generated table
        """
        # Build the tables
        content = []
        currentPage = 0
        totalPagesAcross = len(self.newColWidth)
        if self.autogrow == "H" or self.autogrow == "B":
            printable_width = self.pdf.printable_width
            # expand the columns to use all the available space
            newColWidth = []
            for cols in self.newColWidth:
                col_width = 0
                for col in cols:
                    col_width += col
                # NOTE(review): column sets that already fill the width get
                # no entry in newColWidth -- confirm this cannot desync the
                # page/width indexing below
                if col_width < printable_width:
                    surplus = printable_width - col_width
                    proportion = surplus / col_width
                    newcols = []
                    for col in cols:
                        newcols.append(col+col*proportion)
                    newColWidth.append(newcols)
            self.newColWidth = newColWidth
        startRow = 0
        for page in self.pages:
            if page == []:
                currentPage += 1
                continue
            colWidths = self.newColWidth[currentPage % totalPagesAcross]
            if self.autogrow == "V" or self.autogrow == "B":
                # Pad the page with blank rows to fill the body vertically
                row_height = self.rowHeights[0][0]
                rows = len(page)
                if self.body_height > row_height * rows:
                    rowCnt = int(self.body_height/row_height)
                    extra_rows = rowCnt - rows
                    if extra_rows:
                        cells = len(colWidths)
                        row = [""] * cells
                        extra = [row] * extra_rows
                        page = page + extra
            endCol = len(colWidths) - 1
            rowCnt = len(page)
            self.style = self.tableStyle(startRow, rowCnt, endCol)
            (page,self.style) = self.pdf.addCellStyling(page, self.style)
            p = Table(page, repeatRows=1,
                      style=self.style,
                      hAlign="LEFT",
                      colWidths=colWidths
                      )
            content.append(p)
            # add a page break, except for the last page.
            if currentPage + 1 < len(self.pages):
                content.append(PageBreak())
            currentPage += 1
            if currentPage % totalPagesAcross == 0:
                startRow += rowCnt - 1 # Don't include the heading
        return content
    # -------------------------------------------------------------------------
    def getAvailableMarginSpace(self):
        """
            Internally used method to calculate the amount of space available
            on the width of a page.
        """
        currentLeftMargin = self.pdf.leftMargin
        currentRightMargin = self.pdf.rightMargin
        availableMarginSpace = currentLeftMargin \
                             + currentRightMargin \
                             - 2 * self.pdf.MINIMUM_MARGIN_SIZE
        return availableMarginSpace
    # -------------------------------------------------------------------------
    def tweakMargin(self, tableWidth):
        """
            Internally used method to adjust the document margins so that the
            table will fit into the available space
        """
        availableMarginSpace = self.getAvailableMarginSpace()
        currentOverlap = tableWidth - self.tempDoc.printable_width
        # NOTE(review): endCol and rowCnt are computed but never used here
        endCol = len(self.labels) - 1
        rowCnt = len(self.data)
        # Check margins
        if currentOverlap < availableMarginSpace:
            self.pdf.leftMargin -= currentOverlap / 2
            self.pdf.rightMargin -= currentOverlap / 2
            return True
        return False
    # -------------------------------------------------------------------------
    def tweakFont(self, tableWidth, newFontSize, colWidths):
        """
            Internally used method to adjust the font size used so that the
            table will fit into the available space on the page.
        """
        # Check font
        adjustedWidth = tableWidth * newFontSize / self.fontsize
        if (adjustedWidth - self.tempDoc.printable_width) < self.getAvailableMarginSpace():
            # Shrink the column widths proportionally to the font change
            for i in range(len(colWidths)):
                colWidths[i] *= float(newFontSize) / float(self.fontsize)
            self.newColWidth = [colWidths]
            self.fontsize = newFontSize
            return self.tweakMargin(adjustedWidth)
        return False
    # -------------------------------------------------------------------------
    def minorTweaks(self, tableWidth, colWidths):
        """
            Internally used method to tweak the formatting so that the table
            will fit into the available space on the page.
        """
        if self.tweakMargin(tableWidth):
            return True
        originalFont = self.fontsize
        # Try progressively smaller fonts, up to 3pt below the original
        if self.tweakFont(tableWidth, originalFont -1, colWidths):
            return True
        if self.tweakFont(tableWidth, originalFont -2, colWidths):
            return True
        if self.tweakFont(tableWidth, originalFont -3, colWidths):
            return True
        return False
    # end of function minorTweaks
    # -------------------------------------------------------------------------
    def tweakDoc(self, table):
        """
            Internally used method to adjust the table so that it will fit
            into the available space on the page.

            @return: True if it is able to perform minor adjustments and have
            the table fit in the page. False means that the table will need to
            be split across the columns.
        """
        tableWidth = 0
        for colWidth in table._colWidths:
            tableWidth += colWidth
        colWidths = table._colWidths
        #print "Doc size %s x %s Table width %s" % (self.tempDoc.printable_width, self.tempDoc.height, total)
        if tableWidth > self.tempDoc.printable_width:
            # self.pdf.setMargins(0.5*inch, 0.5*inch)
            # First massage any comment column by putting it in a paragraph
            colNo = 0
            for label in self.labels:
                # Wrap comments in a paragraph
                if label.lower() == "comments":
                    currentWidth = table._colWidths[colNo]
                    # print "%s %s" % (colNo, currentWidth)
                    if currentWidth > self.MIN_COMMENT_COL_WIDTH:
                        for i in range(1, len(self.data)): # skip the heading
                            try:
                                comments = self.data[i][colNo]
                                comments = self.pdf.addParagraph(comments, append=False)
                                if comments:
                                    self.data[i][colNo] = comments
                            except IndexError:
                                pass
                        colWidths[colNo] = self.MIN_COMMENT_COL_WIDTH
                        tableWidth += self.MIN_COMMENT_COL_WIDTH - currentWidth
                colNo += 1
            if not self.minorTweaks(tableWidth, colWidths):
                # Last resort: flip both documents to Landscape and retry
                self.tempDoc.defaultPage = "Landscape"
                self.tempDoc._calc()
                self.pdf.defaultPage = "Landscape"
                self.pdf._calc()
                return self.minorTweaks(tableWidth, colWidths)
        return True
    # -------------------------------------------------------------------------
    def splitTable(self, tempTable):
        """
            Internally used method to split the table across columns so that it
            will fit into the available space on the page.
        """
        colWidths = tempTable._colWidths
        rowHeights = tempTable._rowHeights
        total = 0
        colNo = 0
        colSplit = []
        newColWidth = []
        pageColWidth = []
        # Work out at which columns the table must be split horizontally
        for colW in colWidths:
            if total + colW > self.tempDoc.printable_width:
                colSplit.append(colNo)
                newColWidth.append(pageColWidth)
                pageColWidth = [colW]
                total = colW
            else:
                pageColWidth.append(colW)
                total += colW
            colNo += 1
        colSplit.append(len(colWidths))
        newColWidth.append(pageColWidth)
        self.newColWidth = newColWidth
        total = 0
        rowNo = 0
        lastKnownHeight = 20 # Not all row heights get calculated.
        rowSplit = []
        # Work out at which rows the table must be split vertically
        for rowH in rowHeights:
            if rowH == None:
                rowH = lastKnownHeight
            else:
                lastKnownHeight = rowH
            if total + rowH > self.body_height:
                rowSplit.append(rowNo)
                total = 2 * rowH # 2* is needed to take into account the repeated header row
            else:
                total += rowH
            rowNo += 1
        rowSplit.append(rowNo)
        # Build the pages of data
        pages = []
        startRow = 1 # Skip the first row (the heading) because we'll generate our own
        for endRow in rowSplit:
            startCol = 0
            for endCol in colSplit:
                page = []
                label = []
                for colIndex in range(startCol, endCol):
                    try:
                        label.append(self.labels[colIndex])
                    except IndexError:
                        label.append("")
                page.append(label)
                for rowIndex in range(startRow, endRow):
                    line = []
                    for colIndex in range(startCol, endCol):
                        try:
                            line.append(self.data[rowIndex][colIndex])
                        except IndexError: # No data to add.
                            # If this is the first column of a subheading row then repeat the subheading
                            if len(line) == 0 and rowIndex in self.subheadingList:
                                try:
                                    line.append(self.data[rowIndex][0])
                                except IndexError:
                                    line.append("")
                            else:
                                line.append("")
                    page.append(line)
                pages.append(page)
                startCol = endCol
            startRow = endRow
        return pages
    # -------------------------------------------------------------------------
    def tableStyle(self, startRow, rowCnt, endCol, colour_required = False):
        """
            Internally used method to assign a style to the table

            @param startRow: The row from the data that the first data row in
            the table refers to. When a table is split the first row in the
            table (ignoring the label header row) will not always be the first row
            in the data. This is needed to align the two. Currently this parameter
            is used to identify sub headings and give them an emphasised styling
            @param rowCnt: The number of rows in the table
            @param endCol: The last column in the table

            @todo: replace endCol with -1
                   (should work but need to test with a split table)
        """
        style = [("FONTNAME", (0, 0), (-1, -1), "Helvetica"),
                 ("FONTSIZE", (0, 0), (-1, -1), self.fontsize),
                 ("VALIGN", (0, 0), (-1, -1), "TOP"),
                 ("LINEBELOW", (0, 0), (endCol, 0), 1, Color(0, 0, 0)),
                 ("FONTNAME", (0, 0), (endCol, 0), "Helvetica-Bold"),
                 ]
        if colour_required:
            style.append(("BACKGROUND", (0, 0), (endCol, 0), self.headerColour))
        else:
            style.append(("BACKGROUND", (0, 0), (-1, 0), colors.lightgrey))
            style.append(("INNERGRID", (0, 0), (-1, -1), 0.2, colors.lightgrey))
        if self.pdf_groupby != None:
            style.append(("LEFTPADDING", (0, 0), (-1, -1), 20))
        rowColourCnt = 0 # used to alternate the colours correctly when we have subheadings
        for i in range(rowCnt):
            # If subheading
            if startRow + i in self.subheadingList:
                level = self.subheadingLevel[startRow + i]
                if colour_required:
                    style.append(("BACKGROUND", (0, i), (endCol, i),
                                  self.headerColour))
                style.append(("FONTNAME", (0, i), (endCol, i),
                              "Helvetica-Bold"))
                style.append(("SPAN", (0, i), (endCol, i)))
                # Indent proportionally to the subheading's nesting level
                style.append(("LEFTPADDING", (0, i), (endCol, i), 6 * level))
            elif i > 0:
                if colour_required:
                    if rowColourCnt % 2 == 0:
                        style.append(("BACKGROUND", (0, i), (endCol, i),
                                      self.evenColour))
                        rowColourCnt += 1
                    else:
                        style.append(("BACKGROUND", (0, i), (endCol, i),
                                      self.oddColour))
                        rowColourCnt += 1
        style.append(("BOX", (0, 0), (-1,-1), 1, Color(0, 0, 0)))
        return style
#end of class S3PDFTable
# -------------------------------------------------------------------------
class S3html2pdf():
    """
        Converts web2py HTML helper objects (TABLE, A, P, IMG, DIV and
        plain strings) into ReportLab flowables for inclusion in a PDF.
    """
    def __init__(self,
                 pageWidth,
                 exclude_class_list = []):
        """
            Method that takes html in the web2py helper objects
            and converts it to pdf

            @param pageWidth: the printable width available
            @param exclude_class_list: elements carrying any of these CSS
                   classes are skipped entirely
        """
        # NOTE(review): mutable default argument -- shared between
        # instances; appears to be read-only here, but worth confirming
        self.exclude_class_list = exclude_class_list
        self.pageWidth = pageWidth
        self.fontsize = 10
        styleSheet = getSampleStyleSheet()
        self.plainstyle = styleSheet["Normal"]
        self.plainstyle.fontName = "Helvetica"
        self.plainstyle.fontSize = 9
        self.boldstyle = deepcopy(styleSheet["Normal"])
        self.boldstyle.fontName = "Helvetica-Bold"
        self.boldstyle.fontSize = 10
        self.titlestyle = deepcopy(styleSheet["Normal"])
        self.titlestyle.fontName = "Helvetica-Bold"
        self.titlestyle.fontSize = 16
        self.normalstyle = self.plainstyle
        # To add more pdf styles define the style above (just like the titlestyle)
        # Then add the style and the name to the lookup dict below
        # These can then be added to the html in the code as follows:
        # TD("Waybill", _class="pdf_title")
        self.style_lookup = {"pdf_title": self.titlestyle
                             }
    def parse(self, html):
        """
            Entry point: convert an html helper tree into a list of flowables
        """
        result = self.select_tag(html)
        return result
    def select_tag(self, html, title=False):
        """
            Dispatch on the html node type; strings/lazyT become Paragraphs
            (bold when title is True, e.g. for TH cells)
        """
        if self.exclude_tag(html):
            return None
        if isinstance(html, TABLE):
            return self.parse_table(html)
        elif isinstance(html, A):
            return self.parse_a(html)
        elif isinstance(html, P):
            return self.parse_p(html)
        elif isinstance(html, IMG):
            return self.parse_img(html)
        elif isinstance(html, DIV):
            return self.parse_div(html)
        elif (isinstance(html, str) or isinstance(html, lazyT)):
            if title:
                para = [Paragraph(html, self.boldstyle)]
            else:
                para = [Paragraph(html, self.normalstyle)]
            # Reset the style once it has been applied
            self.normalstyle = self.plainstyle
            return para
        return None
    def exclude_tag(self, html):
        """
            Return True when the element's CSS class is on the exclude list.
            Side effect: switches normalstyle when the class maps to a
            pdf style in style_lookup.
        """
        try:
            if html.attributes["_class"] in self.exclude_class_list:
                return True
            if html.attributes["_class"] in self.style_lookup:
                self.normalstyle = self.style_lookup[html.attributes["_class"]]
        except:
            # Not an element, or it has no _class attribute
            pass
        return False
    def parse_div (self,
                   html
                   ):
        """
            Convert a DIV by parsing each of its components
        """
        content = []
        for component in html.components:
            result = self.select_tag(component)
            if result != None:
                content += result
        if content == []:
            return None
        return content
    def parse_a (self,
                 html
                 ):
        """
            Convert a hyperlink: only its contents are rendered, the
            link target itself is dropped
        """
        content = []
        for component in html.components:
            result = self.select_tag(component)
            if result != None:
                content += result
        if content == []:
            return None
        return content
    def parse_img (self,
                   html
                   ):
        """
            Convert an IMG element into a ReportLab Image scaled to one
            inch high; if the src path doesn't exist as given, look for
            the file in the application's uploads folder
        """
        import os
        I = None
        from reportlab.platypus import Image
        if "_src" in html.attributes:
            src = html.attributes["_src"]
            if os.path.exists(src):
                I = Image(src)
            else:
                src = src.rsplit("/",1)
                src = os.path.join(current.request.folder,"uploads/", src[1])
                if os.path.exists(src):
                    I = Image(src)
        if not I:
            return None
        iwidth = I.drawWidth
        iheight = I.drawHeight
        # @todo: extract the number from a 60px value
        # if "_height" in html.attributes:
        #     height = int(html.attributes["_height"]) * inch / 80.0
        #     width = iwidth * (height/iheight)
        # elif "_width" in html.attributes:
        #     width = int(html.attributes["_width"]) * inch / 80.0
        #     height = iheight * (width/iwidth)
        # else:
        #     height = 1.0 * inch
        #     width = iwidth * (height/iheight)
        height = 1.0 * inch
        width = iwidth * (height/iheight)
        I.drawHeight = height
        I.drawWidth = width
        return [I]
    def parse_p (self, html):
        """
            Convert a P element by parsing each of its components
        """
        content = []
        for component in html.components:
            result = self.select_tag(component)
            if result != None:
                content += result
        if content == []:
            return None
        return content
    def parse_table (self, html):
        """
            Convert a TABLE element (and its TR rows) into a ReportLab Table
        """
        style = [("FONTSIZE", (0, 0), (-1, -1), self.fontsize),
                 ("VALIGN", (0, 0), (-1, -1), "TOP"),
                 ("FONTNAME", (0, 0), (-1, -1), "Helvetica"),
                 ('GRID',(0,0),(-1,-1),0.5,colors.grey),
                 ]
        content = []
        rowCnt = 0
        result = None
        for component in html.components:
            if self.exclude_tag(component):
                continue
            if isinstance(component,TR):
                result = self.parse_tr(component, style, rowCnt)
                rowCnt += 1
            if result != None:
                content.append(result)
        if content == []:
            return None
        table = Table(content,
                      style=style,
                      hAlign="LEFT",
                      vAlign="Top",
                      )
        # NOTE(review): cw is assigned but never used
        cw = table._colWidths
        return [table]
    def parse_tr (self, html, style, rowCnt):
        """
            Convert one TR into a list of cell contents; appends the
            matching cell styles (TH emphasis, colspan SPANs) to style
        """
        row = []
        colCnt = 0
        for component in html.components:
            if isinstance(component,(TH,TD)):
                if self.exclude_tag(component):
                    continue
                colspan = 1
                if "_colspan" in component.attributes:
                    colspan = component.attributes["_colspan"]
                if component.components == []:
                    row.append("")
                else:
                    for detail in component.components:
                        result = self.select_tag(detail, title=isinstance(component,TH))
                        if result != None:
                            row.append(result)
                            if isinstance(component,TH):
                                style.append(("BACKGROUND", (colCnt, rowCnt), (colCnt, rowCnt), colors.lightgrey))
                                style.append(("FONTNAME", (colCnt, rowCnt), (colCnt, rowCnt), "Helvetica-Bold"))
                if colspan > 1:
                    # Pad the row so the SPAN has cells to merge
                    for i in xrange(1,colspan):
                        row.append("")
                    style.append(("SPAN", (colCnt, rowCnt), (colCnt+colspan-1, rowCnt)))
                    colCnt += colspan
                else:
                    colCnt += 1
        if row == []:
            return None
        return row
# end of class S3html2pdf
# =============================================================================
# Sublime Text plugin for Tern
import sublime, sublime_plugin
import os, sys, platform, subprocess, webbrowser, json, re, time, atexit
from subprocess import CalledProcessError
try:
# python 2
from utils.renderer import create_renderer
except:
from .utils.renderer import create_renderer
windows = platform.system() == "Windows"
python3 = sys.version_info[0] > 2
is_st2 = int(sublime.version()) < 3000
def is_js_file(view):
    """True when the view has a selection whose end lies in JavaScript source."""
    selections = view.sel()
    if not len(selections):
        return False
    return view.score_selector(sel_end(selections[0]), "source.js") > 0
# ProjectFile instances keyed by file name (or a generated pseudo-name
# for unsaved buffers -- see get_pfile)
files = {}
# Defaults for the plugin settings; presumably overwritten when the
# settings are loaded elsewhere -- NOTE(review): confirm
arghints_enabled = False
renderer = None
arg_completion_enabled = False
tern_command = None
tern_arguments = []
def on_deactivated(view):
    """Flush unsent buffer edits to the Tern server when a view loses focus."""
    pfile = files.get(view.file_name(), None)
    if pfile is None:
        return
    if pfile.dirty:
        send_buffer(pfile, view)
def on_selection_modified(view):
    """Refresh argument hints for the active view when hints are enabled."""
    if not arghints_enabled:
        return
    pfile = get_pfile(view)
    if pfile is not None:
        show_argument_hints(pfile, view)
class Listeners(sublime_plugin.EventListener):
    """Routes Sublime Text events to the per-file Tern bookkeeping."""

    def on_close(self, view):
        # Drop the cached project file for a view that goes away
        files.pop(view.file_name(), None)

    def on_deactivated(self, view):
        # ST2 has no async callbacks, so do the work synchronously there
        if is_st2:
            on_deactivated(view)

    def on_deactivated_async(self, view):
        on_deactivated(view)

    def on_modified(self, view):
        pfile = files.get(view.file_name(), None)
        if pfile:
            pfile_modified(pfile, view)

    def on_selection_modified(self, view):
        if is_st2:
            on_selection_modified(view)

    def on_selection_modified_async(self, view):
        on_selection_modified(view)

    def on_query_completions(self, view, prefix, _locations):
        selections = view.sel()
        if not len(selections):
            return None
        caret = sel_start(selections[0])
        # No completions inside strings or comments
        if view.score_selector(caret, 'string.quoted') > 0:
            return None
        if view.score_selector(caret, 'comment') > 0:
            return None
        pfile = get_pfile(view)
        if pfile is None:
            return None
        completions, fresh = ensure_completions_cached(pfile, view)
        if completions is None:
            return None
        if fresh:
            return completions
        # Cached result: narrow it down to entries matching the typed prefix
        return [entry for entry in completions if entry[1].startswith(prefix)]
class ProjectFile(object):
    """Per-view bookkeeping for one file belonging to a Tern project."""

    def __init__(self, name, view, project):
        self.name = name
        self.project = project
        # Whether the buffer has edits the server hasn't seen yet
        self.dirty = view.is_dirty()
        # Completion / argument-hint caches, invalidated as the buffer changes
        self.cached_completions = None
        self.cached_arguments = None
        self.showing_arguments = False
        self.last_modified = 0
class Project(object):
    """State for one Tern server rooted at a project directory."""

    def __init__(self, dir):
        self.dir = dir
        # Port and process handle of the Tern server, once started
        self.port = None
        self.proc = None
        # Timestamp of the last failed server start (throttles retries)
        self.last_failed = 0
        self.disabled = False

    def __del__(self):
        kill_server(self)
def get_pfile(view):
    """
    Return (creating and caching if necessary) the ProjectFile for the view,
    or None when it is not a JS file or its project has been disabled.
    """
    if not is_js_file(view): return None
    fname = view.file_name()
    if fname is None:
        # Unsaved buffer: give it a unique pseudo-name under the default
        # project directory so it can still talk to a Tern server
        fname = os.path.join(os.path.dirname(__file__), get_setting("tern_default_project_dir", "default_project_dir"), str(time.time()))
    if fname in files:
        pfile = files[fname]
        if pfile.project.disabled: return None
        return pfile
    pdir = project_dir(fname)
    if pdir is None: return None
    # Reuse the Project when another open file shares the same root dir
    project = None
    for f in files.values():
        if f.project.dir == pdir:
            project = f.project
            break
    if project is None: project = Project(pdir)
    pfile = files[fname] = ProjectFile(fname, view, project)
    if project.disabled: return None
    return pfile
def project_dir(fname):
    """
    Find the project root for fname: the nearest enclosing directory that
    contains a ".tern-project" file.  Falls back to the file's own directory
    when no marker is found, or None when that directory doesn't exist.
    """
    base = os.path.dirname(fname)
    if not os.path.isdir(base):
        return None
    cur = base
    # Strip one char before dirname() so a trailing separator can't make
    # dirname return the same path again
    parent = os.path.dirname(cur[:-1])
    while parent:
        if os.path.isfile(os.path.join(cur, ".tern-project")):
            return cur
        cur = parent
        parent = os.path.dirname(cur[:-1])
    return base
def pfile_modified(pfile, view):
    """Mark the file dirty and schedule a debounced send to the Tern server."""
    pfile.dirty = True
    now = time.time()
    # Debounce: only (re)schedule the save when the previous edit was
    # more than half a second ago
    if now - pfile.last_modified > .5:
        pfile.last_modified = now
        if is_st2:
            sublime.set_timeout(lambda: maybe_save_pfile(pfile, view, now), 5000)
        else:
            sublime.set_timeout_async(lambda: maybe_save_pfile(pfile, view, now), 5000)
    # Invalidate the caches when the edit happened before the cached position
    if pfile.cached_completions and len(view.sel()) > 0 and sel_start(view.sel()[0]) < pfile.cached_completions[0]:
        pfile.cached_completions = None
    if pfile.cached_arguments and len(view.sel()) >0 and sel_start(view.sel()[0]) < pfile.cached_arguments[0]:
        pfile.cached_arguments = None
def maybe_save_pfile(pfile, view, timestamp):
    """Send the buffer only if no further edits happened since *timestamp*."""
    unchanged = pfile.last_modified == timestamp
    if unchanged and pfile.dirty:
        send_buffer(pfile, view)
def server_port(project, ignored=None):
    """
    Return (port, known_alive) for the project's Tern server, starting one
    when needed.  *ignored* is a port known to be stale; encountering it
    again forces a restart.
    """
    if project.port is not None and project.port != ignored:
        return (project.port, True)
    if project.port == ignored:
        # The port we knew about is dead -- drop the old server process
        kill_server(project)
    # A server started outside this plugin advertises itself in .tern-port
    port_file = os.path.join(project.dir, ".tern-port")
    if os.path.isfile(port_file):
        port = int(open(port_file, "r").read())
        if port != ignored:
            project.port = port
            return (port, True)
    started = start_server(project)
    if started is not None:
        project.port = started
    return (started, False)
def start_server(project):
    """
    Launch a Tern server for the project and return its port, or None on
    failure.  Failed starts are throttled to one attempt per 30 seconds.
    """
    if not tern_command: return None
    if time.time() - project.last_failed < 30: return None
    env = None
    if platform.system() == "Darwin":
        # GUI-launched processes on macOS lack /usr/local/bin in PATH
        env = os.environ.copy()
        env["PATH"] += ":/usr/local/bin"
    proc = subprocess.Popen(tern_command + tern_arguments, cwd=project.dir, env=env,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=windows)
    output = ""
    # Read the server's stdout until it announces its port (or exits)
    while True:
        line = proc.stdout.readline().decode("utf-8")
        if not line:
            sublime.error_message("Failed to start server" + (output and ":\n" + output))
            project.last_failed = time.time()
            return None
        match = re.match("Listening on port (\\d+)", line)
        if match:
            project.proc = proc
            return int(match.group(1))
        else:
            output += line
def kill_server(project):
  """Shut down the project's tern server process, if one is running."""
  proc = project.proc
  if proc is None:
    return
  # Closing stdin tells the server to exit; wait() reaps the process.
  proc.stdin.close()
  proc.wait()
  project.proc = None
def relative_file(pfile):
  """Return pfile's path relative to its project directory (no leading separator)."""
  prefix_length = len(pfile.project.dir) + 1
  return pfile.name[prefix_length:]
def buffer_fragment(view, pos):
  """Pick a region of the buffer around `pos` to send as a partial file.

  Starts near pos - 1000 (clamped to pos - 1500) and then widens the start
  back to the least-indented "function" line before `pos`, on the heuristic
  that lower indentation means an enclosing function.
  """
  region = None
  for js_region in view.find_by_selector("source.js"):
    if js_region.a <= pos and js_region.b >= pos:
      region = js_region
      break
  # Cursor not inside a JS region: return an empty region at pos.
  if region is None: return sublime.Region(pos, pos)
  start = view.line(max(region.a, pos - 1000)).a
  if start < pos - 1500: start = pos - 1500
  cur = start
  min_indent = 10000
  while True:
    next = view.find("\\bfunction\\b", cur)
    # (-1, -1) is how a failed find is reported on some API versions.
    if next is None or next.b > pos or (next.a == -1 and next.b == -1): break
    line = view.line(next.a)
    if line.a < pos - 1500: line = sublime.Region(pos - 1500, line.b)
    indent = count_indentation(view.substr(line))
    if indent < min_indent:
      min_indent = indent
      start = line.a
    cur = line.b
  # Include up to 500 characters after the position, staying inside the region.
  return sublime.Region(start, min(pos + 500, region.b))
def count_indentation(line):
  """Return the leading-whitespace width of `line`, counting a tab as 4."""
  width = 0
  for ch in line:
    if ch == " ":
      width += 1
    elif ch == "\t":
      width += 4
    else:
      break
  return width
def sel_start(sel):
  """Return the lower buffer offset of a (possibly reversed) selection."""
  return sel.a if sel.a <= sel.b else sel.b
def sel_end(sel):
  """Return the higher buffer offset of a (possibly reversed) selection."""
  return sel.b if sel.a <= sel.b else sel.a
class Req_Error(Exception):
  """Error response returned by the tern server for an HTTP request."""
  def __init__(self, message):
    # Decoded body of the server's error response.
    self.message = message
  def __str__(self):
    return self.message
# Host used to reach the local tern server. NOTE(review): presumably the
# numeric loopback is used on Windows to avoid slow "localhost" name
# resolution there -- confirm.
localhost = (windows and "127.0.0.1") or "localhost"
def make_request_py2():
  """Build the Python 2 (urllib2-based) request function.

  Returns f(port, doc): POSTs `doc` as JSON to the local tern server and
  returns the decoded JSON reply; server HTTP errors are re-raised as
  Req_Error carrying the response body.
  """
  import urllib2
  # Empty ProxyHandler: always connect directly, bypassing system proxies.
  opener = urllib2.build_opener(urllib2.ProxyHandler({}))
  def f(port, doc):
    try:
      # Third argument is the timeout (1 second) so a hung server
      # cannot block the editor.
      req = opener.open("http://" + localhost + ":" + str(port) + "/", json.dumps(doc), 1)
      return json.loads(req.read())
    except urllib2.HTTPError as error:
      raise Req_Error(error.read())
  return f
def make_request_py3():
  """Build the Python 3 (urllib.request-based) request function.

  Returns f(port, doc): POSTs `doc` as UTF-8 JSON to the local tern server
  and returns the decoded JSON reply; readable URL errors are re-raised as
  Req_Error carrying the decoded response body.
  """
  import urllib.request, urllib.error
  # Empty ProxyHandler: always connect directly, bypassing system proxies.
  opener = urllib.request.build_opener(urllib.request.ProxyHandler({}))
  def f(port, doc):
    try:
      # Third argument is the timeout (1 second) so a hung server
      # cannot block the editor.
      req = opener.open("http://" + localhost + ":" + str(port) + "/", json.dumps(doc).encode("utf-8"), 1)
      return json.loads(req.read().decode("utf-8"))
    except urllib.error.URLError as error:
      if hasattr(error, "read"):
        # HTTP-level error with a body: surface the server's message.
        raise Req_Error(error.read().decode("utf-8"))
      else:
        raise error
  return f
# Select the request implementation matching the running Python major version.
if python3:
  make_request = make_request_py3()
else:
  make_request = make_request_py2()
def view_js_text(view):
  """Concatenate the JavaScript portions of `view`, blanking non-JS gaps.

  Each gap is replaced by ";" plus whitespace of the same shape (newlines
  preserved) so buffer offsets and line numbers still line up.
  """
  parts = []
  previous_end = 0
  for region in view.find_by_selector("source.js"):
    if region.a > previous_end:
      gap = view.substr(sublime.Region(previous_end + 1, region.a))
      parts.append(";" + re.sub(r'[^\n]', " ", gap))
    parts.append(view.substr(region))
    previous_end = region.b
  return "".join(parts)
def run_command(view, query, pos=None, fragments=True, silent=False):
  """Run the query on the Tern server.
  See default queries at http://ternjs.net/doc/manual.html#protocol.

  Args:
    view: the view the query concerns.
    query: a query dict, or a plain string shorthand for {"type": query}.
    pos: buffer offset the query applies to (defaults to the cursor).
    fragments: allow sending only a fragment of large (>8000 char) buffers.
    silent: suppress the error dialog on server-reported errors.
  Returns the decoded server response, or None on failure.
  """
  pfile = get_pfile(view)
  if pfile is None or pfile.project.disabled or len(view.sel()) == 0: return
  if isinstance(query, str): query = {"type": query}
  if (pos is None): pos = view.sel()[0].b
  port, port_is_old = server_port(pfile.project)
  if port is None: return
  doc = {"query": query, "files": []}
  if not pfile.dirty:
    # Server copy is current: reference the file by name, send nothing.
    # BUG FIX: the branches below were independent if/else statements,
    # which unconditionally overwrote this fast path and re-sent the
    # buffer even when clean; they must be an elif chain.
    fname, sending_file = (relative_file(pfile), False)
  elif fragments and view.size() > 8000:
    # Large buffer: send only a fragment around the query position.
    region = buffer_fragment(view, pos)
    doc["files"].append({"type": "part",
                         "name": relative_file(pfile),
                         "offset": region.a,
                         "text": view.substr(region)})
    pos -= region.a
    fname, sending_file = ("#0", False)
  else:
    doc["files"].append({"type": "full",
                         "name": relative_file(pfile),
                         "text": view_js_text(view)})
    fname, sending_file = ("#0", True)
  query["file"] = fname
  query["end"] = pos
  data = None
  try:
    data = make_request(port, doc)
  except Req_Error as e:
    if not silent: report_error(str(e), pfile.project)
    return None
  except:
    pass
  if data is None and port_is_old:
    # The cached port may have gone stale; retry once with a fresh server.
    try:
      port = server_port(pfile.project, port)[0]
      if port is None: return
      data = make_request(port, doc)
      if data is None: return None
    except Exception as e:
      if not silent: report_error(str(e), pfile.project)
  if sending_file: pfile.dirty = False
  return data
def send_buffer(pfile, view):
  """Push the full buffer text to the tern server; return True on success."""
  port = server_port(pfile.project)[0]
  if port is None:
    return False
  payload = {"files": [{"type": "full",
                        "name": relative_file(pfile),
                        "text": view_js_text(view)}]}
  try:
    make_request(port, payload)
  except:
    return False
  # The server now has an up-to-date copy of the buffer.
  pfile.dirty = False
  return True
def report_error(message, project):
  """Show an error dialog; the user may opt to disable tern for the project."""
  wants_disable = sublime.ok_cancel_dialog(message, "Disable Tern")
  if wants_disable:
    project.disabled = True
def completion_icon(type):
  """Map a tern type string to the tab-prefixed marker shown in completions."""
  if type is None or type == "?":
    return "\t? "
  if type.startswith("fn("):
    return "\tfn "
  if type.startswith("["):
    return "\t[] "
  simple_markers = {"number": "\tnum ", "string": "\tstr ", "bool": "\tbool "}
  # Anything else (objects etc.) gets the generic marker.
  return simple_markers.get(type, "\t{} ")
def fn_completion_icon(arguments, retval):
  """Build the display suffix for a function completion: "(args)retval\\tfn "."""
  return_part = retval if retval is not None else ""
  return "(%s)%s\tfn " % (", ".join(arguments), return_part)
def create_arg_str(arguments):
  """Build a snippet placeholder string ("${1:a}, ${2:b}") from argument names.

  "$" inside a name is escaped so it is not treated as a snippet field;
  an empty argument list yields a single empty placeholder "${1}".
  """
  if not arguments:
    return "${1}"
  fields = ["${%d:%s}" % (index, name.replace("$", "\$"))
            for index, name in enumerate(arguments, 1)]
  return ", ".join(fields)
def get_arguments(type):
  """Extract top-level argument names from a tern "fn(...)" type string.

  Nested "(){}[]" groups are skipped so object- or function-typed
  parameters (e.g. "a: {...}") do not contribute spurious names.
  """
  # Keep only the text between "fn(" and the first ")", with a sentinel
  # comma appended so the final argument is flushed like the others.
  type = type[3:type.find(')')] + ",'"
  names = []
  start = 0
  idx = 0
  nesting = 0        # depth inside (), {} or []
  emitted = False    # current argument's name already appended (at its ':')
  for ch in type:
    if nesting == 0 and ch == ',':
      if emitted:
        emitted = False
      elif start != idx:
        # Untyped argument: flush whatever accumulated since the last comma.
        names.append(type[start:idx])
      start = idx + 1
    elif nesting == 0 and ch == ':':
      emitted = True
      names.append(type[start:idx])
    elif ch in '{([':
      nesting += 1
    elif ch in '})]':
      nesting -= 1
    elif ch == ' ':
      start = idx + 1
    idx += 1
  return names
def ensure_completions_cached(pfile, view):
  """Return (completions, fresh) for the cursor position.

  Reuses the cached completion list while the user has only typed further
  word characters after the cached start; otherwise queries the server.
  """
  if len(view.sel()) == 0: return (None, False)
  pos = view.sel()[0].b
  if pfile.cached_completions is not None:
    c_start, c_word, c_completions = pfile.cached_completions
    if c_start <= pos:
      slice = view.substr(sublime.Region(c_start, pos))
      # Cache stays valid only while the typed prefix grows with word chars.
      if slice.startswith(c_word) and not re.match(".*\\W", slice):
        return (c_completions, False)
  data = run_command(view, {"type": "completions", "types": True, "includeKeywords": True})
  if data is None: return (None, False)
  completions = []
  # NOTE(review): completions_arity is never filled below, so the
  # concatenation near the end is a no-op -- possibly a leftover feature.
  completions_arity = []
  for rec in data["completions"]:
    # Escape "$" so names are safe to use inside snippet text.
    rec_name = rec.get('name').replace('$', '\\$')
    rec_type = rec.get("type", None)
    if arg_completion_enabled and rec_type is not None and rec_type.startswith("fn("):
      # Compact the return type to a short display marker.
      retval = parse_function_type(rec).get('retval')
      if retval is None or retval == "()":
        retval = ""
      elif retval.startswith("{"):
        retval = "{}"
      elif retval.startswith("["):
        retval = "[]"
      if retval != "":
        retval = " -> " + retval
      arguments = get_arguments(rec_type)
      fn_name = rec_name + "(" + create_arg_str(arguments) + ")"
      completions.append((rec.get("name") + fn_completion_icon(arguments, retval), fn_name))
    else:
      completions.append((rec.get("name") + completion_icon(rec_type), rec_name))
  # put the auto completions of functions with lower arity at the bottom of
  # the autocomplete list so they don't clog up the completions at the top
  completions = completions + completions_arity
  pfile.cached_completions = (data["start"], view.substr(sublime.Region(data["start"], pos)), completions)
  return (completions, True)
def locate_call(view):
  """Find the call expression surrounding the cursor.

  Scans backwards (up to 500 chars) from the caret, tracking bracket depth.
  Returns (offset_of_open_paren, argument_index), or (None, 0) when the
  cursor is not inside a call (or a non-empty selection exists).
  """
  if len(view.sel()) == 0: return (None, 0)
  sel = view.sel()[0]
  if sel.a != sel.b: return (None, 0)
  context = view.substr(sublime.Region(max(0, sel.b - 500), sel.b))
  pos = len(context)
  depth = argpos = 0
  while pos > 0:
    pos -= 1
    ch = context[pos]
    if ch == "}" or ch == ")" or ch == "]":
      depth += 1
    elif ch == "{" or ch == "(" or ch == "[":
      if depth > 0: depth -= 1
      # An unmatched "(" at depth 0 is the call's opening paren;
      # any other unmatched opener means we are not in a call.
      elif ch == "(": return (pos + sel.b - len(context), argpos)
      else: return (None, 0)
    elif ch == "," and depth == 0:
      # Top-level commas before the caret count the argument index.
      argpos += 1
  return (None, 0)
def show_argument_hints(pfile, view):
  """Show argument hints for the call at the cursor, using the cache when possible."""
  call_start, argpos = locate_call(view)
  if call_start is None:
    # Not inside a call: clear any existing hint display.
    return render_argument_hints(pfile, view, None, 0)
  cached = pfile.cached_arguments
  if cached is not None and cached[0] == call_start:
    return render_argument_hints(pfile, view, cached[1], argpos)
  data = run_command(view, {"type": "type", "preferFunction": True}, call_start, silent=True)
  if data is None:
    return
  parsed = parse_function_type(data)
  if parsed is None:
    return
  parsed['url'] = data.get('url', None)
  parsed['doc'] = data.get('doc', None)
  pfile.cached_arguments = (call_start, parsed)
  render_argument_hints(pfile, view, parsed, argpos)
def render_argument_hints(pfile, view, ftype, argpos):
  """Draw the argument-hint display, or clear it when ftype is None."""
  if ftype is None:
    renderer.clean(pfile, view)
    return
  renderer.render_arghints(pfile, view, ftype, argpos)
def parse_function_type(data):
  """Parse a tern "fn(...)" type string into name/args/retval.

  Args:
    data: server response dict; reads data["type"] and the optional
      "exprName"/"name" keys for the display name.
  Returns {"name": str, "args": [(name, type), ...], "retval": str or None},
  or None when data["type"] is not a function type.
  """
  type = data["type"]
  if not re.match("fn\\(", type): return None
  pos = 3
  args, retval = ([], None)
  while pos < len(type) and type[pos] != ")":
    colon = type.find(":", pos)
    name = "?"
    if colon != -1:
      name = type[pos:colon]
      # Only a plain identifier before the colon is an argument name.
      if not re.match("[\\w_$]+$", name): name = "?"
      else: pos = colon + 2
    type_start = pos
    depth = 0
    # Scan this argument's type, skipping nested (), [] and {} groups.
    while pos < len(type):
      ch = type[pos]
      if ch == "(" or ch == "[" or ch == "{":
        depth += 1
      elif ch == ")" or ch == "]" or ch == "}":
        if depth > 0: depth -= 1
        else: break
      elif ch == "," and depth == 0:
        break
      pos += 1
    args.append((name, type[type_start:pos]))
    # BUG FIX: guard the index -- a malformed type with no closing ")"
    # left pos == len(type) here and raised IndexError.
    if pos < len(type) and type[pos] == ",": pos += 2
  if type[pos:pos + 5] == ") -> ":
    retval = type[pos + 5:]
  return {"name": data.get("exprName", None) or data.get("name", None) or "fn",
          "args": args,
          "retval": retval}
# Stack of "file:row:col" positions recorded before each jump-to-definition,
# so TernJumpBack can return to them (capped at 50 entries).
jump_stack = []
class TernArghintCommand(sublime_plugin.TextCommand):
  """Insert a hint message at the top of the command's view."""

  def run(self, edit, **args):
    message = args.get('msg', '')
    self.view.insert(edit, 0, message)
class TernJumpToDef(sublime_plugin.TextCommand):
  """Jump to the definition of the symbol under the cursor.

  Falls back to opening the documentation URL when tern only knows an
  external definition, and records the current position so TernJumpBack
  can return here.
  """

  def run(self, edit, **args):
    data = run_command(self.view, {"type": "definition", "lineCharPositions": True})
    if data is None: return
    file = data.get("file", None)
    # BUG FIX: the original read the undefined global `view` here; inside
    # a TextCommand the view is `self.view`.
    if file is not None and len(self.view.sel()) > 0:
      # Found an actual definition: remember where we came from.
      row, col = self.view.rowcol(self.view.sel()[0].b)
      cur_pos = self.view.file_name() + ":" + str(row + 1) + ":" + str(col + 1)
      jump_stack.append(cur_pos)
      if len(jump_stack) > 50: jump_stack.pop(0)
      real_file = (os.path.join(get_pfile(self.view).project.dir, file) +
                   ":" + str(data["start"]["line"] + 1) + ":" + str(data["start"]["ch"] + 1))
      sublime.active_window().open_file(real_file, sublime.ENCODED_POSITION)
    else:
      url = data.get("url", None)
      if url is None:
        sublime.error_message("Could not find a definition")
      else:
        webbrowser.open(url)
class TernJumpBack(sublime_plugin.TextCommand):
  """Return to the position recorded before the last jump-to-definition."""

  def run(self, edit, **args):
    if not jump_stack:
      return
    target = jump_stack.pop()
    sublime.active_window().open_file(target, sublime.ENCODED_POSITION)
class TernSelectVariable(sublime_plugin.TextCommand):
  """Multi-select every file-local use of the variable under the cursor."""

  def run(self, edit, **args):
    data = run_command(self.view, "refs", fragments=False)
    if data is None:
      return
    file = relative_file(get_pfile(self.view))
    local_path = file.replace('\\', '/')
    shown_error = False
    regions = []
    for ref in data["refs"]:
      if ref["file"].replace('\\', '/') != local_path:
        # Uses in other files cannot be selected here; warn only once.
        if not shown_error:
          sublime.error_message("Not all uses of this variable are file-local. Selecting only local ones.")
          shown_error = True
      else:
        regions.append(sublime.Region(ref["start"], ref["end"]))
    selection = self.view.sel()
    selection.clear()
    for region in regions:
      selection.add(region)
class TernDescribe(sublime_plugin.TextCommand):
  """Show documentation for the symbol at the cursor.

  Function types are rendered as argument hints; everything else as a
  plain type/doc description.
  """

  def run(self, edit, **args):
    view = self.view
    pfile = get_pfile(view)
    data = run_command(view, {"type": "documentation"})
    if data is None:
      return
    parsed = parse_function_type(data)
    if parsed is None:
      renderer.render_description(pfile, view,
                                  data["type"], data.get("doc", None),
                                  data.get("url", None))
      return
    parsed['url'] = data.get('url', None)
    parsed['doc'] = data.get('doc', None)
    render_argument_hints(pfile, view, parsed, None)
class TernDisableProject(sublime_plugin.TextCommand):
  """Stop using tern for the current view's project."""

  def run(self, edit, **args):
    # BUG FIX: `view` was an undefined global (must be self.view), and the
    # flag was inverted -- disabling must set disabled to True (compare the
    # `pfile.project.disabled` checks in run_command).
    pfile = get_pfile(self.view)
    pfile.project.disabled = True
class TernEnableProject(sublime_plugin.TextCommand):
  """Re-enable tern for the current view's project."""

  def run(self, edit, **args):
    # BUG FIX: `view` was an undefined global (must be self.view), and the
    # flag was inverted -- enabling must clear the disabled flag.
    pfile = get_pfile(self.view)
    pfile.project.disabled = False
# Fetch a setting from the package settings file, falling back to
# Preferences.sublime-settings for backwards compatibility.
def get_setting(key, default):
  """Read `key` from Tern.sublime-settings, else Preferences.sublime-settings, else `default`."""
  new_settings = sublime.load_settings("Tern.sublime-settings")
  value = new_settings.get(key, None)
  if value is not None:
    return value
  old_settings = sublime.load_settings("Preferences.sublime-settings")
  return old_settings.get(key, default)
# Absolute directory of this plugin file; used to locate the bundled
# node_modules/tern installation and as the npm working directory.
plugin_dir = os.path.abspath(os.path.dirname(__file__))
def plugin_loaded():
  """Read settings, pick a renderer, and resolve the tern server command.

  Offers to `npm install` the bundled tern when no tern_command setting is
  given and node_modules/tern is missing.
  """
  global arghints_enabled, renderer, tern_command, tern_arguments
  global arg_completion_enabled
  arghints_enabled = get_setting("tern_argument_hints", False)
  arg_completion_enabled = get_setting("tern_argument_completion", False)
  # Tooltip output needs View.show_popup; older API versions fall back to
  # the status bar.
  if "show_popup" in dir(sublime.View):
    default_output_style = "tooltip"
  else:
    default_output_style = "status"
  # "tern_argument_hints_type" is the legacy name of the same setting.
  output_style = get_setting("tern_output_style", get_setting("tern_argument_hints_type", default_output_style))
  renderer = create_renderer(output_style)
  tern_arguments = get_setting("tern_arguments", [])
  if not isinstance(tern_arguments, list):
    tern_arguments = [tern_arguments]
  tern_command = get_setting("tern_command", None)
  if tern_command is None:
    if not os.path.isdir(os.path.join(plugin_dir, "node_modules/tern")):
      if sublime.ok_cancel_dialog(
          "It appears Tern has not been installed. Do you want tern_for_sublime to try and install it? "
          "(Note that this will only work if you already have node.js and npm installed on your system.)"
          "\n\nTo get rid of this dialog, either uninstall tern_for_sublime, or set the tern_command setting.",
          "Yes, install."):
        try:
          # check_output is preferred so the error dialog can include npm's
          # output; older Pythons only have check_call.
          if hasattr(subprocess, "check_output"):
            subprocess.check_output(["npm", "--loglevel=silent", "install"], cwd=plugin_dir, shell=windows)
          else:
            subprocess.check_call(["npm", "--loglevel=silent", "install"], cwd=plugin_dir, shell=windows)
        # NOTE(review): CalledProcessError is referenced unqualified here --
        # confirm it is imported (e.g. `from subprocess import
        # CalledProcessError`) at the top of the file, else this except
        # clause itself raises NameError.
        except (IOError, OSError, CalledProcessError) as e:
          msg = "Installation failed. Try doing 'npm install' manually in " + plugin_dir + "."
          if hasattr(e, "output") and e.output is not None:
            msg += "\nError message was:\n\n" + e.output
          if hasattr(e, "returncode"):
            msg += "\nReturn code was: " + str(e.returncode)
          sublime.error_message(msg)
          return
    tern_command = [get_setting("node_path", "node"), os.path.join(plugin_dir, "node_modules/tern/bin/tern"), "--no-port-file"]
def cleanup():
  """Kill every started tern server process when the editor exits."""
  for pfile in files.values():
    kill_server(pfile.project)
# Make sure spawned tern servers do not outlive the editor process.
atexit.register(cleanup)
# NOTE(review): presumably Sublime Text 2 never calls plugin_loaded()
# itself, so it is scheduled manually shortly after import -- confirm.
if is_st2:
  sublime.set_timeout(plugin_loaded, 500)
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import logging
import urlparse
from common import chrome_proxy_measurements as measurements
from common.chrome_proxy_measurements import ChromeProxyValidation
from integration_tests import chrome_proxy_metrics as metrics
from metrics import loading
from telemetry.core import exceptions, util
from telemetry.page import page_test
class ChromeProxyDataSaving(page_test.PageTest):
  """Chrome proxy data saving measurement."""
  def __init__(self, *args, **kwargs):
    super(ChromeProxyDataSaving, self).__init__(*args, **kwargs)
    self._metrics = metrics.ChromeProxyMetric()
    # Whether to enable the data reduction proxy for this measurement.
    self._enable_proxy = True

  def CustomizeBrowserOptions(self, options):
    if self._enable_proxy:
      options.AppendExtraBrowserArgs('--enable-spdy-proxy-auth')

  def WillNavigateToPage(self, page, tab):
    if self._enable_proxy:
      # Do not start measuring until traffic actually flows via the proxy.
      measurements.WaitForViaHeader(tab)
    # Start from a cold cache so data-saving numbers are comparable.
    tab.ClearCache(force=True)
    self._metrics.Start(page, tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    # Wait for the load event.
    tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)
    self._metrics.Stop(page, tab)
    self._metrics.AddResultsForDataSaving(tab, results)
class ChromeProxyHeaders(ChromeProxyValidation):
  """Validates the response headers received through the proxy."""

  def __init__(self):
    super(ChromeProxyHeaders, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def AddResults(self, tab, results):
    # Delegate to the header-validation metric aggregation.
    self._metrics.AddResultsForHeaderValidation(tab, results)
class ChromeProxyBypass(ChromeProxyValidation):
  """Validates that proxy bypass responses are handled correctly."""

  def __init__(self):
    super(ChromeProxyBypass, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def AddResults(self, tab, results):
    # Delegate to the bypass-specific metric aggregation.
    self._metrics.AddResultsForBypass(tab, results)
class ChromeProxyHTTPSBypass(ChromeProxyValidation):
  """Validates bypass behavior for HTTPS responses."""

  def __init__(self):
    super(ChromeProxyHTTPSBypass, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def AddResults(self, tab, results):
    # Delegate to the HTTPS-bypass metric aggregation.
    self._metrics.AddResultsForHTTPSBypass(tab, results)
class ChromeProxyYouTube(ChromeProxyValidation):
  """Validates YouTube video playback through the proxy."""

  def __init__(self):
    super(ChromeProxyYouTube, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def AddResults(self, tab, results):
    # Delegate to the YouTube-specific metric aggregation.
    self._metrics.AddResultsForYouTube(tab, results)
class ChromeProxyHTML5Test(ChromeProxyValidation):
  """Validates the html5test page when loaded through the proxy."""

  def __init__(self):
    super(ChromeProxyHTML5Test, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def AddResults(self, tab, results):
    # Delegate to the html5test-specific metric aggregation.
    self._metrics.AddResultsForHTML5Test(tab, results)
class ChromeProxyCorsBypass(ChromeProxyValidation):
  """Validates that bypasses also apply to CORS (XHR) requests."""

  def __init__(self):
    super(ChromeProxyCorsBypass, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def ValidateAndMeasurePage(self, page, tab, results):
    # The test page sets window.xhrRequestCompleted to true when its XHR
    # fetch finishes; wait for that before the normal validation runs.
    tab.WaitForJavaScriptExpression('window.xhrRequestCompleted', 300)
    super(ChromeProxyCorsBypass, self).ValidateAndMeasurePage(page, tab, results)

  def AddResults(self, tab, results):
    # Delegate to the CORS-bypass metric aggregation.
    self._metrics.AddResultsForCorsBypass(tab, results)
class ChromeProxyBlockOnce(ChromeProxyValidation):
  """Validates handling of block-once responses."""

  def __init__(self):
    super(ChromeProxyBlockOnce, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def AddResults(self, tab, results):
    # Delegate to the block-once metric aggregation.
    self._metrics.AddResultsForBlockOnce(tab, results)
class ChromeProxySafebrowsingOn(ChromeProxyValidation):
  """Validates safebrowsing behavior with the feature enabled."""

  def __init__(self):
    super(ChromeProxySafebrowsingOn, self).__init__(
        metrics=metrics.ChromeProxyMetric())

  def AddResults(self, tab, results):
    # Delegate to the safebrowsing-on metric aggregation.
    self._metrics.AddResultsForSafebrowsingOn(tab, results)
class ChromeProxySafebrowsingOff(ChromeProxyValidation):
  """Validates safebrowsing behavior with the feature disabled."""

  def __init__(self):
    super(ChromeProxySafebrowsingOff, self).__init__(
        metrics=metrics.ChromeProxyMetric())

  def AddResults(self, tab, results):
    # Delegate to the safebrowsing-off metric aggregation.
    self._metrics.AddResultsForSafebrowsingOff(tab, results)
# NOTE(review): presumably a dummy session/auth token accepted by the test
# proxy -- confirm against the test server's expectations.
_FAKE_PROXY_AUTH_VALUE = 'aabbccdd3b7579186c1b0620614fdb1f0000ffff'
# Host of the helper app that can echo arbitrary response overrides.
_TEST_SERVER = 'chromeproxy-test.appspot.com'
_TEST_SERVER_DEFAULT_URL = 'http://' + _TEST_SERVER + '/default'
# We rely on the chromeproxy-test server to facilitate some of the tests.
# The test server code is at <TBD location> and runs at _TEST_SERVER
#
# The test server allows requests to override the response status, headers,
# and body through query parameters. See GetResponseOverrideURL.
def GetResponseOverrideURL(url=_TEST_SERVER_DEFAULT_URL, respStatus=0,
                           respHeader="", respBody=""):
  """ Compose the request URL with query parameters to override
  the chromeproxy-test server response.

  Args:
    url: base test-server URL.
    respStatus: if > 0, status code the server should respond with.
    respHeader: raw header block to inject (base64-encoded into the query).
    respBody: response body to return (base64-encoded into the query).
  Returns `url` unchanged when no overrides are requested, otherwise `url`
  with the override query parameters appended.
  """
  queries = []
  if respStatus > 0:
    queries.append('respStatus=%d' % respStatus)
  if respHeader:
    queries.append('respHeader=%s' % base64.b64encode(respHeader))
  if respBody:
    queries.append('respBody=%s' % base64.b64encode(respBody))
  if len(queries) == 0:
    return url
  # BUG FIX: removed a dead '"&".join(queries)' statement whose result was
  # silently discarded.
  params = "&".join(queries)
  # Append with '&' when the URL already carries a query string, else '?'.
  if urlparse.urlparse(url).query:
    return url + '&' + params
  else:
    return url + '?' + params
class ChromeProxyHTTPFallbackProbeURL(ChromeProxyValidation):
  """Validates proxy fallback when the secure-proxy probe fails.

  The probe URL is pointed at a response that is not 'OK', so Chrome is
  expected to fall back to the HTTP proxy.
  """

  def __init__(self):
    super(ChromeProxyHTTPFallbackProbeURL, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def CustomizeBrowserOptions(self, options):
    super(ChromeProxyHTTPFallbackProbeURL,
          self).CustomizeBrowserOptions(options)
    # Point the secure proxy check at the google.com favicon: its body is
    # not "OK", so the check fails, and it loads reliably fast (the
    # chromeproxy-test server has been slow/flaky for this purpose).
    options.AppendExtraBrowserArgs(
        '--data-reduction-proxy-secure-proxy-check-url='
        'http://www.google.com/favicon.ico')

  def AddResults(self, tab, results):
    self._metrics.AddResultsForHTTPFallback(tab, results)
class ChromeProxyHTTPFallbackViaHeader(ChromeProxyValidation):
  """Validates proxy fallback on a missing Via header.

  The primary proxy is set to the chromeproxy-test server, whose responses
  lack the expected Via header; Chrome should mark it bad and fall back.
  """

  def __init__(self):
    super(ChromeProxyHTTPFallbackViaHeader, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def CustomizeBrowserOptions(self, options):
    super(ChromeProxyHTTPFallbackViaHeader,
          self).CustomizeBrowserOptions(options)
    options.AppendExtraBrowserArgs('--ignore-certificate-errors')
    # Configure the test server as the primary Data Reduction Proxy. The
    # test doesn't know whether Chrome uses the Data Saver API, so the
    # flags for both configurations are set.
    options.AppendExtraBrowserArgs(
        '--spdy-proxy-auth-origin=http://%s' % _TEST_SERVER)
    options.AppendExtraBrowserArgs(
        '--data-reduction-proxy-http-proxies='
        'http://%s;http://compress.googlezip.net' % _TEST_SERVER)

  def AddResults(self, tab, results):
    self._metrics.AddResultsForHTTPFallback(tab, results)
class ChromeProxyClientType(ChromeProxyValidation):
  """Correctness measurement for Chrome-Proxy header client type directives."""

  def __init__(self):
    super(ChromeProxyClientType, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)
    # Client type observed on the first page; reused for later pages.
    self._chrome_proxy_client_type = None

  def AddResults(self, tab, results):
    # The first page in the set reveals the client type; later pages are
    # then judged (bypassed or not) against that recorded value.
    if not self._chrome_proxy_client_type:
      client_type = self._metrics.GetClientTypeFromRequests(tab)
      if client_type:
        self._chrome_proxy_client_type = client_type
    self._metrics.AddResultsForClientType(
        tab, results, self._chrome_proxy_client_type,
        self._page.bypass_for_client_type)
class ChromeProxyLoFi(ChromeProxyValidation):
  """Validates the Lo-Fi directive in the Chrome-Proxy header."""

  def __init__(self):
    super(ChromeProxyLoFi, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def CustomizeBrowserOptions(self, options):
    super(ChromeProxyLoFi, self).CustomizeBrowserOptions(options)
    # Force Lo-Fi on regardless of network conditions.
    options.AppendExtraBrowserArgs('--data-reduction-proxy-lo-fi=always-on')

  def AddResults(self, tab, results):
    self._metrics.AddResultsForLoFi(tab, results)
class ChromeProxyCacheLoFiDisabled(ChromeProxyValidation):
  """
  Correctness measurement for Lo-Fi placeholder is not loaded from cache when a
  page is reloaded with LoFi disabled. First a test page is opened with LoFi and
  chrome proxy enabled. This allows Chrome to cache the LoFi placeholder image.
  The browser is restarted with LoFi disabled and the same test page is loaded.
  This second page load should not pick the LoFi placeholder from cache and
  original image should be loaded. This test should be run with
  --profile-type=default command line for the same user profile and cache to be
  used across the two page loads.
  """
  def __init__(self):
    super(ChromeProxyCacheLoFiDisabled, self).__init__(
        restart_after_each_page=True,
        metrics=metrics.ChromeProxyMetric(),
        # Cache must survive into the second load for the test to be valid.
        clear_cache_before_each_run=False)

  def AddResults(self, tab, results):
    self._metrics.AddResultsForLoFiCache(tab, results, self._is_lo_fi_enabled)

  def WillStartBrowser(self, platform):
    super(ChromeProxyCacheLoFiDisabled, self).WillStartBrowser(platform)
    # self._page is falsy only before the first page has been visited.
    if not self._page:
      # First page load, enable LoFi and chrome proxy.
      self.options.AppendExtraBrowserArgs(
          '--data-reduction-proxy-lo-fi=always-on')
      self._is_lo_fi_enabled = True
    else:
      # Second page load, disable LoFi. Chrome proxy is still enabled.
      self.options.browser_options.extra_browser_args.discard(
          '--data-reduction-proxy-lo-fi=always-on')
      self._is_lo_fi_enabled = False

  def WillNavigateToPage(self, page, tab):
    super(ChromeProxyCacheLoFiDisabled, self).WillNavigateToPage(page, tab)
    if self._is_lo_fi_enabled:
      # Clear cache for the first page to pick LoFi image from server.
      tab.ClearCache(force=True)

  def DidNavigateToPage(self, page, tab):
    if not self._is_lo_fi_enabled:
      # Reload so the second visit is served from the (warm) cache path.
      tab.ExecuteJavaScript('window.location.reload()')
      util.WaitFor(tab.HasReachedQuiescence, 3)
class ChromeProxyCacheProxyDisabled(ChromeProxyValidation):
  """
  Correctness measurement for Lo-Fi placeholder is not loaded from cache when a
  page is reloaded with data reduction proxy disabled. First a test page is
  opened with LoFi and chrome proxy enabled. This allows Chrome to cache the
  LoFi placeholder image. The browser is restarted with chrome proxy disabled
  and the same test page is loaded. This second page load should not pick the
  LoFi placeholder from cache and original image should be loaded. This test
  should be run with --profile-type=default command line for the same user
  profile and cache to be used across the two page loads.
  """
  def __init__(self):
    super(ChromeProxyCacheProxyDisabled, self).__init__(
        restart_after_each_page=True,
        metrics=metrics.ChromeProxyMetric(),
        # Cache must survive into the second load for the test to be valid.
        clear_cache_before_each_run=False)

  def AddResults(self, tab, results):
    # NOTE(review): _is_chrome_proxy_enabled is never assigned in this
    # class -- presumably maintained by ChromeProxyValidation /
    # DisableChromeProxy; confirm.
    self._metrics.AddResultsForLoFiCache(tab, results,
                                         self._is_chrome_proxy_enabled)

  def WillStartBrowser(self, platform):
    super(ChromeProxyCacheProxyDisabled, self).WillStartBrowser(platform)
    # self._page is falsy only before the first page has been visited.
    if not self._page:
      # First page load, enable LoFi and chrome proxy.
      self.options.AppendExtraBrowserArgs(
          '--data-reduction-proxy-lo-fi=always-on')
    else:
      # Second page load, disable chrome proxy. LoFi is still enabled.
      self.DisableChromeProxy()

  def WillNavigateToPage(self, page, tab):
    super(ChromeProxyCacheProxyDisabled, self).WillNavigateToPage(page, tab)
    if self._is_chrome_proxy_enabled:
      # Clear cache for the first page to pick LoFi image from server.
      tab.ClearCache(force=True)

  def DidNavigateToPage(self, page, tab):
    if not self._is_chrome_proxy_enabled:
      # Reload so the second visit is served from the (warm) cache path.
      tab.ExecuteJavaScript('window.location.reload()')
      util.WaitFor(tab.HasReachedQuiescence, 3)
class ChromeProxyLoFiPreview(ChromeProxyValidation):
  """Validates the Lo-Fi preview directive in the Chrome-Proxy header."""

  def __init__(self):
    super(ChromeProxyLoFiPreview, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def CustomizeBrowserOptions(self, options):
    super(ChromeProxyLoFiPreview, self).CustomizeBrowserOptions(options)
    # Force Lo-Fi on and additionally enable the preview variant.
    options.AppendExtraBrowserArgs(
        '--data-reduction-proxy-lo-fi=always-on')
    options.AppendExtraBrowserArgs(
        '--enable-data-reduction-proxy-lo-fi-preview')

  def AddResults(self, tab, results):
    self._metrics.AddResultsForLoFiPreview(tab, results)
class ChromeProxyExpDirective(ChromeProxyValidation):
  """Validates experiment directives in the Chrome-Proxy header.

  Verifies that "exp=test" in the Chrome-Proxy request header causes a
  bypass on the experiment test page.
  """

  def __init__(self):
    super(ChromeProxyExpDirective, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def CustomizeBrowserOptions(self, options):
    super(ChromeProxyExpDirective, self).CustomizeBrowserOptions(options)
    options.AppendExtraBrowserArgs('--data-reduction-proxy-experiment=test')

  def AddResults(self, tab, results):
    # Only requests under /exp/ are expected to be bypassed.
    self._metrics.AddResultsForBypass(tab, results, url_pattern='/exp/')
class ChromeProxyPassThrough(ChromeProxyValidation):
  """Validates Chrome-Proxy pass-through directives.

  Verifies that "pass-through" in the Chrome-Proxy request header causes a
  resource to be loaded without Data Reduction Proxy transformations.
  """

  def __init__(self):
    super(ChromeProxyPassThrough, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def CustomizeBrowserOptions(self, options):
    # No extra flags needed beyond the base proxy configuration.
    super(ChromeProxyPassThrough, self).CustomizeBrowserOptions(options)

  def AddResults(self, tab, results):
    self._metrics.AddResultsForPassThrough(tab, results)
class ChromeProxyHTTPToDirectFallback(ChromeProxyValidation):
  """Correctness measurement for HTTP proxy fallback to direct."""
  def __init__(self):
    super(ChromeProxyHTTPToDirectFallback, self).__init__(
        restart_after_each_page=True,
        metrics=metrics.ChromeProxyMetric())

  def CustomizeBrowserOptions(self, options):
    super(ChromeProxyHTTPToDirectFallback,
          self).CustomizeBrowserOptions(options)
    # Set the primary proxy to something that will fail to be resolved so that
    # this test will run using the HTTP fallback proxy. The test doesn't know if
    # Chrome is configuring the DRP using the Data Saver API or not, so the
    # appropriate flags are set for both cases.
    options.AppendExtraBrowserArgs(
        '--spdy-proxy-auth-origin=http://nonexistent.googlezip.net')
    options.AppendExtraBrowserArgs(
        '--data-reduction-proxy-http-proxies='
        'http://nonexistent.googlezip.net;http://compress.googlezip.net')

  def WillNavigateToPage(self, page, tab):
    super(ChromeProxyHTTPToDirectFallback, self).WillNavigateToPage(page, tab)
    # Attempt to load a page through the nonexistent primary proxy in order to
    # cause a proxy fallback, and have this test run starting from the HTTP
    # fallback proxy.
    tab.Navigate(_TEST_SERVER_DEFAULT_URL)
    tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)

  def AddResults(self, tab, results):
    self._metrics.AddResultsForHTTPToDirectFallback(tab, results, _TEST_SERVER)
class ChromeProxyReenableAfterBypass(ChromeProxyValidation):
  """Validates that proxies are re-enabled after a bypass expires.

  The page triggers a bypass of all data reduction proxies for 1 to 5
  minutes; after waiting 5 minutes the proxy must be usable again.
  """

  def __init__(self):
    super(ChromeProxyReenableAfterBypass, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def AddResults(self, tab, results):
    self._metrics.AddResultsForReenableAfterBypass(
        tab, results, self._page.bypass_seconds_min,
        self._page.bypass_seconds_max)
class ChromeProxyReenableAfterSetBypass(ChromeProxyValidation):
  """Validates re-enabling proxies after a fixed-duration bypass.

  The page triggers a bypass of all data reduction proxies for 20 seconds.
  """

  def __init__(self):
    super(ChromeProxyReenableAfterSetBypass, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def AddResults(self, tab, results):
    self._metrics.AddResultsForReenableAfterSetBypass(
        tab, results, self._page.BYPASS_SECONDS)
class ChromeProxySmoke(ChromeProxyValidation):
  """Smoke measurement for basic chrome proxy correctness."""

  def __init__(self):
    super(ChromeProxySmoke, self).__init__(
        metrics=metrics.ChromeProxyMetric(),
        restart_after_each_page=True)

  def AddResults(self, tab, results):
    # Each named page in the smoke set maps to the metric checks it needs.
    compression_checks = [
        self._metrics.AddResultsForHeaderValidation,
        self._metrics.AddResultsForDataSaving,
    ]
    page_to_metrics = {
        'header validation': [self._metrics.AddResultsForHeaderValidation],
        'compression: image': compression_checks,
        'compression: javascript': compression_checks,
        'compression: css': compression_checks,
        'bypass': [self._metrics.AddResultsForBypass],
    }
    if self._page.name not in page_to_metrics:
      raise page_test.MeasurementFailure(
          'Invalid page name (%s) in smoke. Page name must be one of:\n%s' % (
              self._page.name, page_to_metrics.keys()))
    for add_result in page_to_metrics[self._page.name]:
      add_result(tab, results)
# Short aliases for the two fetch modes used as keys in the video metrics.
PROXIED = metrics.PROXIED
DIRECT = metrics.DIRECT
class ChromeProxyClientConfig(ChromeProxyValidation):
  """Chrome proxy client configuration service validation."""

  def __init__(self):
    super(ChromeProxyClientConfig, self).__init__(
        restart_after_each_page=True,
        metrics=metrics.ChromeProxyMetric())

  def CustomizeBrowserOptions(self, options):
    super(ChromeProxyClientConfig, self).CustomizeBrowserOptions(options)
    # Make the browser fetch its proxy configuration from the config service.
    options.AppendExtraBrowserArgs(
        '--enable-data-reduction-proxy-config-client')

  def AddResults(self, tab, results):
    self._metrics.AddResultsForClientConfig(tab, results)
class ChromeProxyVideoValidation(page_test.PageTest):
  """Validation for video pages.

  Measures pages using metrics.ChromeProxyVideoMetric. Pages can be fetched
  either direct from the origin server or via the proxy. If a page is fetched
  both ways, then the PROXIED and DIRECT measurements are compared to ensure
  the same video was loaded in both cases.
  """

  def __init__(self):
    super(ChromeProxyVideoValidation, self).__init__(
        needs_browser_restart_after_each_page=True,
        clear_cache_before_each_run=True)
    # The type is _allMetrics[url][PROXIED,DIRECT][metricName] = value,
    # where (metricName,value) is a metric computed by videowrapper.js.
    self._allMetrics = {}

  def WillNavigateToPage(self, page, tab):
    if page.use_chrome_proxy:
      measurements.WaitForViaHeader(tab)
    super(ChromeProxyVideoValidation, self).WillNavigateToPage(page, tab)

  def DidNavigateToPage(self, page, tab):
    self._currMetrics = metrics.ChromeProxyVideoMetric(tab)
    self._currMetrics.Start(page, tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    assert self._currMetrics
    self._currMetrics.Stop(page, tab)
    if page.url not in self._allMetrics:
      self._allMetrics[page.url] = {}

    # Verify this page.
    if page.use_chrome_proxy:
      self._currMetrics.AddResultsForProxied(tab, results)
      self._allMetrics[page.url][PROXIED] = self._currMetrics.videoMetrics
    else:
      self._currMetrics.AddResultsForDirect(tab, results)
      self._allMetrics[page.url][DIRECT] = self._currMetrics.videoMetrics
    self._currMetrics = None

    # Compare proxied and direct results for this url, if they exist.
    m = self._allMetrics[page.url]
    if PROXIED in m and DIRECT in m:
      self._CompareProxiedAndDirectMetrics(page.url, m[PROXIED], m[DIRECT])

  def _CompareProxiedAndDirectMetrics(self, url, pm, dm):
    """Compare metrics from PROXIED and DIRECT fetches.

    Compares video metrics computed by videowrapper.js for pages that were
    fetched both PROXIED and DIRECT.

    Args:
      url: The url for the page being tested.
      pm: Metrics when loaded by the Flywheel proxy.
      dm: Metrics when loaded directly from the origin server.

    Raises:
      ChromeProxyMetricException on failure.
    """
    def err(s):
      # Call form of raise is valid on both Python 2 and 3 (the original
      # used the Python-2-only `raise E, s` statement form).
      raise ChromeProxyMetricException(s)

    # BUG FIX: this helper only receives `url`; the original referenced the
    # out-of-scope name `page.url` below, raising NameError on any failure.
    if not pm['ready']:
      err('Proxied page did not load video: %s' % url)
    if not dm['ready']:
      err('Direct page did not load video: %s' % url)

    # Compare metrics that should match for PROXIED and DIRECT.
    for x in ('video_height', 'video_width', 'video_duration',
              'decoded_frames'):
      if x not in pm:
        err('Proxied page has no %s: %s' % (x, url))
      if x not in dm:
        err('Direct page has no %s: %s' % (x, url))
      if pm[x] != dm[x]:
        err('Mismatch for %s (proxied=%s direct=%s): %s' %
            (x, str(pm[x]), str(dm[x]), url))

    # Proxied XOCL should match direct CL.
    pxocl = pm['x_original_content_length_header']
    dcl = dm['content_length_header']
    if pxocl != dcl:
      err('Mismatch for content length (proxied=%s direct=%s): %s' %
          (str(pxocl), str(dcl), url))
class ChromeProxyInstrumentedVideoValidation(page_test.PageTest):
  """Tests a specially instrumented page for correct video transcoding."""

  def __init__(self):
    super(ChromeProxyInstrumentedVideoValidation, self).__init__(
        needs_browser_restart_after_each_page=True,
        clear_cache_before_each_run=True)
    self._metrics = metrics.ChromeProxyInstrumentedVideoMetric()

  def CustomizeBrowserOptions(self, options):
    # Turn the data reduction proxy on for this test.
    options.AppendExtraBrowserArgs('--enable-spdy-proxy-auth')

  def WillNavigateToPage(self, page, tab):
    # Only start measuring once traffic actually flows through the proxy.
    measurements.WaitForViaHeader(tab)
    tab.ClearCache(force=True)
    self._metrics.Start(page, tab)

  def ValidateAndMeasurePage(self, page, tab, results):
    self._metrics.Stop(page, tab)
    self._metrics.AddResults(tab, results)
| |
#!/usr/bin/env python
from Container import Container
from JumpScale import j
import os
import docker
import time
from urllib import parse
class Docker:
def __init__(self):
    self.__jslocation__ = "j.sal.docker"
    self.logger = j.logger.get('j.sal.docker')
    self._basepath = "/mnt/vmstor/docker"
    self._weaveSocket = None  # lazily resolved by the `weavesocket` property
    self._prefix = ""
    self._containers = {}  # cache: container id -> Container object
    self._names = []
    # Endpoint selection: no DOCKER_HOST env -> local unix socket;
    # otherwise prefer weave's proxy socket when weave is running,
    # else use the DOCKER_HOST value as-is.
    if 'DOCKER_HOST' not in os.environ or os.environ['DOCKER_HOST'] == "":
        self.base_url = 'unix://var/run/docker.sock'
    elif self.weaveIsActive:
        self.base_url = self.weavesocket
    else:
        self.base_url = os.environ['DOCKER_HOST']
    self.client = docker.Client(base_url=self.base_url, timeout=120)
def init(self):
    """Relocate /var/lib/docker onto a btrfs subvolume of the /storage disk."""
    j.do.execute("systemctl stop docker")
    d = j.sal.disklayout.findDisk(mountpoint="/storage")
    if d is not None:
        # we found a disk, lets make sure its in fstab
        d.setAutoMount()
        dockerpath = "%s/docker" % d.mountpoint
        dockerpath = dockerpath.replace("//", '/')
        if dockerpath not in j.sal.btrfs.subvolumeList(d.mountpoint):
            # have to create the dockerpath
            j.sal.btrfs.subvolumeCreate(dockerpath)
        # j.sal.fs.createDir("/storage/docker")
        # move existing docker state onto the subvolume, then point
        # /var/lib/docker at it
        j.sal.fs.copyDirTree("/var/lib/docker", dockerpath)
        j.sal.fs.symlink("/storage/docker", "/var/lib/docker",
                         overwriteTarget=True)
    j.do.execute("systemctl start docker")
@property
def weaveIsActive(self):
    # weavesocket resolves to "" when weave is absent or not running
    return bool(self.weavesocket)
@property
def weavesocket(self):
    """DOCKER_HOST value exported by weave, or "" when weave is unavailable.

    Resolved once and cached in self._weaveSocket.
    """
    if self._weaveSocket is None:
        if not j.tools.cuisine.local.core.command_check('weave'):
            self.logger.warning("weave not found, do not forget to start if installed.")
            self._weaveSocket = ""
        else:
            # `weave env` prints the DOCKER_HOST pointing at weave's proxy
            rc, self._weaveSocket = j.sal.process.execute("eval $(weave env) && echo $DOCKER_HOST", die=False)
            if rc > 0:
                self.logger.warning("weave not found, do not forget to start if installed.")
                self._weaveSocket = ""
        self._weaveSocket = self._weaveSocket.strip()
    return self._weaveSocket
def weaveInstall(self, ufw=False):
    """Install and start weave; optionally open its peer ports in ufw."""
    j.tools.cuisine.local.systemservices.weave.install(start=True)
    if ufw:
        # weave peers communicate on 6783 over both tcp and udp
        j.tools.cuisine.local.systemservices.ufw.allowIncoming(6783)
        j.tools.cuisine.local.systemservices.ufw.allowIncoming(6783, protocol="udp")
# def connectRemoteTCP(self, base_url):
# self.base_url = base_url
# self.client = docker.Client(base_url=weavesocket)
@property
def docker_host(self):
"""
Get the docker hostname.
"""
u = parse.urlparse(self.base_url)
if u.scheme == 'unix':
return 'localhost'
else:
return u.hostname
def _execute(self, command):
    """Run *command* in a subprocess and return its stdout.

    Raises RuntimeError when the command exits non-zero.
    """
    env = os.environ.copy()
    # our own PYTHONPATH must not leak into the tool being called
    env.pop('PYTHONPATH', None)
    (exitcode, stdout, stderr) = j.sal.process.run(
        command, showOutput=False, captureOutput=True, stopOnError=False, env=env)
    if exitcode != 0:
        raise j.exceptions.RuntimeError(
            "Failed to execute %s: Error: %s, %s" % (command, stdout, stderr))
    return stdout
#
# def copy(self, name, src, dest):
# rndd = j.data.idgenerator.generateRandomInt(10, 1000000)
# temp = "/var/docker/%s/%s" % (name, rndd)
# j.sal.fs.createDir(temp)
# source_name = j.sal.fs.getBaseName(src)
# if j.sal.fs.isDir(src):
# j.sal.fs.copyDirTree(src, j.sal.fs.joinPaths(temp, source_name))
# else:
# j.sal.fs.copyFile(src, j.sal.fs.joinPaths(temp, source_name))
#
# ddir = j.sal.fs.getDirName(dest)
# cmd = "mkdir -p %s" % (ddir)
# self.run(name, cmd)
#
# cmd = "cp -r /var/jumpscale/%s/%s %s" % (rndd, source_name, dest)
# self.run(name, cmd)
# j.sal.fs.remove(temp)
@property
def containers(self):
    """All known containers (running and stopped), fetched once from the
    daemon and cached in self._containers."""
    if self._containers == {}:
        # BUG FIX: `all=True` includes stopped containers; the original
        # passed the *builtin* `all` function, which only worked because a
        # function object happens to be truthy.
        for item in self.client.containers(all=True):
            try:
                name = str(item["Names"][0].strip("/").strip())
            except (KeyError, IndexError, TypeError):
                # entry without a usable name -- skip it (was a bare except)
                continue
            ident = str(item["Id"].strip())
            self._containers[ident] = Container(name, ident, self.client)
    return list(self._containers.values())
@property
def containerNamesRunning(self):
"""
List all running containers names
"""
res = []
for container in self.containers:
if container.isRunning():
res.append(container.name)
return res
@property
def containerNames(self):
"""
List all containers names
"""
res = []
for container in self.containers:
res.append(container.name)
return res
@property
def containersRunning(self):
"""
List of all running container objects
"""
res = []
for container in self.containers:
if container.isRunning():
res.append(container)
return res
def exists(self, name):
return name in self.containerNames
@property
def basepath(self):
    # currently hardcoded; the TODO below suggests it should come from config
    self._basepath = '/mnt/data/docker'
    # TODO: needs to fetch values out of hrd
    # if not self._basepath:
    #     if j.application.config.exists('docker.basepath'):
    #         self._basepath = j.application.config.get('docker.basepath')
    #     else:
    #         self._basepath="/mnt/vmstor/docker" #btrfs subvol create
    return self._basepath
def _getChildren(self, pid, children):
    """Recursively collect the process object for *pid* and all descendants
    into *children* and return the list."""
    process = j.sal.process.getProcessObject(pid)
    children.append(process)
    for child in process.get_children():
        children = self._getChildren(child.pid, children)
    return children
def _get_rootpath(self, name):
    # filesystem root of the (prefixed) machine directory under basepath
    rootpath = j.sal.fs.joinPaths(
        self.basepath, '%s%s' % (self._prefix, name), 'rootfs')
    return rootpath
def _getMachinePath(self, machinename, append=""):
if machinename == "":
raise j.exceptions.RuntimeError("Cannot be empty")
base = j.sal.fs.joinPaths(self.basepath, '%s%s' %
(self._prefix, machinename))
if append != "":
base = j.sal.fs.joinPaths(base, append)
return base
def status(self):
    """
    return list docker with some info

    @return list of [name, image, docker_host, ssh_port, status] entries,
        one per running container; ssh_port is "" when port 22 is not
        exposed, None when exposed without a public mapping
    """
    self.weavesocket  # ensure the weave socket has been resolved
    res = []
    for item in self.client.containers():
        name = item["Names"][0].strip(" /")
        sshport = ""
        for port in item["Ports"]:
            if port["PrivatePort"] == 22:
                if "PublicPort" in port:
                    sshport = port["PublicPort"]
                else:
                    sshport = None
        res.append([name, item["Image"], self.docker_host,
                    sshport, item["Status"]])
    return res
def ps(self):
    """
    return detailed info

    Raw `docker ps` data as returned by the client.
    """
    self.weavesocket  # ensure the weave socket has been resolved
    return self.client.containers()
def get(self, name, die=True):
"""
Get a container object by name
@param name string: container name
"""
for container in self.containers:
if container.name == name:
return container
if die:
raise j.exceptions.RuntimeError(
"Container with name %s doesn't exists" % name)
else:
return None
def exportRsync(self, name, backupname, key="pub"):
raise j.exceptions.RuntimeError("not implemented")
self.removeRedundantFiles(name)
ipaddr = j.application.config.get("jssync.addr")
path = self._getMachinePath(name)
if not j.sal.fs.exists(path):
raise j.exceptions.RuntimeError("cannot find machine:%s" % path)
if backupname[-1] != "/":
backupname += "/"
if path[-1] != "/":
path += "/"
cmd = "rsync -a %s %s::upload/%s/images/%s --delete-after --modify-window=60 --compress --stats --progress --exclude '.Trash*'" % (
path, ipaddr, key, backupname)
j.sal.process.executeWithoutPipe(cmd)
# def removeRedundantFiles(self,name):
# raise j.exceptions.RuntimeError("not implemented")
# basepath=self._getMachinePath(name)
# j.sal.fs.removeIrrelevantFiles(basepath,followSymlinks=False)
# toremove="%s/rootfs/var/cache/apt/archives/"%basepath
# j.sal.fs.removeDirTree(toremove)
def importRsync(self, backupname, name, basename="", key="pub"):
"""
@param basename is the name of a start of a machine locally, will be used as basis and then the source will be synced over it
"""
raise j.exceptions.RuntimeError("not implemented")
ipaddr = j.application.config.get("jssync.addr")
path = self._getMachinePath(name)
self.btrfsSubvolNew(name)
# j.sal.fs.createDir(path)
if backupname[-1] != "/":
backupname += "/"
if path[-1] != "/":
path += "/"
if basename != "":
basepath = self._getMachinePath(basename)
if basepath[-1] != "/":
basepath += "/"
if not j.sal.fs.exists(basepath):
raise j.exceptions.RuntimeError(
"cannot find base machine:%s" % basepath)
cmd = "rsync -av -v %s %s --delete-after --modify-window=60 --size-only --compress --stats --progress" % (
basepath, path)
self.logger.info(cmd)
j.sal.process.executeWithoutPipe(cmd)
cmd = "rsync -av -v %s::download/%s/images/%s %s --delete-after --modify-window=60 --compress --stats --progress" % (
ipaddr, key, backupname, path)
self.logger.info(cmd)
j.sal.process.executeWithoutPipe(cmd)
def exportTgz(self, name, backupname):
    # NOTE(review): deliberately disabled -- dead code kept for reference.
    raise j.exceptions.RuntimeError("not implemented")
    self.removeRedundantFiles(name)
    path = self._getMachinePath(name)
    bpath = j.sal.fs.joinPaths(self.basepath, "backups")
    if not j.sal.fs.exists(path):
        raise j.exceptions.RuntimeError("cannot find machine:%s" % path)
    j.sal.fs.createDir(bpath)
    bpath = j.sal.fs.joinPaths(bpath, "%s.tgz" % backupname)
    cmd = "cd %s;tar Szcf %s ." % (path, bpath)
    j.sal.process.executeWithoutPipe(cmd)
    return bpath
def importTgz(self, backupname, name):
    # NOTE(review): deliberately disabled -- dead code kept for reference.
    raise j.exceptions.RuntimeError("not implemented")
    path = self._getMachinePath(name)
    bpath = j.sal.fs.joinPaths(
        self.basepath, "backups", "%s.tgz" % backupname)
    if not j.sal.fs.exists(bpath):
        raise j.exceptions.RuntimeError(
            "cannot find import path:%s" % bpath)
    j.sal.fs.createDir(path)
    cmd = "cd %s;tar xzvf %s -C ." % (path, bpath)
    j.sal.process.executeWithoutPipe(cmd)
def _init_aysfs(self, fs, dockname):
    """Start an aysfs mount for a container, reusing shared (unique)
    instances when possible."""
    if fs.isUnique():
        # a unique aysfs instance is shared; only start it when not running
        if not fs.isRunning():
            self.logger.info('starting unique aysfs: %s' % fs.getName())
            fs.start()
        else:
            self.logger.info(
                'skipping aysfs: %s (unique running)' % fs.getName())
    else:
        # dedicated instance: name it after the container, restart it fresh
        fs.setName('%s-%s' % (dockname, fs.getName()))
        if fs.isRunning():
            fs.stop()
        self.logger.info('starting aysfs: %s' % fs.getName())
        fs.start()
def create(self, name="", ports="", vols="", volsro="", stdout=True, base="jumpscale/ubuntu1604", nameserver=["8.8.8.8"],
replace=True, cpu=None, mem=0, ssh=True, myinit=True, sharecode=False, sshkeyname="", sshpubkey="",
setrootrndpasswd=True, rootpasswd="", jumpscalebranch="master", aysfs=[], detach=False, privileged=False, getIfExists=True, weavenet=False):
"""
Creates a new container.
@param ports in format as follows "22:8022 80:8080" the first arg e.g. 22 is the port in the container
@param vols in format as follows "/var/insidemachine:/var/inhost # /var/1:/var/1 # ..." '#' is separator
@param sshkeyname : use ssh-agent (can even do remote through ssh -A) and then specify key you want to use in docker
"""
if ssh is True and myinit is False:
raise ValueError("SSH can't be enabled without myinit.")
# check there is weave
self.weavesocket
name = name.lower().strip()
self.logger.info(("create:%s" % name))
running = [item.name for item in self.containersRunning]
if not replace:
if name in self.containerNamesRunning:
if getIfExists:
return self.get(name=name)
else:
j.events.opserror_critical(
"Cannot create machine with name %s, because it does already exists.")
else:
if self.exists(name):
self.logger.info("remove existing container %s" % name)
container = self.get(name)
if container:
container.destroy()
if vols is None:
vols = ""
if volsro is None:
volsro = ""
if ports is None:
ports = ""
if mem is not None:
if mem > 0:
mem = int(mem) * 1024
elif mem <= 0:
mem = None
portsdict = {}
if len(ports) > 0:
items = ports.split(" ")
for item in items:
key, val = item.split(":", 1)
ss = key.split("/")
if len(ss) == 2:
portsdict[tuple(ss)] = val
else:
portsdict[int(key)] = val
if ssh:
if 22 not in portsdict:
for port in range(9022, 9190):
if not j.sal.nettools.tcpPortConnectionTest(self.docker_host, port):
portsdict[22] = port
self.logger.info(("ssh port will be on:%s" % port))
break
volsdict = {}
if len(vols) > 0:
items = vols.split("#")
for item in items:
key, val = item.split(":", 1)
volsdict[str(key).strip()] = str(val).strip()
if sharecode and j.sal.fs.exists(path="/opt/code"):
self.logger.info("share jumpscale code enable")
if "/opt/code" not in volsdict:
volsdict["/opt/code"] = "/opt/code"
for fs in aysfs:
self._init_aysfs(fs, name)
mounts = fs.getPrefixs()
for inp, out in mounts.items():
while not j.sal.fs.exists(inp):
time.sleep(0.1)
volsdict[out] = inp
volsdictro = {}
if len(volsro) > 0:
items = volsro.split("#")
for item in items:
key, val = item.split(":", 1)
volsdictro[str(key).strip()] = str(val).strip()
self.logger.info("Volumes map:")
for src1, dest1 in list(volsdict.items()):
self.logger.info(" %-20s %s" % (src1, dest1))
binds = {}
volskeys = [] # is location in docker
for key, path in list(volsdict.items()):
# j.sal.fs.createDir(path) # create the path on hostname
binds[path] = {"bind": key, "ro": False}
volskeys.append(key)
for key, path in list(volsdictro.items()):
# j.sal.fs.createDir(path) # create the path on hostname
binds[path] = {"bind": key, "ro": True}
volskeys.append(key)
if base not in self.getImages():
self.logger.info("download docker image %s" % base)
self.pull(base)
if base.startswith("jumpscale/ubuntu1604") or myinit is True:
cmd = "sh -c \"mkdir -p /var/run/screen;chmod 777 /var/run/screen; /var/run/screen;exec >/dev/tty 2>/dev/tty </dev/tty && /sbin/my_init -- /usr/bin/screen -s bash\""
cmd = "sh -c \" /sbin/my_init -- bash -l\""
else:
cmd = None
self.logger.info(("install docker with name '%s'" % name))
if vols != "":
self.logger.info("Volumes")
self.logger.info(volskeys)
self.logger.info(binds)
hostname = None if self.weaveIsActive else name.replace('_', '-')
host_config = self.client.create_host_config(
privileged=privileged) if privileged else None
res = self.client.create_container(image=base, command=cmd, hostname=hostname, user="root",
detach=detach, stdin_open=False, tty=True, mem_limit=mem, ports=list(portsdict.keys()), environment=None, volumes=volskeys,
network_disabled=False, name=name, entrypoint=None, cpu_shares=cpu, working_dir=None, domainname=None, memswap_limit=None, host_config=host_config)
if res["Warnings"] is not None:
raise j.exceptions.RuntimeError(
"Could not create docker, res:'%s'" % res)
id = res["Id"]
if self.weaveIsActive:
nameserver = None
for k, v in portsdict.items():
if type(k) == tuple and len(k) == 2:
portsdict["%s/%s" % (k[0], k[1])] = v
portsdict.pop(k)
res = self.client.start(container=id, binds=binds, port_bindings=portsdict, lxc_conf=None,
publish_all_ports=False, links=None, privileged=privileged, dns=nameserver, dns_search=None,
volumes_from=None, network_mode=None)
container = Container(name, id, self.client, host=self.docker_host)
self._containers[id] = container
if ssh:
container.pushSSHKey(keyname=sshkeyname, sshpubkey=sshpubkey)
# Make sure docker is ready for executor
end_time = time.time() + 60
while time.time() < end_time:
rc, _, _ = container.executor.execute('ls /', die=False, showout=False)
if rc:
time.sleep(0.1)
break
if setrootrndpasswd:
if rootpasswd is None or rootpasswd == '':
self.logger.info("set default root passwd (gig1234)")
container.executor.execute(
"echo \"root:gig1234\"|chpasswd", showout=False)
else:
self.logger.info("set root passwd to %s" % rootpasswd)
container.executor.execute(
"echo \"root:%s\"|chpasswd" % rootpasswd, showout=False)
if not self.weaveIsActive:
container.setHostName(name)
return container
def getImages(self):
images = []
for item in self.client.images():
if item['RepoTags'] is None:
continue
tags = str(item['RepoTags'][0])
tags = tags.replace(":latest", "")
images.append(tags)
return images
def removeImages(self, tag="<none>:<none>"):
"""
Delete a certain Docker image using tag
"""
for item in self.client.images():
if tag in item["RepoTags"]:
self.client.remove_image(item["Id"])
def ping(self):
self.weavesocket
try:
self.client.ping()
except Exception as e:
return False
return True
def destroyAll(self, removeimages=False):
    """
    Destroy all containers.
    @param removeimages bool: to remove all images.
    """
    for container in self.containers:
        # never destroy the weave router container
        if "weave" in container.name:
            continue
        container.destroy()
    if removeimages:
        self.removeImages()
def _destroyAllKill(self):
    """Hard cleanup: destroy all containers and images, stop docker and
    wipe its btrfs subvolumes and named volumes."""
    if self.ping():
        for container in self.containers:
            container.destroy()
        self.removeImages()
    j.do.execute("systemctl stop docker")
    if j.sal.fs.exists(path="/var/lib/docker/btrfs/subvolumes"):
        j.sal.btrfs.subvolumesDelete('/var/lib/docker/btrfs/subvolumes')
    if j.sal.fs.exists(path="/var/lib/docker/volumes"):
        for item in j.sal.fs.listDirsInDir("/var/lib/docker/volumes"):
            j.sal.fs.removeDirTree(item)
def removeDocker(self):
    """Completely uninstall docker: kill all state, delete docker btrfs
    subvolumes on every btrfs mountpoint, remove the package and state dir."""
    self._destroyAllKill()
    rc, out = j.sal.process.execute("mount")
    mountpoints = []
    for line in out.split("\n"):
        # collect every btrfs mountpoint from the mount table
        if line.find("type btrfs") != -1:
            mountpoint = line.split("on ")[1].split("type")[0].strip()
            mountpoints.append(mountpoint)
    for mountpoint in mountpoints:
        j.sal.btrfs.subvolumesDelete(mountpoint, "/docker/")
    j.sal.btrfs.subvolumesDelete("/storage", "docker")
    j.sal.process.execute("apt-get remove docker-engine -y")
    # j.sal.process.execute("rm -rf /var/lib/docker")
    j.sal.fs.removeDirTree("/var/lib/docker")
def reInstallDocker(self):
    """
    ReInstall docker on your system

    Removes docker completely, reinstalls it and re-runs init().
    """
    self.removeDocker()
    j.tools.cuisine.local.docker.install(force=True)
    self.init()
def pull(self, imagename):
    """
    pull a certain image.
    @param imagename string: image
    """
    self.client.import_image_from_image(imagename)
def push(self, image, output=True):
    """
    Push *image* to its registry, streaming progress.

    @param image str: name of the image
    @param output bool: log progress lines as they arrive
    @return str: all progress lines joined with newlines
    @raises RuntimeError when the registry reports an error
    """
    client = self.client
    previous_timeout = client.timeout
    client.timeout = 36000  # pushing a large image can take a long time
    out = []
    try:
        for l in client.push(image, stream=True):
            line = j.data.serializer.json.loads(l)
            line_id = line['id'] if 'id' in line else ''
            s = "%s " % line_id
            if 'status' in line:
                s += line['status']
            if 'progress' in line:
                # (dropped the unused `detail` local from the original)
                s += " %50s " % line['progress']
            if 'error' in line:
                message = line['errorDetail']['message']
                raise j.exceptions.RuntimeError(message)
            if output:
                self.logger.info(s)
            out.append(s)
    finally:
        # BUG FIX: restore the client timeout even when the push fails
        client.timeout = previous_timeout
    return "\n".join(out)
def build(self, path, tag, output=True, force=False):
    """
    Build an image from a directory containing a Dockerfile.

    @param path str: path of the directory that contains the docker file
    @param tag str: tag to give to the image. e.g: 'jumpscale/myimage'
    @param output bool: print output as it builds
    @param force bool: when True, build without using the layer cache
    @return str: string containing the stdout
    """
    out = []
    # BUG FIX: `nocache` was only assigned when force was True, raising
    # NameError on the default force=False path.
    nocache = bool(force)
    for l in self.client.build(path=path, tag=tag, nocache=nocache):
        line = j.data.serializer.json.loads(l)
        if 'stream' in line:
            line = line['stream'].strip()
            if output:
                self.logger.info(line)
            out.append(line)
    return "\n".join(out)
class DockerExecObj:
    """Minimal executor that runs commands inside a named docker container."""

    def __init__(self, name):
        self.name = name
        self.id = "docker:%s" % name

    def execute(self, cmds, die=True, checkok=None, async_=False, showout=True, timeout=0, env={}):
        # BUG FIX: the parameter was named `async`, which is a reserved
        # keyword since Python 3.7 and made this module unimportable.
        # NOTE(review): self._cuisineDockerHost is never assigned in this
        # class -- presumably injected by the caller; confirm before use.
        return self._cuisineDockerHost.core.run("docker exec %s %s" % (self.name, cmds))
| |
import unittest
class SrecException(Exception):
    """Generic S-record parsing error with a custom message."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg


class SrecExceptionWrongLength(Exception):
    """Record line length is impossible or inconsistent with its count byte."""

    def __init__(self):
        self.msg = 'Wrong length of record line'

    def __str__(self):
        return self.msg


class SrecExceptionWrongType(Exception):
    """Record type field is not one of the known S0..S9 types."""

    def __init__(self, record_type):
        self.msg = 'Wrong record type (%s)' % record_type

    def __str__(self):
        return self.msg


class SrecExceptionWrongChecksum(Exception):
    """Record bytes do not sum to 0xff (one's-complement checksum)."""

    def __init__(self, checksum):
        self.msg = 'Wrong record checksum (expected: 0xff, get: 0x%02x)' % checksum

    def __str__(self):
        return self.msg
class Srec():
    """Decoder for Motorola S-record (SREC) text files.

    Contiguous data records are merged into (start_address, [bytes]) buffers.
    """

    # Address width in bytes for each record type.
    ADDR_SIZE = {
        'S0': 2,
        'S1': 2,
        'S2': 3,
        'S3': 4,
        'S5': 2,
        'S7': 4,
        'S8': 3,
        'S9': 2,
    }

    def __init__(self):
        self._buffers = []        # completed (start_address, [bytes]) chunks
        self._buffer_addr = 0     # start address of the chunk being assembled
        self._buffer_data = None  # data bytes of the chunk being assembled
        self.header = None        # payload of the S0 header record, if seen

    def encode_record(self, srec):
        """Parse a single S-record line.

        :param srec: one text line, e.g. 'S0030000FC'
        :return: tuple (record_type, address, [payload bytes])
        :raises SrecExceptionWrongLength: bad line/record length
        :raises SrecExceptionWrongType: unknown record type
        :raises SrecExceptionWrongChecksum: checksum mismatch
        """
        srec = srec.strip()
        # minimum record: type(2) + count(2) + addr(>=4) + checksum(2) chars,
        # and hex digits always come in pairs
        if len(srec) < 10 or len(srec) % 2:
            raise SrecExceptionWrongLength()
        record = srec[:2]
        if record not in Srec.ADDR_SIZE:
            raise SrecExceptionWrongType(record)
        data = []
        srec = srec[2:]
        while srec:
            data.append(int(srec[:2], 16))
            srec = srec[2:]
        # one's-complement checksum: all bytes (incl. checksum) sum to 0xff
        checksum = 0x00
        for i in data:
            checksum += i
        checksum &= 0xff
        if checksum != 0xff:
            raise SrecExceptionWrongChecksum(checksum)
        data = data[:-1]
        # first byte is the record length (addr + payload + checksum bytes)
        if data[0] != len(data):
            raise SrecExceptionWrongLength()
        data = data[1:]
        # read the big-endian address and strip its bytes from the payload
        addr_size = Srec.ADDR_SIZE[record]
        addr = 0
        while addr_size:
            addr <<= 8
            addr |= data[0]
            data = data[1:]
            addr_size -= 1
        return record, addr, data

    def process_record(self, srec):
        """Parse one record, merging contiguous data records into buffers."""
        record, addr, data = self.encode_record(srec)
        if record == 'S0':
            self.header = data
        elif record in ('S1', 'S2', 'S3') and data:
            if self._buffer_addr is None:
                # first data record: start a new buffer
                self._buffer_addr = addr
                self._buffer_data = data
            elif self._buffer_addr + len(self._buffer_data) == addr:
                # contiguous with the current buffer: extend it
                self._buffer_data += data
            elif self._buffer_addr + len(self._buffer_data) != addr:
                # gap in addresses: flush and start a new buffer
                self._buffers.append((self._buffer_addr, self._buffer_data))
                self._buffer_addr = addr
                self._buffer_data = data

    def encode_lines(self, srec_lines):
        """Parse an iterable of S-record lines.

        :return: list of (start_address, [data bytes]) buffers
        """
        self._buffers = []
        self._buffer_addr = None
        self._buffer_data = None
        self.header = None
        for srec in srec_lines:
            self.process_record(srec)
        if self._buffer_addr is not None:
            self._buffers.append((self._buffer_addr, self._buffer_data))
        # BUG FIX: the return was commented out, yet the unit tests below
        # (and natural API use) expect the parsed buffers back.
        return self._buffers

    @property
    def buffers(self):
        # buffers collected by the most recent encode_lines()/encode_file()
        return self._buffers

    def encode_file(self, filename):
        """Parse a whole SREC file and return its buffers."""
        with open(filename) as srec_file:
            return self.encode_lines(srec_file)
class TestSrec(unittest.TestCase):
    """Unit tests for Srec.encode_record and Srec.encode_lines.

    NOTE(review): the encode_lines tests assert on its return value, i.e.
    they expect the parsed buffers back from encode_lines.
    """

    def setUp(self):
        self.srec = Srec()

    # --- malformed input -------------------------------------------------

    def testEncodeSrecVeryShortLine1(self):
        with self.assertRaises(SrecExceptionWrongLength):
            self.srec.encode_record('S')

    def testEncodeSrecVeryShortLine3(self):
        with self.assertRaises(SrecExceptionWrongLength):
            self.srec.encode_record('S000')

    def testEncodeSrecLinesWrongRecord(self):
        with self.assertRaises(SrecExceptionWrongType):
            self.srec.encode_record('abcdefghij')

    def testEncodeSrecLinesWrongRecordType(self):
        # S6 is not a defined record type
        with self.assertRaises(SrecExceptionWrongType):
            self.srec.encode_record('S600000000')

    def testEncodeSrecWrongChecksum(self):
        with self.assertRaises(SrecExceptionWrongChecksum):
            self.srec.encode_record('S000000000')

    def testEncodeSrecWrongChecksum2(self):
        with self.assertRaises(SrecExceptionWrongChecksum):
            self.srec.encode_record('S012345678901234567890')

    def testEncodeSrecShortLine(self):
        # count byte disagrees with the actual record length
        with self.assertRaises(SrecExceptionWrongLength):
            self.srec.encode_record('S0000000ff')

    def testEncodeSrecShortLine2(self):
        with self.assertRaises(SrecExceptionWrongLength):
            self.srec.encode_record('S0020000fd')

    def testEncodeSrecLongLine(self):
        with self.assertRaises(SrecExceptionWrongLength):
            self.srec.encode_record('S0040000fb')

    # --- well-formed records --------------------------------------------

    def testEncodeSrecEmptyHeader(self):
        ret = self.srec.encode_record('S0030000FC')
        self.assertEqual(ret, ('S0', 0x0000, []))

    def testEncodeSrecHeader(self):
        ret = self.srec.encode_record('S0060000766C6BAC')
        self.assertEqual(ret, ('S0', 0x0000, [0x76, 0x6c, 0x6b]))

    def testEncodeSrecDataAddr16(self):
        ret = self.srec.encode_record('S1060000766C6BAC')
        self.assertEqual(ret, ('S1', 0x0000, [0x76, 0x6c, 0x6b]))

    def testEncodeSrecDataAddr24(self):
        ret = self.srec.encode_record('S207000000766C6BAB')
        self.assertEqual(ret, ('S2', 0x000000, [0x76, 0x6c, 0x6b]))

    def testEncodeSrecDataAddr32(self):
        ret = self.srec.encode_record('S30800000000766C6BAA')
        self.assertEqual(ret, ('S3', 0x00000000, [0x76, 0x6c, 0x6b]))

    def testEncodeLines1Buffer(self):
        # three contiguous data records merge into a single buffer
        ret = self.srec.encode_lines([
            'S30900000000112233444c',
            'S309000000045566778838',
            'S309000000089ABCDEF0CA',
        ])
        self.assertEqual(ret, [(0x00000000, [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x9a, 0xbc, 0xde, 0xf0])])

    def testEncodeLines2Buffer(self):
        # an address gap splits the data into two buffers
        ret = self.srec.encode_lines([
            'S30900000000112233444c',
            'S309000000045566778838',
            'S309000008009ABCDEF0CA',
        ])
        self.assertEqual(ret, [(0x00000000, [0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88]), (2048, [0x9a, 0xbc, 0xde, 0xf0])])
if __name__ == '__main__':
unittest.main()
| |
#!/usr/bin/env python
import logging
import random
import string
import time
import base64
import sys
import hmac
# from python 2.5
import imp
import traceback
if sys.version_info >= (2, 5):
import hashlib
else: # before python 2.5
import sha
from saml2 import saml
from saml2 import samlp
from saml2 import VERSION
from saml2.time_util import instant
try:
from hashlib import md5
except ImportError:
from md5 import md5
import zlib
logger = logging.getLogger(__name__)
class SamlException(Exception):
    """Base class for SAML-specific errors raised by this module."""
    pass


class RequestVersionTooLow(SamlException):
    pass


class RequestVersionTooHigh(SamlException):
    pass


class UnknownPrincipal(SamlException):
    pass


class Unsupported(SamlException):
    pass


class UnsupportedBinding(Unsupported):
    pass


class VersionMismatch(Exception):
    pass


class Unknown(Exception):
    pass


class OtherError(Exception):
    pass


class MissingValue(Exception):
    pass


class PolicyError(Exception):
    pass


class BadRequest(Exception):
    pass


class UnravelError(Exception):
    pass


# Maps exception classes to the samlp status code reported to the peer;
# consumed by error_status_factory.
EXCEPTION2STATUS = {
    VersionMismatch: samlp.STATUS_VERSION_MISMATCH,
    UnknownPrincipal: samlp.STATUS_UNKNOWN_PRINCIPAL,
    UnsupportedBinding: samlp.STATUS_UNSUPPORTED_BINDING,
    RequestVersionTooLow: samlp.STATUS_REQUEST_VERSION_TOO_LOW,
    RequestVersionTooHigh: samlp.STATUS_REQUEST_VERSION_TOO_HIGH,
    OtherError: samlp.STATUS_UNKNOWN_PRINCIPAL,
    MissingValue: samlp.STATUS_REQUEST_UNSUPPORTED,
    # Undefined
    Exception: samlp.STATUS_AUTHN_FAILED,
}
GENERIC_DOMAINS = ["aero", "asia", "biz", "cat", "com", "coop", "edu",
"gov", "info", "int", "jobs", "mil", "mobi", "museum",
"name", "net", "org", "pro", "tel", "travel"]
def valid_email(emailaddress, domains=GENERIC_DOMAINS):
"""Checks for a syntactically valid email address."""
# Email address must be at least 6 characters in total.
# Assuming noone may have addresses of the type a@com
if len(emailaddress) < 6:
return False # Address too short.
# Split up email address into parts.
try:
localpart, domainname = emailaddress.rsplit('@', 1)
host, toplevel = domainname.rsplit('.', 1)
except ValueError:
return False # Address does not have enough parts.
# Check for Country code or Generic Domain.
if len(toplevel) != 2 and toplevel not in domains:
return False # Not a domain name.
for i in '-_.%+.':
localpart = localpart.replace(i, "")
for i in '-_.':
host = host.replace(i, "")
if localpart.isalnum() and host.isalnum():
return True # Email address is fine.
else:
return False # Email address has funny characters.
def decode_base64_and_inflate(string):
    """ base64 decodes and then inflates according to RFC1951

    :param string: a deflated and encoded string
    :return: the string after decoding and inflating
    """
    decoded = base64.b64decode(string)
    # wbits=-15 selects a raw deflate stream (no zlib header/checksum)
    return zlib.decompress(decoded, -15)
def deflate_and_base64_encode(string_val):
    """
    Deflates and the base64 encodes a string

    :param string_val: The string to deflate and encode
    :return: The deflated and encoded string
    """
    # zlib.compress adds a 2-byte header and a 4-byte checksum; strip both
    # to obtain a raw RFC1951 deflate stream.
    raw_deflate = zlib.compress(string_val)[2:-4]
    return base64.b64encode(raw_deflate)
def rndstr(size=16):
    """
    Returns a string of random ascii characters or digits

    :param size: The length of the string
    :return: string
    """
    alphabet = string.ascii_letters + string.digits
    # one random.choice call per character, as in the original
    return "".join(random.choice(alphabet) for _ in range(size))
def sid(seed=""):
"""The hash of the server time + seed makes an unique SID for each session.
128-bits long so it fulfills the SAML2 requirements which states
128-160 bits
:param seed: A seed string
:return: The hex version of the digest, prefixed by 'id-' to make it
compliant with the NCName specification
"""
ident = md5()
ident.update(repr(time.time()))
if seed:
ident.update(seed)
return "id-" + ident.hexdigest()
def parse_attribute_map(filenames):
    """
    Expects a file with each line being composed of the oid for the attribute
    exactly one space, a user friendly name of the attribute and then
    the type specification of the name.

    :param filenames: List of filenames on mapfiles.
    :return: A 2-tuple, one dictionary with the oid as keys and the friendly
        names as values, the other one the other way around.
    """
    forward = {}
    backward = {}
    for filename in filenames:
        # BUG FIX: the original `open(filename).readlines()` leaked the
        # file handle; `with` closes it deterministically.
        with open(filename) as fp:
            for line in fp:
                (name, friendly_name, name_format) = line.strip().split()
                forward[(name, name_format)] = friendly_name
                backward[friendly_name] = (name, name_format)
    return forward, backward
def identity_attribute(form, attribute, forward_map=None):
    """Return a display form for *attribute*.

    With form == "friendly": prefer the friendly_name, then a lookup of
    (name, name_format) in forward_map; fall back to the bare name.
    """
    if form == "friendly":
        if attribute.friendly_name:
            return attribute.friendly_name
        if forward_map:
            try:
                return forward_map[(attribute.name, attribute.name_format)]
            except KeyError:
                pass
    # default is name
    return attribute.name
#----------------------------------------------------------------------------
def error_status_factory(info):
    """Build a samlp.Status describing an error condition.

    :param info: either an Exception instance (class mapped through
        EXCEPTION2STATUS, message taken from args[0]) or an
        (error_code, text) 2-tuple.
    :return: samlp.Status with a Responder top-level code wrapping the
        specific second-level code.
    """
    if isinstance(info, Exception):
        second_level = EXCEPTION2STATUS.get(info.__class__,
                                            samlp.STATUS_AUTHN_FAILED)
        msg = info.args[0]
    else:
        (second_level, msg) = info
    return samlp.Status(
        status_message=samlp.StatusMessage(text=msg),
        status_code=samlp.StatusCode(
            value=samlp.STATUS_RESPONDER,
            status_code=samlp.StatusCode(value=second_level)))
def success_status_factory():
    """Return a samlp.Status signalling success."""
    code = samlp.StatusCode(value=samlp.STATUS_SUCCESS)
    return samlp.Status(status_code=code)
def status_message_factory(message, code, fro=samlp.STATUS_RESPONDER):
    """Build a samlp.Status carrying *message* and a nested status *code*.

    :param message: human readable status text
    :param code: second-level status code value
    :param fro: top-level status code value, Responder by default
    """
    inner = samlp.StatusCode(value=code)
    outer = samlp.StatusCode(value=fro, status_code=inner)
    return samlp.Status(status_message=samlp.StatusMessage(text=message),
                        status_code=outer)
def assertion_factory(**kwargs):
    """Create a saml.Assertion with a fresh id and issue instant.

    Every keyword argument is set as an attribute on the new assertion.
    """
    assertion = saml.Assertion(version=VERSION, id=sid(),
                               issue_instant=instant())
    for attr_name, attr_val in kwargs.items():
        setattr(assertion, attr_name, attr_val)
    return assertion
def _attrval(val, typ=""):
    """Convert *val* into a list of saml.AttributeValue instances.

    :param val: a single value, a list/set of values, or None
    :param typ: optional xsi type to stamp on each produced value
    :return: list of AttributeValue elements, or None when val is None
    """
    if isinstance(val, (list, set)):
        attrval = [saml.AttributeValue(text=v) for v in val]
    elif val is None:
        attrval = None
    else:
        attrval = [saml.AttributeValue(text=val)]
    # BUG FIX: attrval may be None here; the original iterated it
    # unconditionally and raised TypeError when val was None and typ set.
    if typ and attrval:
        for ava in attrval:
            ava.set_type(typ)
    return attrval
# --- attribute profiles -----
# xmlns:xs="http://www.w3.org/2001/XMLSchema"
# xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
def do_ava(val, typ=""):
    """Create a list of saml.AttributeValue elements for *val*.

    :param val: a string, a list of values, any other truthy value
        (or False), or None
    :param typ: optional xsi type to stamp on each produced value
    :return: list of AttributeValue elements, or None when val is None
    :raises OtherError: for values that cannot be represented
    """
    if isinstance(val, basestring):
        ava = saml.AttributeValue()
        ava.set_text(val)
        attrval = [ava]
    elif isinstance(val, list):
        attrval = [do_ava(v)[0] for v in val]
    elif val or val is False:
        ava = saml.AttributeValue()
        ava.set_text(val)
        attrval = [ava]
    elif val is None:
        attrval = None
    else:
        raise OtherError("strange value type on: %s" % val)
    # BUG FIX: attrval is None when val is None; the original iterated it
    # unconditionally and raised TypeError when a type was requested.
    if typ and attrval:
        for ava in attrval:
            ava.set_type(typ)
    return attrval
def do_attribute(val, typ, key):
    """Build a saml.Attribute carrying *val*.

    :param val: attribute value(s), converted through do_ava
    :param typ: optional xsi type for the values
    :param key: either the attribute name as a string, or a
        (name, name_format[, friendly_name]) 2- or 3-tuple
    """
    attr = saml.Attribute()
    attrval = do_ava(val, typ)
    if attrval:
        attr.attribute_value = attrval
    if isinstance(key, basestring):
        attr.name = key
    elif isinstance(key, tuple):  # 3-tuple or 2-tuple
        try:
            (name, nformat, friendly) = key
        except ValueError:
            (name, nformat) = key
            friendly = ""
        if name:
            attr.name = name
        # BUG FIX: the original tested the builtin ``format`` (always
        # truthy) instead of the unpacked ``nformat``.
        if nformat:
            attr.name_format = nformat
        if friendly:
            attr.friendly_name = friendly
    return attr
def do_attributes(identity):
    """Convert an identity dictionary into a list of saml.Attribute.

    Each value may be a plain value or a (value, type) 2-tuple; anything
    that cannot be unpacked is treated as an empty untyped value.
    """
    attrs = []
    if not identity:
        return attrs
    for key, spec in identity.items():
        try:
            val, typ = spec
        except ValueError:
            val, typ = spec, ""
        except TypeError:
            val, typ = "", ""
        attrs.append(do_attribute(val, typ, key))
    return attrs
def do_attribute_statement(identity):
    """Wrap the attributes built from *identity* in an AttributeStatement.

    :param identity: A dictionary with friendly names as keys
    :return: saml.AttributeStatement
    """
    attributes = do_attributes(identity)
    return saml.AttributeStatement(attribute=attributes)
def factory(klass, **kwargs):
    """Instantiate *klass* and set each keyword argument as an attribute."""
    instance = klass()
    for attr_name, attr_val in kwargs.items():
        setattr(instance, attr_name, attr_val)
    return instance
def signature(secret, parts):
    """Generate an HMAC-SHA1 signature over *parts*.

    :param secret: the HMAC key
    :param parts: iterable of byte strings folded into the digest in order
    :return: the hexadecimal digest string
    """
    # The fallback on the pre-2.5 ``sha`` module was dead code on every
    # supported interpreter (and ``sha`` is not imported here), so
    # hmac with hashlib.sha1 is used unconditionally.
    csum = hmac.new(secret, digestmod=hashlib.sha1)
    for part in parts:
        csum.update(part)
    return csum.hexdigest()
def verify_signature(secret, parts):
    """Check that the last element of *parts* is a valid signature over
    the preceding elements.

    Uses hmac.compare_digest so the comparison runs in constant time,
    avoiding a timing side channel on the signature check.

    :return: True when the signature matches, False otherwise
    """
    return hmac.compare_digest(signature(secret, parts[:-1]), parts[-1])
FTICKS_FORMAT = "F-TICKS/SWAMID/2.0%s#"
def fticks_log(sp, logf, idp_entity_id, user_id, secret, assertion):
    """Log a login event in the F-TICKS federation log format.

    'F-TICKS/' federationIdentifier '/' version *('#' attribute '=' value) '#'
    Allowed attributes:
        TS  the login time stamp
        RP  the relying party entityID
        AP  the asserting party entityID (typically the IdP)
        PN  a hash of the local principal name and a unique key
        AM  the authentication method URN

    :param sp: Client instance
    :param logf: The log function to use
    :param idp_entity_id: IdP entity ID
    :param user_id: The user identifier
    :param secret: A salt to make the hash more secure
    :param assertion: A SAML Assertion instance gotten from the IdP
    """
    # NOTE(review): the format description mentions sha256 for PN but the
    # code uses HMAC-SHA1 -- confirm which is intended.
    csum = hmac.new(secret, digestmod=hashlib.sha1)
    csum.update(user_id)
    info = {
        "TS": time.time(),
        "RP": sp.entity_id,
        "AP": idp_entity_id,
        "PN": csum.hexdigest(),
        "AM": assertion.AuthnStatement.AuthnContext.AuthnContextClassRef.text
    }
    # BUG FIX: the original iterated the dict directly, unpacking each
    # two-character *key* into (a, v) and dropping all the values.
    logf.info(FTICKS_FORMAT % "#".join(["%s=%s" % (a, v)
                                        for a, v in info.items()]))
def dynamic_importer(name, class_name=None):
    """
    Dynamically imports modules / classes

    :param name: module name to locate and load through the ``imp`` machinery
    :param class_name: optional attribute to load alongside the module
    :return: (package, class) 2-tuple; (None, None) when the module cannot
        be found, (package, None) when no class_name is given
    """
    try:
        fp, pathname, description = imp.find_module(name)
    except ImportError:
        # best effort: report the miss and signal failure instead of raising
        print "unable to locate module: " + name
        return None, None
    try:
        package = imp.load_module(name, fp, pathname, description)
    except Exception, e:
        raise
    if class_name:
        try:
            # NOTE(review): imp.load_module is re-invoked with a dotted
            # "module.class" name; it is unclear this yields the class
            # object rather than re-loading the module -- confirm.
            _class = imp.load_module("%s.%s" % (name, class_name), fp,
                                     pathname, description)
        except Exception, e:
            raise
        return package, _class
    else:
        return package, None
def exception_trace(exc):
    """Render the active exception as a dict with a short message and the
    full formatted traceback.

    Must be called from inside an ``except`` block so that sys.exc_info()
    is populated.
    """
    trace_lines = traceback.format_exception(*sys.exc_info())
    try:
        _exc = "Exception: %s" % exc
    except UnicodeEncodeError:
        _exc = "Exception: %s" % exc.message.encode("utf-8", "replace")
    return {"message": _exc, "content": "".join(trace_lines)}
| |
# -*- coding: utf-8 -*-
##
# This file is standalone vmware vcloud director util
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact with: mbayramov@vmware.com
##
"""
Standalone application that leverages the openmano VMware connector to work with the vCloud Director REST API.
- Provides capability to create and delete VDC for specific organization.
- Create, delete and manage network for specific VDC
- List deployed VM's , VAPPs, VDSs, Organization
- View detail information about VM / Vapp , Organization etc
- Operate with images upload / boot / power on etc
Usage example.
List organization created in vCloud director
vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list org
List VDC for particular organization
vmwarecli.py -u admin -p qwerty123 -c 172.16.254.206 -U Administrator -P qwerty123 -o test -v TEF list vdc
Upload image
python vmwarerecli.py image upload /Users/spyroot/Developer/Openmano/Ro/vnfs/cirros/cirros.ovf
Boot Image
python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF image boot cirros cirros
View vApp
python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF view vapp 90bd2b4e-f782-46cf-b5e2-c3817dcf6633 -u
List VMS
python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vms
List VDC in OSM format
python vmwarerecli.py -u admin -p qwerty123 -c 172.16.254.206 -o test -v TEF list vdc -o
Mustafa Bayramov
mbayramov@vmware.com
"""
import os
import argparse
import traceback
import uuid
from xml.etree import ElementTree as ET
import sys
from pyvcloud import Http
import logging
import vimconn
import time
import uuid
import urllib3
import requests
from vimconn_vmware import vimconnector
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from prettytable import PrettyTable
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
__author__ = "Mustafa Bayramov"
__date__ = "$16-Sep-2016 11:09:29$"
# TODO move to main vim
def delete_network_action(vca=None, network_uuid=None):
    """
    Method leverages vCloud director and query network based on network uuid

    Args:
        vca - is active VCA connection.
        network_uuid - is a network uuid

    Returns:
        The return XML respond
    """
    if vca is None or network_uuid is None:
        return None
    url_list = [vca.host, '/api/admin/network/', network_uuid]
    vm_list_rest_call = ''.join(url_list)
    # only issue the REST call when there is an authenticated vCloud
    # session with an organization attached
    if not (not vca.vcloud_session or not vca.vcloud_session.organization):
        response = Http.get(url=vm_list_rest_call,
                            headers=vca.vcloud_session.get_vcloud_headers(),
                            verify=vca.verify,
                            logger=vca.logger)
        # NOTE(review): despite the function name this performs a GET on
        # the admin network URL, not a DELETE -- confirm intent.
        if response.status_code == requests.codes.ok:
            print response.content
            return response.content
    return None
def print_vapp(vapp_dict=None):
    """ Print a dictionary of vApp attributes in tabular format.

    Args:
        vapp_dict: dict keyed by vm uuid; each value is a dict of vApp
            attributes (see the key listing below).

    Returns:
        The return nothing
    """
    # following key available to print
    # {'status': 'POWERED_OFF', 'storageProfileName': '*', 'hardwareVersion': '7', 'vmToolsVersion': '0',
    # 'memoryMB': '384',
    # 'href': 'https://172.16.254.206/api/vAppTemplate/vm-129e22e8-08dc-4cb6-8358-25f635e65d3b',
    # 'isBusy': 'false', 'isDeployed': 'false', 'isInMaintenanceMode': 'false', 'isVAppTemplate': 'true',
    # 'networkName': 'nat', 'isDeleted': 'false', 'catalogName': 'Cirros',
    # 'containerName': 'Cirros Template', # 'container':
    # 'https://172.16.254.206/api/vAppTemplate/vappTemplate-b966453d-c361-4505-9e38-ccef45815e5d',
    # 'name': 'Cirros', 'pvdcHighestSupportedHardwareVersion': '11', 'isPublished': 'false',
    # 'numberOfCpus': '1', 'vdc': 'https://172.16.254.206/api/vdc/a5056f85-418c-4bfd-8041-adb0f48be9d9',
    # 'guestOs': 'Other (32-bit)', 'isVdcEnabled': 'true'}
    if vapp_dict is None:
        return
    vm_table = PrettyTable(['vm uuid',
                            'vapp name',
                            'vapp uuid',
                            'network name',
                            'storage name',
                            'vcpu', 'memory', 'hw ver','deployed','status'])
    for k in vapp_dict:
        entry = []
        entry.append(k)
        entry.append(vapp_dict[k]['containerName'])
        # container href ends in .../xxxx-<uuid>; take the last path
        # element and drop the 5-character type prefix, e.g.
        # vm-b1f5cd4c-2239-4c89-8fdc-a41ff18e0d61
        entry.append(vapp_dict[k]['container'].split('/')[-1:][0][5:])
        entry.append(vapp_dict[k]['networkName'])
        entry.append(vapp_dict[k]['storageProfileName'])
        entry.append(vapp_dict[k]['numberOfCpus'])
        entry.append(vapp_dict[k]['memoryMB'])
        entry.append(vapp_dict[k]['pvdcHighestSupportedHardwareVersion'])
        entry.append(vapp_dict[k]['isDeployed'])
        entry.append(vapp_dict[k]['status'])
        vm_table.add_row(entry)
    print vm_table
def print_org(org_dict=None):
    """ Print a dictionary of organizations in tabular format.

    Args:
        org_dict: dictionary of organization where key is org uuid.

    Returns:
        The return nothing
    """
    if org_dict is None:
        return
    org_table = PrettyTable(['org uuid', 'name'])
    for k in org_dict:
        entry = [k, org_dict[k]]
        org_table.add_row(entry)
    print org_table
def print_vm_list(vm_dict=None):
""" Method takes vapp_dict and print in tabular format
Args:
vm_dict: dictionary of organization where key is org uuid.
Returns:
The return nothing
"""
if vm_dict is None:
return
vm_table = PrettyTable(
['vm uuid', 'vm name', 'vapp uuid', 'vdc uuid', 'network name', 'is deployed', 'vcpu', 'memory', 'status'])
try:
for k in vm_dict:
entry = []
entry.append(k)
entry.append(vm_dict[k]['name'])
entry.append(vm_dict[k]['container'].split('/')[-1:][0][5:])
entry.append(vm_dict[k]['vdc'].split('/')[-1:][0])
entry.append(vm_dict[k]['networkName'])
entry.append(vm_dict[k]['isDeployed'])
entry.append(vm_dict[k]['numberOfCpus'])
entry.append(vm_dict[k]['memoryMB'])
entry.append(vm_dict[k]['status'])
vm_table.add_row(entry)
print vm_table
except KeyError:
logger.error("wrong key {}".format(KeyError.message))
pass
def print_vdc_list(org_dict=None):
""" Method takes vapp_dict and print in tabular format
Args:
org_dict: dictionary of organization where key is org uuid.
Returns:
The return nothing
"""
if org_dict is None:
return
try:
vdcs_dict = {}
if org_dict.has_key('vdcs'):
vdcs_dict = org_dict['vdcs']
vdc_table = PrettyTable(['vdc uuid', 'vdc name'])
for k in vdcs_dict:
entry = [k, vdcs_dict[k]]
vdc_table.add_row(entry)
print vdc_table
except KeyError:
logger.error("wrong key {}".format(KeyError.message))
logger.logger.debug(traceback.format_exc())
def print_network_list(org_dict=None):
""" Method print network list.
Args:
org_dict: dictionary of organization that contain key networks with a list of all
network for for specific VDC
Returns:
The return nothing
"""
if org_dict is None:
return
try:
network_dict = {}
if org_dict.has_key('networks'):
network_dict = org_dict['networks']
network_table = PrettyTable(['network uuid', 'network name'])
for k in network_dict:
entry = [k, network_dict[k]]
network_table.add_row(entry)
print network_table
except KeyError:
logger.error("wrong key {}".format(KeyError.message))
logger.logger.debug(traceback.format_exc())
def print_org_details(org_dict=None):
""" Method takes vapp_dict and print in tabular format
Args:
org_dict: dictionary of organization where key is org uuid.
Returns:
The return nothing
"""
if org_dict is None:
return
try:
catalogs_dict = {}
print_vdc_list(org_dict=org_dict)
print_network_list(org_dict=org_dict)
if org_dict.has_key('catalogs'):
catalogs_dict = org_dict['catalogs']
catalog_table = PrettyTable(['catalog uuid', 'catalog name'])
for k in catalogs_dict:
entry = [k, catalogs_dict[k]]
catalog_table.add_row(entry)
print catalog_table
except KeyError:
logger.error("wrong key {}".format(KeyError.message))
logger.logger.debug(traceback.format_exc())
def delete_actions(vim=None, action=None, namespace=None):
    """Dispatch the 'delete' sub-commands (currently only 'network').

    When -u/--uuid was not given, the network name is resolved to a uuid
    by scanning every organization's networks before deleting.
    """
    if action != 'network' and namespace.action != 'network':
        return
    logger.debug("Requesting delete for network {}".format(namespace.network_name))
    network_uuid = namespace.network_name
    # if request name based we need find UUID
    # TODO optimize it or move to external function
    if not namespace.uuid:
        for org in vim.get_org_list():
            org_networks = vim.get_org(org)['networks']
            for net_uuid in org_networks:
                if org_networks[net_uuid] == namespace.network_name:
                    network_uuid = net_uuid
    vim.delete_network_action(network_uuid=network_uuid)
def list_actions(vim=None, action=None, namespace=None):
    """ Method provide list object from VDC action

    Args:
        vim - is vcloud director vim connector.
        action - is action for list ( vdc / org etc)
        namespace - must contain VDC / Org information.

    Returns:
        The return nothing
    """
    # resolve the organization name given on the command line to its uuid
    org_id = None
    all_orgs = vim.get_org_list()
    for org_uuid in all_orgs:
        if all_orgs[org_uuid] == namespace.vcdorg:
            org_id = org_uuid
            break
    if org_id is None:
        print(" Invalid organization.")
        return
    if 'vms' in (action, namespace.action):
        print_vm_list(vm_dict=vim.get_vm_list(vdc_name=namespace.vcdvdc))
    elif 'vapps' in (action, namespace.action):
        print_vapp(vapp_dict=vim.get_vapp_list(vdc_name=namespace.vcdvdc))
    elif 'networks' in (action, namespace.action):
        if namespace.osm:
            osm_print(vim.get_network_list(filter_dict={}))
        else:
            print_network_list(vim.get_org(org_uuid=org_id))
    elif 'vdc' in (action, namespace.action):
        if namespace.osm:
            osm_print(vim.get_tenant_list(filter_dict=None))
        else:
            print_vdc_list(vim.get_org(org_uuid=org_id))
    elif 'org' in (action, namespace.action):
        print_org(org_dict=vim.get_org_list())
    else:
        return None
def print_network_details(network_dict=None):
try:
network_table = PrettyTable(network_dict.keys())
entry = [network_dict.values()]
network_table.add_row(entry[0])
print network_table
except KeyError:
logger.error("wrong key {}".format(KeyError.message))
logger.logger.debug(traceback.format_exc())
def osm_print(generic_dict=None):
try:
for element in generic_dict:
table = PrettyTable(element.keys())
entry = [element.values()]
table.add_row(entry[0])
print table
except KeyError:
logger.error("wrong key {}".format(KeyError.message))
logger.logger.debug(traceback.format_exc())
def view_actions(vim=None, action=None, namespace=None):
    """Dispatch the 'view' sub-commands (org / vapp / network).

    The organization named on the command line is resolved first; every
    view below operates within that organization.
    """
    org_id = None
    orgs = vim.get_org_list()
    # for/else: the else branch runs only when no org name matched
    for org in orgs:
        if orgs[org] == namespace.vcdorg:
            org_id = org
            break
    else:
        print(" Invalid organization.")
        return
    myorg = vim.get_org(org_uuid=org_id)
    # view org
    if action == 'org' or namespace.action == 'org':
        org_id = None
        orgs = vim.get_org_list()
        if namespace.uuid:
            # the caller passed the uuid directly
            if namespace.org_name in orgs:
                org_id = namespace.org_name
        else:
            # we need find UUID based on name provided
            for org in orgs:
                if orgs[org] == namespace.org_name:
                    org_id = org
                    break
        logger.debug("Requesting view for orgs {}".format(org_id))
        print_org_details(vim.get_org(org_uuid=org_id))
    # view vapp action
    if action == 'vapp' or namespace.action == 'vapp':
        if namespace.vapp_name is not None and namespace.uuid:
            logger.debug("Requesting vapp {} for vdc {}".format(namespace.vapp_name, namespace.vcdvdc))
            vapp_dict = {}
            vapp_uuid = namespace.vapp_name
            # if request based on just name we need get UUID
            # NOTE(review): unreachable -- the enclosing condition already
            # requires namespace.uuid to be true; confirm intent.
            if not namespace.uuid:
                vapp_uuid = vim.get_vappid(vdc=namespace.vcdvdc, vapp_name=namespace.vapp_name)
                if vapp_uuid is None:
                    print("Can't find vapp by given name {}".format(namespace.vapp_name))
                    return
            print " namespace {}".format(namespace)
            # NOTE(review): vapp_dict is always {} at this point, so both
            # "is not None" tests below are always true -- confirm intent.
            if vapp_dict is not None and namespace.osm:
                vm_info_dict = vim.get_vminstance(vim_vm_uuid=vapp_uuid)
                print vm_info_dict
            if vapp_dict is not None and namespace.osm != True:
                vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vapp_uuid, isuuid=True)
                print_vapp(vapp_dict=vapp_dict)
    # view network
    if action == 'network' or namespace.action == 'network':
        logger.debug("Requesting view for network {}".format(namespace.network_name))
        network_uuid = namespace.network_name
        # if request name based we need find UUID
        # TODO optimize it or move to external function
        if not namespace.uuid:
            if not myorg.has_key('networks'):
                print("Network {} is undefined in vcloud director for org {} vdc {}".format(namespace.network_name,
                                                                                            vim.name,
                                                                                            vim.tenant_name))
                return
            my_org_net = myorg['networks']
            for network in my_org_net:
                if my_org_net[network] == namespace.network_name:
                    network_uuid = network
                    break
        # NOTE(review): print_network_details prints its own table and
        # returns None, so this outer print emits "None" -- confirm.
        print print_network_details(network_dict=vim.get_vcd_network(network_uuid=network_uuid))
def create_actions(vim=None, action=None, namespace=None):
    """Dispatch the 'create' sub-commands (network / vdc).

    Args:
        vim - is Cloud director vim connector
        action - action for create ( network / vdc etc)
        namespace - argparse namespace with the object names

    Returns:
        The return xml content of respond or None
    """
    if action == 'network' or namespace.action == 'network':
        # BUG FIX: the original format string had no placeholder, so the
        # network name never appeared in the log line.
        logger.debug("Creating a network {} in vcloud director".format(namespace.network_name))
        network_uuid = vim.create_network(namespace.network_name)
        if network_uuid is not None:
            print("Created new network {} and uuid: {}".format(namespace.network_name, network_uuid))
        else:
            print("Failed to create a new network {}".format(namespace.network_name))
    elif action == 'vdc' or namespace.action == 'vdc':
        logger.debug("Creating a new vdc {} in vcloud director.".format(namespace.vdc_name))
        vdc_uuid = vim.create_vdc(namespace.vdc_name)
        if vdc_uuid is not None:
            print("Created new vdc {} and uuid: {}".format(namespace.vdc_name, vdc_uuid))
        else:
            print("Failed to create a new vdc {}".format(namespace.vdc_name))
    else:
        return None
def validate_uuid4(uuid_string):
    """Function validate that string contain valid uuid4

    Args:
        uuid_string - candidate UUID string

    Returns:
        The return true if string contain valid UUID format
    """
    try:
        uuid.UUID(uuid_string, version=4)
    except ValueError:
        return False
    return True
def upload_image(vim=None, image_file=None):
    """Function upload image to vcloud director

    Args:
        vim - vim connector used to reach vcloud director
        image_file - path to the OVF image file

    Returns:
        The return true if image uploaded correctly
    """
    try:
        catalog_uuid = vim.get_image_id_from_path(path=image_file, progress=True)
        if catalog_uuid is not None and validate_uuid4(catalog_uuid):
            print("Image uploaded and uuid {}".format(catalog_uuid))
            return True
    except vimconn.vimconnException as upload_exception:
        print("Failed uploaded {} image".format(image_file))
        # str(exc) instead of the Python-2-only BaseException.message
        print("Error Reason: {}".format(upload_exception))
    return False
def boot_image(vim=None, image_name=None, vm_name=None):
    """ Function boot image that resided in vcloud director.
        The image name can be UUID or name.

    Args:
        vim - vim connector
        image_name - image identified by UUID or text string.
        vm_name - vmname

    Returns:
        True when the image booted correctly, None when the catalog could
        not be resolved, False on failure.
    """
    try:
        catalogs = vim.vca.get_catalogs()
        # get_catalogid resolves both a name and a uuid, so one call covers
        # both cases (the original duplicated the identical call in both
        # branches of a validate_uuid4 check).
        vim_catalog = vim.get_catalogid(catalog_name=image_name, catalogs=catalogs)
        if vim_catalog is None:
            return None
        print(" Booting {} image id {} ".format(vm_name, vim_catalog))
        vm_uuid, _ = vim.new_vminstance(name=vm_name, image_id=vim_catalog)
        if vm_uuid is not None and validate_uuid4(vm_uuid):
            print("Image booted and vm uuid {}".format(vm_uuid))
            # NOTE(review): 'namespace' is read from module scope here, it
            # is not a parameter of this function -- confirm.
            vapp_dict = vim.get_vapp(vdc_name=namespace.vcdvdc, vapp_name=vm_uuid, isuuid=True)
            if vapp_dict is not None:
                print_vapp(vapp_dict=vapp_dict)
            return True
    except vimconn.vimconnNotFoundException as not_found:
        print("Failed boot {} image".format(image_name))
        print(str(not_found))
    except vimconn.vimconnException as vimconn_error:
        print("Failed boot {} image".format(image_name))
        print(str(vimconn_error))
    except Exception:
        # BUG FIX: narrowed the original bare ``except:`` which also
        # swallowed SystemExit and KeyboardInterrupt.
        print("Failed boot {} image".format(image_name))
    return False
def image_action(vim=None, action=None, namespace=None):
    """ Function present set of action to manipulate with image.

    - upload image
    - boot image.
    - delete image ( not yet done )

    Args:
        vim - vcloud director connector
        action - string (upload/boot etc)
        namespace - contain other attributes image name etc

    Returns:
        The return nothing
    """
    if 'upload' in (action, namespace.action):
        upload_image(vim=vim, image_file=namespace.image)
    elif 'boot' in (action, namespace.action):
        boot_image(vim=vim, image_name=namespace.image, vm_name=namespace.vmname)
    else:
        return None
def vmwarecli(command=None, action=None, namespace=None):
    """Top-level CLI driver: gather missing credentials interactively,
    connect to vCloud director and dispatch the requested command.

    Args:
        command - top level command (list/view/delete/create/image)
        action - sub-action of the command
        namespace - argparse namespace holding all CLI options
    """
    logger.debug("Namespace {}".format(namespace))
    urllib3.disable_warnings()
    vcduser = None
    vcdpasword = None
    vcdhost = None
    vcdorg = None
    # Python 2 compatibility: prompt via raw_input when it exists
    if hasattr(__builtins__, 'raw_input'):
        input = raw_input
    # BUG FIX: the original guarded this prompt on namespace.vcdvdc, so a
    # missing username was silently left as None instead of prompted for.
    if namespace.vcduser is None:
        while True:
            vcduser = input("Enter vcd username: ")
            if vcduser is not None and len(vcduser) > 0:
                break
    else:
        vcduser = namespace.vcduser
    if namespace.vcdpassword is None:
        while True:
            vcdpasword = input("Please enter vcd password: ")
            if vcdpasword is not None and len(vcdpasword) > 0:
                break
    else:
        vcdpasword = namespace.vcdpassword
    if namespace.vcdhost is None:
        while True:
            vcdhost = input("Please enter vcd host name or ip: ")
            if vcdhost is not None and len(vcdhost) > 0:
                break
    else:
        vcdhost = namespace.vcdhost
    if namespace.vcdorg is None:
        while True:
            vcdorg = input("Please enter vcd organization name: ")
            if vcdorg is not None and len(vcdorg) > 0:
                break
    else:
        vcdorg = namespace.vcdorg
    try:
        vim = vimconnector(uuid=None,
                           name=vcdorg,
                           tenant_id=None,
                           tenant_name=namespace.vcdvdc,
                           url=vcdhost,
                           url_admin=vcdhost,
                           user=vcduser,
                           passwd=vcdpasword,
                           log_level="DEBUG",
                           config={'admin_username': namespace.vcdamdin,
                                   'admin_password': namespace.vcdadminpassword})
        vim.vca = vim.connect()
    except vimconn.vimconnConnectionException:
        print("Failed connect to vcloud director. Please check credential and hostname.")
        return
    # route the parsed command to its handler
    if command == 'list' or namespace.command == 'list':
        logger.debug("Client requested list action")
        list_actions(vim=vim, action=action, namespace=namespace)
    if command == 'view' or namespace.command == 'view':
        logger.debug("Client requested view action")
        view_actions(vim=vim, action=action, namespace=namespace)
    if command == 'delete' or namespace.command == 'delete':
        logger.debug("Client requested delete action")
        delete_actions(vim=vim, action=action, namespace=namespace)
    if command == 'create' or namespace.command == 'create':
        logger.debug("Client requested create action")
        create_actions(vim=vim, action=action, namespace=namespace)
    if command == 'image' or namespace.command == 'image':
        # BUG FIX: the log message wrongly said "create action" here.
        logger.debug("Client requested image action")
        image_action(vim=vim, action=action, namespace=namespace)
if __name__ == '__main__':
    # fallback values used when neither CLI args nor environment provide one
    defaults = {'vcdvdc': 'default',
                'vcduser': 'admin',
                'vcdpassword': 'admin',
                'vcdhost': 'https://localhost',
                'vcdorg': 'default',
                'debug': 'INFO'}
    parser = argparse.ArgumentParser()
    parser.add_argument('-u', '--vcduser', help='vcloud director username', type=str)
    parser.add_argument('-p', '--vcdpassword', help='vcloud director password', type=str)
    # NOTE(review): -U is the admin *username* but its help text says
    # "password" (copy-paste) -- confirm and fix the text.
    parser.add_argument('-U', '--vcdamdin', help='vcloud director password', type=str)
    parser.add_argument('-P', '--vcdadminpassword', help='vcloud director password', type=str)
    parser.add_argument('-c', '--vcdhost', help='vcloud director host', type=str)
    parser.add_argument('-o', '--vcdorg', help='vcloud director org', type=str)
    parser.add_argument('-v', '--vcdvdc', help='vcloud director vdc', type=str)
    parser.add_argument('-d', '--debug', help='debug level', type=int)
    # top-level commands, each with its own action sub-parser
    parser_subparsers = parser.add_subparsers(help='commands', dest='command')
    sub = parser_subparsers.add_parser('list', help='List objects (VMs, vApps, networks)')
    sub_subparsers = sub.add_subparsers(dest='action')
    list_vms = sub_subparsers.add_parser('vms', help='list - all vm deployed in vCloud director')
    list_vapps = sub_subparsers.add_parser('vapps', help='list - all vapps deployed in vCloud director')
    list_network = sub_subparsers.add_parser('networks', help='list - all networks deployed')
    list_network.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')
    # list vdc
    list_vdc = sub_subparsers.add_parser('vdc', help='list - list all vdc for organization accessible to you')
    list_vdc.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')
    list_org = sub_subparsers.add_parser('org', help='list - list of organizations accessible to you.')
    create_sub = parser_subparsers.add_parser('create')
    create_sub_subparsers = create_sub.add_subparsers(dest='action')
    create_vms = create_sub_subparsers.add_parser('vms')
    create_vapp = create_sub_subparsers.add_parser('vapp')
    create_vapp.add_argument('uuid')
    # add network
    create_network = create_sub_subparsers.add_parser('network')
    create_network.add_argument('network_name', action='store', help='create a network for a vdc')
    # add VDC
    create_vdc = create_sub_subparsers.add_parser('vdc')
    create_vdc.add_argument('vdc_name', action='store', help='create a new VDC for org')
    delete_sub = parser_subparsers.add_parser('delete')
    del_sub_subparsers = delete_sub.add_subparsers(dest='action')
    del_vms = del_sub_subparsers.add_parser('vms')
    del_vapp = del_sub_subparsers.add_parser('vapp')
    del_vapp.add_argument('uuid', help='view vapp based on UUID')
    # delete network
    del_network = del_sub_subparsers.add_parser('network')
    del_network.add_argument('network_name', action='store',
                             help='- delete network for vcloud director by provided name')
    del_network.add_argument('-u', '--uuid', default=False, action='store_true',
                             help='delete network for vcloud director by provided uuid')
    # delete vdc
    del_vdc = del_sub_subparsers.add_parser('vdc')
    view_sub = parser_subparsers.add_parser('view')
    view_sub_subparsers = view_sub.add_subparsers(dest='action')
    view_vms_parser = view_sub_subparsers.add_parser('vms')
    view_vms_parser.add_argument('uuid', default=False, action='store_true',
                                 help='- View VM for specific uuid in vcloud director')
    view_vms_parser.add_argument('name', default=False, action='store_true',
                                 help='- View VM for specific vapp name in vcloud director')
    # view vapp
    view_vapp_parser = view_sub_subparsers.add_parser('vapp')
    view_vapp_parser.add_argument('vapp_name', action='store',
                                  help='- view vapp for specific vapp name in vcloud director')
    view_vapp_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view vapp based on uuid')
    view_vapp_parser.add_argument('-o', '--osm', default=False, action='store_true', help='provide view in OSM format')
    # view network
    view_network = view_sub_subparsers.add_parser('network')
    view_network.add_argument('network_name', action='store',
                              help='- view network for specific network name in vcloud director')
    view_network.add_argument('-u', '--uuid', default=False, action='store_true', help='view network based on uuid')
    # view VDC command and actions
    view_vdc = view_sub_subparsers.add_parser('vdc')
    view_vdc.add_argument('vdc_name', action='store',
                          help='- View VDC based and action based on provided vdc uuid')
    view_vdc.add_argument('-u', '--uuid', default=False, action='store_true', help='view vdc based on uuid')
    # view organization command and actions
    view_org = view_sub_subparsers.add_parser('org')
    view_org.add_argument('org_name', action='store',
                          help='- View VDC based and action based on provided vdc uuid')
    view_org.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')
    # upload image action
    image_sub = parser_subparsers.add_parser('image')
    image_subparsers = image_sub.add_subparsers(dest='action')
    upload_parser = image_subparsers.add_parser('upload')
    upload_parser.add_argument('image', default=False, action='store', help='- valid path to OVF image ')
    upload_parser.add_argument('catalog', default=False, action='store_true', help='- catalog name')
    # boot vm action
    boot_parser = image_subparsers.add_parser('boot')
    boot_parser.add_argument('image', default=False, action='store', help='- Image name')
    boot_parser.add_argument('vmname', default=False, action='store', help='- VM name')
    boot_parser.add_argument('-u', '--uuid', default=False, action='store_true', help='view org based on uuid')
    namespace = parser.parse_args()
    # put command_line args to mapping
    command_line_args = {k: v for k, v in vars(namespace).items() if v}
    # precedence: defaults < environment < command line
    d = defaults.copy()
    d.update(os.environ)
    d.update(command_line_args)
    logger = logging.getLogger('mano.vim.vmware')
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch = logging.StreamHandler()
    # NOTE(review): d['debug'] is an int when -d is used (type=int) and
    # str.upper(int) raises TypeError -- confirm intended option type.
    ch.setLevel(str.upper(d['debug']))
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    logger.setLevel(getattr(logging, str.upper(d['debug'])))
    logger.info(
        "Connecting {} username: {} org: {} vdc: {} ".format(d['vcdhost'], d['vcduser'], d['vcdorg'], d['vcdvdc']))
    logger.debug("command: \"{}\" actio: \"{}\"".format(d['command'], d['action']))
    # main entry point.
    vmwarecli(namespace=namespace)
| |
# Copyright 2016 Hewlett Packard Enterprise Development, LP
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib.db import api as db_api
from neutron_lib.db import resource_extend
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import helpers as log_helpers
from oslo_utils import uuidutils
from neutron.common import utils
from neutron.db import segments_db as db
from neutron.extensions import segment as extension
from neutron import manager
from neutron.objects import base as base_obj
from neutron.objects import network
from neutron.services.segments import exceptions
_USER_CONFIGURED_SEGMENT_PLUGIN = None
FOR_NET_DELETE = 'for_net_delete'
def check_user_configured_segment_plugin():
    """Return True when the 'segments' service plugin is configured.

    The result is computed once and cached in the module-level
    _USER_CONFIGURED_SEGMENT_PLUGIN, which holds one of three values:
      * None  - not computed yet (only during neutron-server startup),
      * True  - the user listed the plugin (by alias or full class path),
      * False - the plugin is not in the service_plugins option.
    """
    global _USER_CONFIGURED_SEGMENT_PLUGIN
    if _USER_CONFIGURED_SEGMENT_PLUGIN is None:
        segment_class = 'neutron.services.segments.plugin.Plugin'
        known_names = ('segments', segment_class)
        _USER_CONFIGURED_SEGMENT_PLUGIN = any(
            name in cfg.CONF.service_plugins for name in known_names)
    return _USER_CONFIGURED_SEGMENT_PLUGIN
class SegmentDbMixin(object):
    """Mixin class to add segment.

    Provides CRUD operations for network segments backed by the
    NetworkSegment versioned object, publishing registry events around
    each state change.
    """
    @staticmethod
    def _make_segment_dict(segment_obj, fields=None):
        # Flatten a NetworkSegment object into the plain dict shape the
        # segments API returns, then let registered resource-extension
        # functions add their fields before filtering to 'fields'.
        res = {'id': segment_obj['id'],
               'network_id': segment_obj['network_id'],
               'name': segment_obj['name'],
               'description': segment_obj['description'],
               db.PHYSICAL_NETWORK: segment_obj[db.PHYSICAL_NETWORK],
               db.NETWORK_TYPE: segment_obj[db.NETWORK_TYPE],
               db.SEGMENTATION_ID: segment_obj[db.SEGMENTATION_ID],
               'hosts': segment_obj['hosts'],
               'segment_index': segment_obj['segment_index']}
        resource_extend.apply_funcs('segments', res, segment_obj.db_obj)
        return db_utils.resource_fields(res, fields)
    def _get_segment(self, context, segment_id):
        # Fetch a NetworkSegment by id; raise SegmentNotFound when absent.
        segment = network.NetworkSegment.get_object(context, id=segment_id)
        if not segment:
            raise exceptions.SegmentNotFound(segment_id=segment_id)
        return segment
    @log_helpers.log_method_call
    def create_segment(self, context, segment):
        """Create a segment and publish the AFTER_CREATE event."""
        segment = segment['segment']
        segment_id = segment.get('id') or uuidutils.generate_uuid()
        try:
            new_segment = self._create_segment_db(context, segment_id, segment)
        except db_exc.DBReferenceError:
            # Foreign-key failure on network_id: the network doesn't exist.
            raise n_exc.NetworkNotFound(net_id=segment['network_id'])
        registry.publish(resources.SEGMENT, events.AFTER_CREATE, self,
                         payload=events.DBEventPayload(
                             context, resource_id=segment_id,
                             states=(new_segment,)))
        return self._make_segment_dict(new_segment)
    def _create_segment_db(self, context, segment_id, segment):
        # Persist the segment inside a writer transaction: normalize
        # ATTR_NOT_SPECIFIED values to None, assign the next segment_index
        # for the network, and publish PRECOMMIT_CREATE before commit.
        with db_api.CONTEXT_WRITER.using(context):
            network_id = segment['network_id']
            physical_network = segment[extension.PHYSICAL_NETWORK]
            if physical_network == constants.ATTR_NOT_SPECIFIED:
                physical_network = None
            network_type = segment[extension.NETWORK_TYPE]
            segmentation_id = segment[extension.SEGMENTATION_ID]
            if segmentation_id == constants.ATTR_NOT_SPECIFIED:
                segmentation_id = None
            name = segment['name']
            if name == constants.ATTR_NOT_SPECIFIED:
                name = None
            description = segment['description']
            if description == constants.ATTR_NOT_SPECIFIED:
                description = None
            args = {'id': segment_id,
                    'network_id': network_id,
                    'name': name,
                    'description': description,
                    db.PHYSICAL_NETWORK: physical_network,
                    db.NETWORK_TYPE: network_type,
                    db.SEGMENTATION_ID: segmentation_id}
            # Calculate the index of segment
            segment_index = 0
            segments = self.get_segments(
                context,
                filters={'network_id': [network_id]},
                fields=['segment_index'],
                sorts=[('segment_index', True)])
            if segments:
                # NOTE(xiaohhui): The new index is the last index + 1, this
                # may cause discontinuous segment_index. But segment_index
                # can functionally work as the order index for segments.
                segment_index = (segments[-1].get('segment_index') + 1)
            args['segment_index'] = segment_index
            new_segment = network.NetworkSegment(context, **args)
            new_segment.create()
            # Do some preliminary operations before committing the segment to
            # db
            registry.publish(
                resources.SEGMENT, events.PRECOMMIT_CREATE, self,
                payload=events.DBEventPayload(context, resource_id=segment_id,
                                              states=(new_segment,)))
            # The new segment might have been updated by the callbacks
            # subscribed to the PRECOMMIT_CREATE event. So update it in the DB
            new_segment.update()
            return new_segment
    @log_helpers.log_method_call
    def update_segment(self, context, uuid, segment):
        """Update an existing segment."""
        segment = segment['segment']
        with db_api.CONTEXT_WRITER.using(context):
            curr_segment = self._get_segment(context, uuid)
            curr_segment.update_fields(segment)
            curr_segment.update()
        return self._make_segment_dict(curr_segment)
    @log_helpers.log_method_call
    def get_segment(self, context, uuid, fields=None):
        """Return a single segment as an API dict."""
        segment_db = self._get_segment(context, uuid)
        return self._make_segment_dict(segment_db, fields)
    @log_helpers.log_method_call
    def get_segments(self, context, filters=None, fields=None,
                     sorts=None, limit=None, marker=None,
                     page_reverse=False):
        """Return API dicts for segments matching *filters*, with paging."""
        filters = filters or {}
        pager = base_obj.Pager(sorts, limit, page_reverse, marker)
        segment_objs = network.NetworkSegment.get_objects(
            context, _pager=pager, **filters)
        return [self._make_segment_dict(obj) for obj in segment_objs]
    @log_helpers.log_method_call
    def get_segments_count(self, context, filters=None):
        """Return the number of segments matching *filters*."""
        filters = filters or {}
        return network.NetworkSegment.count(context, **filters)
    @log_helpers.log_method_call
    def get_segments_by_hosts(self, context, hosts):
        """Return the distinct segment ids mapped to any host in *hosts*."""
        if not hosts:
            return []
        segment_host_mapping = network.SegmentHostMapping.get_objects(
            context, host=hosts)
        return list({mapping.segment_id for mapping in segment_host_mapping})
    @log_helpers.log_method_call
    def delete_segment(self, context, uuid, for_net_delete=False):
        """Delete an existing segment.

        :param for_net_delete: carried to subscribers in the event payload
            metadata so they can tell a standalone segment delete from one
            that is part of deleting the whole network.
        """
        segment_dict = self.get_segment(context, uuid)
        # Do some preliminary operations before deleting the segment
        registry.publish(resources.SEGMENT, events.BEFORE_DELETE,
                         self.delete_segment,
                         payload=events.DBEventPayload(
                             context, metadata={
                                 FOR_NET_DELETE: for_net_delete},
                             states=(segment_dict,),
                             resource_id=uuid))
        # Delete segment in DB
        with db_api.CONTEXT_WRITER.using(context):
            if not network.NetworkSegment.delete_objects(context, id=uuid):
                raise exceptions.SegmentNotFound(segment_id=uuid)
            # Do some preliminary operations before deleting segment in db
            registry.publish(resources.SEGMENT, events.PRECOMMIT_DELETE,
                             self.delete_segment,
                             payload=events.DBEventPayload(
                                 context, metadata={
                                     FOR_NET_DELETE: for_net_delete},
                                 resource_id=uuid,
                                 states=(segment_dict,)))
        registry.publish(resources.SEGMENT, events.AFTER_DELETE,
                         self.delete_segment,
                         payload=events.DBEventPayload(
                             context, metadata={
                                 FOR_NET_DELETE: for_net_delete},
                             states=(segment_dict,),
                             resource_id=uuid))
@db_api.retry_if_session_inactive()
@lockutils.synchronized('update_segment_host_mapping')
def update_segment_host_mapping(context, host, current_segment_ids):
    """Reconcile stored SegmentHostMapping rows for *host*.

    Creates mappings for segments in *current_segment_ids* that are not
    yet recorded for the host, and removes mappings whose segment is no
    longer in the set. Runs inside a single writer transaction and under
    a process-wide lock.
    """
    with db_api.CONTEXT_WRITER.using(context):
        existing_mappings = network.SegmentHostMapping.get_objects(
            context, host=host)
        known_ids = {entry['segment_id'] for entry in existing_mappings}
        # Add mappings for newly reachable segments.
        for new_id in current_segment_ids - known_ids:
            network.SegmentHostMapping(
                context, segment_id=new_id, host=host).create()
        # Drop mappings to segments the host no longer reports.
        obsolete_ids = known_ids - current_segment_ids
        for mapping in existing_mappings:
            if mapping.segment_id in obsolete_ids:
                mapping.delete()
def get_hosts_mapped_with_segments(context):
    """Get hosts that are mapped with segments.

    L2 providers can use this method to get an overview of
    SegmentHostMapping, and then delete the stale SegmentHostMapping.
    """
    mappings = network.SegmentHostMapping.get_objects(context)
    return set(entry.host for entry in mappings)
def _get_phys_nets(agent):
configurations_dict = agent.get('configurations', {})
mappings = configurations_dict.get('bridge_mappings', {})
mappings.update(configurations_dict.get('interface_mappings', {}))
mappings.update(configurations_dict.get('device_mappings', {}))
return list(mappings.keys())
# Hosts whose segment mappings were already recalculated since this server
# started; lets agent heartbeats skip redundant updates unless the agent
# restarts (start_flag) or its configuration changes.
reported_hosts = set()
# NOTE: Module level variable of segments plugin. It should be removed once
# segments becomes a default plugin.
segments_plugin = None
def get_segments_with_phys_nets(context, phys_nets):
    """Get segments from physical networks.

    L2 providers usually have information of hostname and physical
    networks. They could use this method to get related segments and then
    update SegmentHostMapping.
    """
    physnet_list = list(phys_nets)
    if not physnet_list:
        return []
    with db_api.CONTEXT_READER.using(context):
        return network.NetworkSegment.get_objects(
            context, physical_network=physnet_list)
def map_segment_to_hosts(context, segment_id, hosts):
    """Map segment to a collection of hosts."""
    with db_api.CONTEXT_WRITER.using(context):
        for hostname in hosts:
            mapping = network.SegmentHostMapping(
                context, segment_id=segment_id, host=hostname)
            mapping.create()
def _update_segment_host_mapping_for_agent(resource, event, trigger,
                                           payload=None):
    # AFTER_CREATE/AFTER_UPDATE agent callback: recompute which segments
    # the reporting host can reach based on the physical networks the
    # agent advertises in its configuration.
    plugin = payload.metadata.get('plugin')
    agent = payload.desired_state
    host = payload.metadata.get('host')
    context = payload.context
    check_segment_for_agent = getattr(plugin, 'check_segment_for_agent', None)
    # Bail out unless the segments plugin is configured and the core
    # plugin knows how to match segments to agents.
    if (not check_user_configured_segment_plugin() or
            not check_segment_for_agent):
        return
    phys_nets = _get_phys_nets(agent)
    if not phys_nets:
        return
    start_flag = agent.get('start_flag', None)
    # A host we've already synced only needs re-syncing when its agent
    # restarted (start_flag set).
    if host in reported_hosts and not start_flag:
        return
    reported_hosts.add(host)
    # Skip the DB work when the agent's configuration is unchanged from
    # its previous state (payload.states[1] is the prior agent record).
    if (len(payload.states) > 1 and
            payload.states[1] is not None and
            agent.get('configurations') == payload.states[1].get(
                'configurations')):
        return
    segments = get_segments_with_phys_nets(context, phys_nets)
    current_segment_ids = {
        segment['id'] for segment in segments
        if check_segment_for_agent(segment, agent)}
    update_segment_host_mapping(context, host, current_segment_ids)
    registry.publish(resources.SEGMENT_HOST_MAPPING, events.AFTER_CREATE,
                     plugin, payload=events.DBEventPayload(
                         context,
                         metadata={
                             'host': host,
                             'current_segment_ids': current_segment_ids}))
def _add_segment_host_mapping_for_segment(resource, event, trigger,
                                          payload=None):
    # PRECOMMIT_CREATE segment callback: map a newly created segment to
    # every agent host that can reach its physical network.
    context = payload.context
    segment = payload.latest_state
    if not utils.is_session_active(context.session):
        # The session might be in partial rollback state, due to errors in
        # peer callback. In that case, there is no need to add the mapping.
        # Just return here.
        return
    if not segment.physical_network:
        return
    cp = directory.get_plugin()
    check_segment_for_agent = getattr(cp, 'check_segment_for_agent', None)
    if not check_user_configured_segment_plugin() or not hasattr(
            cp, 'get_agents') or not check_segment_for_agent:
        # not an agent-supporting plugin; permanently unsubscribe this
        # callback so future segment creates skip it.
        registry.unsubscribe(_add_segment_host_mapping_for_segment,
                             resources.SEGMENT, events.PRECOMMIT_CREATE)
        return
    hosts = {agent['host'] for agent in cp.get_agents(context)
             if check_segment_for_agent(segment, agent)}
    map_segment_to_hosts(context, segment.id, hosts)
def _delete_segments_for_network(resource, event, trigger,
                                 payload=None, **kwargs):
    """Delete every segment belonging to a network that is being deleted.

    PRECOMMIT_DELETE network callback; runs each segment delete with
    ``for_net_delete=True`` under an elevated (admin) context.
    """
    global segments_plugin
    if not segments_plugin:
        # Instantiate the segments service plugin lazily, the first time a
        # network delete needs it.
        plugin_cls = manager.NeutronManager.load_class_for_provider(
            'neutron.service_plugins', 'segments')
        segments_plugin = plugin_cls()
    admin_ctx = payload.context.elevated()
    net_filter = {'network_id': [payload.resource_id]}
    for seg in segments_plugin.get_segments(admin_ctx, filters=net_filter):
        segments_plugin.delete_segment(admin_ctx, seg['id'],
                                       for_net_delete=True)
def subscribe():
    """Register the callbacks that keep segment/host mappings current."""
    subscriptions = (
        (_update_segment_host_mapping_for_agent,
         resources.AGENT, events.AFTER_CREATE),
        (_update_segment_host_mapping_for_agent,
         resources.AGENT, events.AFTER_UPDATE),
        (_add_segment_host_mapping_for_segment,
         resources.SEGMENT, events.PRECOMMIT_CREATE),
        (_delete_segments_for_network,
         resources.NETWORK, events.PRECOMMIT_DELETE),
    )
    for callback, resource, event in subscriptions:
        registry.subscribe(callback, resource, event)
# Importing this module registers the callbacks immediately.
subscribe()
| |
import copy
import json
import logging
import os
import colorcet as cc
import pandas as pd
import pyproj
import pytoml
import tornado
import tornado.escape
import yaml
from bokeh.layouts import row, widgetbox, layout
from bokeh.models import Select, CustomJS, Jitter, DataTable, TableColumn, Slider, Button
# noinspection PyUnresolvedReferences
from bokeh.palettes import linear_palette
from bokeh.plotting import figure, ColumnDataSource
from bokeh.themes import Theme
from bokeh.tile_providers import STAMEN_TERRAIN
# Module-level logger for the crossfilter Bokeh application.
log = logging.getLogger(__name__)
def modify_doc(doc):
    """Build the crossfilter Bokeh document for one server session.

    Reads config ('c') and data ('d') file names from the session request
    arguments, loads a CSV data set, and assembles a linked crossfilter
    scatter plot, a world map, a data table, and the widgets that control
    size/color/palette/jitter. The assembled layout is added to *doc*.

    :param doc: bokeh Document for the current session.
    :raises ValueError: if a request argument contains unsafe characters.
    :raises FileNotFoundError: if the config or data file is missing.
    """
    # Marker sizes available for the 'Size' widget mapping.
    SIZES = list(range(6, 22, 3))
    # define available palettes (drop variants and a few hand-excluded maps)
    palettes = {k: v for k, v in cc.palette.items() if
                ("_" not in k and
                 k not in ["bkr", "coolwarm", "bjy", "bky", "gwv"])}
    #################
    # data handling #
    #################
    def get_data(path, force_discrete_colorable):
        """Read data from csv and transform map coordinates.

        Adds 'easting'/'northing' columns in Web Mercator projected from
        the 'lon'/'lat' columns; rows without coordinates are placed at a
        default location (Antarctic unless overridden in config).
        """
        data = pd.read_csv(path)
        # data from columns in force_discrete_colorable will be treated as discrete even if numeric
        for col in data.columns:
            if col in force_discrete_colorable:
                data[col] = data[col].apply(str)
        # Replace missing values with the string "NaN" for display.
        data = data.applymap(lambda x: "NaN" if pd.isnull(x) else x)
        # transform coords to map projection
        # NOTE(review): pyproj.Proj(init=...) and pyproj.transform are
        # deprecated in pyproj 2+ — confirm the pinned pyproj version.
        wgs84 = pyproj.Proj(init="epsg:4326")
        web_mer = pyproj.Proj(init="epsg:3857")
        data["easting"] = "NaN"
        data["northing"] = "NaN"
        data["easting"] = data["easting"].astype("float64")
        data["northing"] = data["northing"].astype("float64")
        data.loc[pd.notnull(data["lon"]), "easting"], data.loc[pd.notnull(data["lat"]), "northing"] = zip(
            *data.loc[pd.notnull(data["lon"]) & pd.notnull(data["lat"])].apply(
                lambda x: pyproj.transform(wgs84, web_mer, x["lon"], x["lat"]), axis=1))
        # show unknown locations on map in antarctic
        default_wgs84 = config.get('default_coords') or {'lon': 0, 'lat': -80}
        default_web_mer = dict(zip(("lon", "lat"),
                                   pyproj.transform(wgs84, web_mer, default_wgs84["lon"], default_wgs84["lat"])))
        data.easting = data.easting.apply(lambda x: default_web_mer["lon"] if pd.isnull(x) else x)
        data.northing = data.northing.apply(lambda x: default_web_mer["lat"] if pd.isnull(x) else x)
        return data
    def update_df(_df, _size, _color, _palette, _continuous, _discrete_sizeable, _discrete_colorable):
        """update the size and color columns of the given df based on widget selections and column classifications"""
        # Default marker size, overridden below when a size column is chosen.
        _df["size"] = 9
        if _size != 'None' and _size in _discrete_sizeable:
            values = _df[_size][pd.notnull(_df[_size])].unique()
            if all([val.isnumeric() for val in values]):
                values = sorted(values, key=lambda x: float(x))
            codes = dict(zip(values, range(len(values))))
            groups = [codes[val] for val in _df[_size].values]
            _df["size"] = [SIZES[xx] for xx in groups]
        elif _size != 'None' and _size in _continuous:
            # qcut fails when bin edges are not unique; fall back to cut.
            try:
                groups = pd.qcut(_df[_size].values, len(SIZES))
            except ValueError:
                groups = pd.cut(_df[_size].values, len(SIZES))
            _df["size"] = [SIZES[xx] for xx in groups.codes]
        # Default marker color, overridden below when a color column is chosen.
        _df["color"] = "#31AADE"
        if _color != 'None' and _color in _discrete_colorable:
            values = _df[_color][pd.notnull(_df[_color])].unique()
            colors = linear_palette(palettes[_palette], len(values))
            if all([val.isnumeric() for val in values]):
                values = sorted(values, key=lambda x: float(x))
            codes = dict(zip(values, range(len(values))))
            groups = [codes[val] for val in _df[_color].values]
            _df["color"] = [colors[xx] for xx in groups]
        elif _color != 'None' and _color in _continuous:
            colors = palettes[_palette]
            groups = pd.cut(_df[_color].values, len(colors))
            _df["color"] = [colors[xx] for xx in groups.codes]
    def create_source(_df, _size, _color, _palette, _continuous, _discrete_sizeable, _discrete_colorable):
        """Update df and return new ColumnDataSource."""
        update_df(_df, _size, _color, _palette, _continuous, _discrete_sizeable, _discrete_colorable)
        # 'ns'/'es' are the (possibly jittered) plotted copies of the
        # true 'northing'/'easting' coordinates.
        _df["ns"] = _df["northing"]
        _df["es"] = _df["easting"]
        # create a ColumnDataSource from the data set
        return ColumnDataSource(_df)
    def update_source(_source, _df, _size, _color, _palette, _continuous, _discrete_sizeable, _discrete_colorable):
        """update df and and propagate changes to source"""
        update_df(_df, _size, _color, _palette, _continuous, _discrete_sizeable, _discrete_colorable)
        # create a ColumnDataSource from the data set
        _source.data.update({"size": _df["size"], "color": _df["color"]})
    #######################
    # Data Visualizations #
    #######################
    def create_crossfilter(_df, _source, _discrete, _x, _y):
        """Return a crossfilter plot linked to ColumnDataSource '_source'."""
        kw = dict()
        # Discrete axes need an explicit categorical range.
        if _x in _discrete:
            values = _df[_x][pd.notnull(_df[_x])].unique()
            if all([val.isnumeric() for val in values]):
                kw["x_range"] = sorted(values, key=lambda x: float(x))
            else:
                kw["x_range"] = sorted(values)
        if _y in _discrete:
            values = _df[_y][pd.notnull(_df[_y])].unique()
            if all([val.isnumeric() for val in values]):
                kw["y_range"] = sorted(values, key=lambda x: float(x))
            else:
                kw["y_range"] = sorted(values)
        x_title = _x.title()
        y_title = _y.title()
        p = figure(plot_height=700, plot_width=700, # responsive=True,
                   tools="wheel_zoom, pan, save, reset, box_select, tap",
                   active_drag="box_select", active_scroll="wheel_zoom",
                   title="%s vs %s" % (y_title, x_title),
                   **kw, )
        if _x in _discrete:
            # Tilt long categorical labels so they don't overlap.
            # NOTE(review): pd.np is removed in pandas>=2 — confirm version.
            p.xaxis.major_label_orientation = pd.np.pi / 4
        # plot data on crossfilter
        p.circle(x=_x, y=_y, color="color", size="size", source=_source, line_color="white",
                 alpha=0.6,
                 # set visual properties for selected glyphs
                 selection_fill_color="color",
                 selection_fill_alpha=0.6,
                 selection_line_color="white",
                 selection_line_alpha=0.6,
                 # set visual properties for non-selected glyphs
                 nonselection_fill_color="white",
                 nonselection_fill_alpha=0.1,
                 nonselection_line_color="color",
                 nonselection_line_alpha=0.6, )
        return p
    def create_map(_source):
        """Return map linked to ColumnDataSource '_source'."""
        stamen = copy.copy(STAMEN_TERRAIN)
        # create map
        bound = 20000000 # meters
        m = figure(plot_height=700, plot_width=700, # responsive=True,
                   tools="wheel_zoom, pan, reset, box_select, tap",
                   active_drag="box_select", active_scroll="wheel_zoom",
                   x_range=(-bound, bound), y_range=(-bound, bound))
        m.axis.visible = False
        m.add_tile(stamen)
        # plot data on world map
        m.circle(x="es", y="ns", color="color", size="size", source=_source, line_color="white",
                 alpha=0.6,
                 # set visual properties for selected glyphs
                 selection_fill_color="color",
                 selection_fill_alpha=0.6,
                 selection_line_color="white",
                 selection_line_alpha=0.6,
                 # set visual properties for non-selected glyphs
                 nonselection_fill_color="black",
                 nonselection_fill_alpha=0.01,
                 nonselection_line_color="color",
                 nonselection_line_alpha=0.6, )
        return m
    def create_table(_columns, _source):
        """Return table linked to ColumnDataSource '_source'."""
        table_cols = [TableColumn(field=col, title=col) for col in _columns]
        return DataTable(source=_source, columns=table_cols, width=1600, height=250, fit_columns=False, )
    #############
    # callbacks #
    #############
    # noinspection PyUnusedLocal
    def x_change(attr, old, new):
        """Replece crossfilter plot."""
        l.children[0].children[1] = create_crossfilter(df, source, discrete, x.value, y.value)
    # noinspection PyUnusedLocal
    def y_change(attr, old, new):
        """Replece crossfilter plot."""
        l.children[0].children[1] = create_crossfilter(df, source, discrete, x.value, y.value)
    # noinspection PyUnusedLocal
    def size_change(attr, old, new):
        """Update ColumnDataSource 'source'."""
        update_source(source, df, size.value, color.value, palette.value, continuous, discrete_sizeable,
                      discrete_colorable)
    # noinspection PyUnusedLocal
    def color_change(attr, old, new):
        """Update ColumnDataSource 'source'."""
        update_source(source, df, size.value, color.value, palette.value, continuous, discrete_sizeable,
                      discrete_colorable)
    # noinspection PyUnusedLocal
    def selection_change(attr, old, new):
        """Update ColumnDataSource 'table_source' with selection found in 'source'."""
        # NOTE(review): the ['1d']['indices'] selection API is from
        # bokeh < 1.0 — confirm the pinned bokeh version.
        selected = source.selected['1d']['indices']
        table_source.data = table_source.from_df(df.iloc[selected, :])
    # noinspection PyUnusedLocal
    def palette_change(attr, old, new):
        """Update ColumnDataSource 'source'."""
        update_source(source, df, size.value, color.value, palette.value, continuous, discrete_sizeable,
                      discrete_colorable)
    ########
    # Main #
    ########
    # get user config and data paths from session arguments
    args = doc.session_context.request.arguments
    # validate config parameter
    if 'c' in args:
        configPath = tornado.escape.url_unescape(args.get('c')[0])
        # check that file name is valid
        cleanName = "".join(c for c in configPath if c.isalnum() or (c in ".-_")) # insure filename is safe
        if cleanName != configPath:
            # emit error, load error page: invalid character(s) in config parameter
            message = "Invalid character(s) in config parameter: {}".format(configPath)
            log.info(message)
            raise ValueError(message)
        # check that file exists
        elif not os.path.isfile("config/" + configPath):
            # emit error, load error page: no such config file found
            message = "No such config file found: {}".format(configPath)
            log.info(message)
            raise FileNotFoundError(message)
        # valid name and file exists, therefore pass argument
        else:
            configPath = "config/" + configPath
    else:
        configPath = "defaultConfig.toml"
    # load config file
    with open(configPath) as toml_data:
        config = pytoml.load(toml_data)
    # validate data parameter
    if 'd' in args:
        dataPath = tornado.escape.url_unescape(args.get('d')[0])
        # check that file name is valid
        cleanName = "".join(c for c in dataPath if c.isalnum() or (c in ".-_")) # insure filename is safe
        if cleanName != dataPath:
            # emit error, load error page: invalid character(s) in data parameter
            message = "Invalid character(s) in data parameter: {}".format(dataPath)
            log.info(message)
            raise ValueError(message)
        # check that file exists
        elif not os.path.isfile("data/" + dataPath):
            # emit error, load error page: no such data file found
            message = "No such data file found: {}".format(dataPath)
            log.info(message)
            raise FileNotFoundError(message)
        # valid name and file exists, therefore pass argument
        else:
            dataPath = "data/" + dataPath
    else:
        dataPath = config.get("defaultDataPath")
        if not os.path.isfile(dataPath):
            message = 'defaultDataPath "{}" from config file "{}" does not point to a file'.format(dataPath, configPath)
            raise FileNotFoundError(message)
    df = get_data(dataPath, config.get("force_discrete_colorable", []))
    # catigorize columns
    columns = [c for c in df.columns if c not in {"easting", "northing"}]
    discrete = [x for x in columns if df[x].dtype == object]
    continuous = [x for x in columns if x not in discrete]
    discrete_sizeable = [x for x in discrete if len(df[x].unique()) <= len(SIZES)]
    discrete_colorable = [x for x in discrete if (len(df[x].unique()) <= config.get("max_discrete_colors", 256)) or
                          ((x in config.get("force_discrete_colorable", [])) and (len(df[x].unique()) < 256))]
    # create widgets
    x = Select(title='X-Axis',
               value=(config.get("default_xAxis") if config.get("default_xAxis") in columns else columns[1]),
               options=columns)
    x.on_change('value', x_change)
    y = Select(title='Y-Axis',
               value=(config.get("default_yAxis") if config.get("default_yAxis") in columns else columns[2]),
               options=columns)
    y.on_change('value', y_change)
    sizeOptions = ['None'] + discrete_sizeable + continuous
    size = Select(title='Size', value=config.get("default_sizeBy", "None"), options=sizeOptions)
    size.on_change('value', size_change)
    colorOptions = ['None'] + discrete_colorable + continuous
    color = Select(title='Color', value=config.get("default_colorBy", "None"), options=colorOptions)
    color.on_change('value', color_change)
    palleteOptions = [k for k in palettes.keys()]
    palette = Select(title='Palette', value=config.get("default_palette", "inferno"), options=palleteOptions)
    palette.on_change('value', palette_change)
    ######################
    # initialize sources #
    ######################
    source = create_source(df, size.value, color.value, palette.value, continuous, discrete_sizeable,
                           discrete_colorable)
    source.on_change('selected', selection_change)
    table_source = ColumnDataSource(df)
    ########################
    # javascript callbacks #
    ########################
    # Client-side CSV export of the rows currently in the table source.
    download_callback = CustomJS(args=dict(table_source=table_source), code=r"""
        var data = table_source.data;
        var columns = %s;
        var n = columns.length;
        var m = data[columns[0]].length;
        var csvLines = [];
        var currRow = [];
        for (j=0; j<n; j++) {
            currRow.push("\"" + columns[j].toString() + "\"");
        }
        csvLines.push(currRow.join(","));
        for (i=0; i < m; i++) {
            var currRow = [];
            for (j=0; j<n; j++) {
                if (typeof(data[columns[j]][i]) == 'string') {
                    currRow.push("\"" + data[columns[j]][i].toString() + "\"");
                } else {
                    currRow.push(data[columns[j]][i].toString());
                }
            }
            csvLines.push(currRow.join(","));
        }
        var filetext = csvLines.join("\n");
        var filename = 'data_result.csv';
        var blob = new Blob([filetext], { type: 'text/csv;charset=utf-8;' });
        //addresses IE
        if (navigator.msSaveBlob) {
            navigator.msSaveBlob(blob, filename);
        }
        else {
            var link = document.createElement("a");
            link = document.createElement('a');
            link.href = URL.createObjectURL(blob);
            link.download = filename;
            link.target = "_blank";
            link.style.visibility = 'hidden';
            link.dispatchEvent(new MouseEvent('click'));
        }
    """ % json.dumps(columns))
    # Client-side jitter of plotted map coordinates ('es'/'ns') around the
    # true 'easting'/'northing' values; width 0 restores exact positions.
    jitter_callback = CustomJS(args=dict(source=source, map_jitter=Jitter()), code=r"""
        var data = source.data;
        if (slider.value == 0) {
            for (var i = 0; i < data['easting'].length; i++) {
                data['es'][i] = data['easting'][i];
            }
            for (var i = 0; i < data['northing'].length; i++) {
                data['ns'][i] = data['northing'][i];
            }
        }
        else {
            map_jitter.distribution = dist.value
            map_jitter.width = slider.value * 1000
            for (var i = 0; i < data['easting'].length; i++) {
                data['es'][i] = map_jitter.compute(data['easting'][i]);
            }
            for (var i = 0; i < data['northing'].length; i++) {
                data['ns'][i] = map_jitter.compute(data['northing'][i]);
            }
        }
        source.trigger('change');
    """)
    download_button = Button(label="Download Selected", button_type="success", callback=download_callback)
    jitter_selector = Select(title="Map Jitter Distribution:", value="uniform",
                             options=["uniform", "normal"], callback=jitter_callback)
    jitter_slider = Slider(start=0, end=1000, value=0, step=10,
                           title="Map Jitter Width (Km):", callback=jitter_callback)
    # Expose the jitter widgets to the JS callback as 'dist' and 'slider'.
    jitter_callback.args["dist"] = jitter_selector
    jitter_callback.args["slider"] = jitter_slider
    # initialize plots
    crossfilter = create_crossfilter(df, source, discrete, x.value, y.value)
    mapPlot = create_map(source)
    # create layout
    controls = widgetbox([x, y, color, palette, size, jitter_selector, jitter_slider, download_button], width=200)
    table = widgetbox(create_table(columns, table_source))
    l = layout([
        [controls, crossfilter, mapPlot],
        [row(table)]
    ])
    # add layout to document
    doc.add_root(l)
    doc.title = "Crossfilter"
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input; this literal is trusted, but confirm the
    # pinned PyYAML version accepts the call.
    doc.theme = Theme(json=yaml.load("""
        attrs:
            Figure:
                background_fill_color: '#2F2F2F'
                border_fill_color: '#2F2F2F'
                outline_line_color: '#444444'
            Axis:
                axis_line_color: "white"
                axis_label_text_color: "white"
                major_label_text_color: "white"
                major_tick_line_color: "white"
                minor_tick_line_color: "white"
                minor_tick_line_color: "white"
            Grid:
                grid_line_dash: [6, 4]
                grid_line_alpha: .3
            Title:
                text_color: "white"
    """))
| |
#!/bin/env python2.7
import sys
import os
import os.path
import glob
import copy
import traceback
import re
import csv
import tempfile
import urllib
import shutil
import atexit
import subprocess
import time
import math
from collections import defaultdict, Counter
from os.path import join, dirname, realpath
try:
sys.path.append(join(dirname(realpath(__file__)),
'..', '..', 'common', 'src'))
except NameError:
pass
from optparse_gui import OptionParser, OptionGroup, GUI, UserCancelledError, ProgressText
from util import *
from fisher import *
from pileups import SerialPileups, ThreadedPileups
from chromreg import ChromLabelRegistry
from operator import itemgetter
from version import VERSION
# Combine this tool's release number with the shared base version string.
VERSION = '1.0.6 (%s)' % (VERSION,)
def excepthook(etype, value, tb):
    # GUI-mode exception hook: print the traceback, then block on <Enter>
    # so a console window spawned for the GUI does not vanish before the
    # user can read the error. NOTE(review): Python 2 only syntax
    # (print >> statement, raw_input).
    traceback.print_exception(etype, value, tb)
    print >>sys.stderr, "Type <Enter> to Exit...",
    sys.stderr.flush()
    raw_input()
# Directories scheduled for removal when the program exits.
toremove = []
def cleanup():
    """Best-effort removal of every registered temporary directory."""
    for scratch_dir in toremove:
        shutil.rmtree(scratch_dir, ignore_errors=True)
atexit.register(cleanup)
# '--GUI' was requested but no GUI is available: importing needswx prints
# the wxPython requirement, then exit with an error status.
if not GUI() and len(sys.argv) == 2 and sys.argv[1] == '--GUI':
    from optparse_gui.needswx import *
    sys.exit(1)
# With a GUI available and no CLI arguments, run the graphical option
# dialog; keep parse errors non-fatal and hold the console open on crash.
if GUI() and len(sys.argv) == 1:
    from optparse_gui import OptionParserGUI
    parser = OptionParserGUI(version=VERSION)
    error_kwargs = {'exit': False}
    sys.excepthook = excepthook
else:
    parser = OptionParser(version=VERSION)
    error_kwargs = {}
# Command-line / GUI options. The extra keywords (name=, remember=,
# notNone=, filetypes=) are optparse_gui extensions used by the dialog.
advanced = OptionGroup(parser, "Advanced")
parser.add_option("-s", "--snvs", type="files", dest="snvs", default=None,
                  help="Single-Nucleotide-Variant files. Required.", name="SNV Files",
                  notNone=True, remember=True,
                  filetypes=[("SNV Files", "*.vcf;*.csv;*.tsv;*.xls;*.xlsx;*.txt")])
parser.add_option("-r", "--readalignments", type="files", dest="alignments", default=None,
                  help="Read alignment files in indexed BAM format. Required.", name="Read Alignment Files",
                  notNone=True, remember=True,
                  filetypes=[("Read Alignment Files (indexed BAM)", "*.bam")])
advanced.add_option("-m", "--minreads", type="int", dest="minreads", default=10, remember=True,
                    help="Minimum number of good reads at SNV locus per alignment file. Default=10.", name="Min. Reads")
advanced.add_option("-M", "--maxreads", type="float", dest="maxreads", default=None, remember=True,
                    help="Scale read counts at high-coverage loci to ensure at most this many good reads at SNV locus per alignment file. Values greater than 1 indicate absolute read counts, otherwise the value indicates the coverage distribution percentile. Default=No maximum.", name="Max. Reads")
advanced.add_option("-F", "--full", action="store_true", dest="full", default=False, remember=True,
                    help="Output extra diagnostic read count fields. Default=False.", name="All Fields")
advanced.add_option("-f", "--alignmentfilter", action="store_false", dest="filter", default=True, remember=True,
                    help="(Turn off) alignment filtering by length, edits, etc.", name="Filter Alignments")
advanced.add_option("-U", "--uniquereads", action="store_true", dest="unique", default=False, remember=True,
                    help="Consider only distinct reads.", name="Unique Reads")
advanced.add_option("-t", "--threadsperbam", type="int", dest="tpb", default=1, remember=True,
                    help="Worker threads per alignment file. Indicate no threading with 0. Default=1.", name="Threads/BAM")
advanced.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False, remember=True,
                    help="Quiet.", name="Quiet")
# advanced.add_option("-d", "--debug", action="store_true", dest="debug", default=False, remember=True,
#                     help="Debug.", name="Debug")
parser.add_option("-o", "--output", type="savefile", dest="output", remember=True,
                  help="Output file. Leave empty for console ouptut.", default="",
                  name="Output File", filetypes=[("All output formats", "*.xlsx;*.xls;*.csv;*.tsv;*.txt"),
                                                 ("Excel", "*.xlsx"), ("Excel2003", "*.xls"),
                                                 ("CSV", "*.csv"), ("TSV", "*.tsv"), ("Text", "*.txt")])
parser.add_option_group(advanced)
opt = None
# In GUI mode ('exit' in error_kwargs) re-show the dialog until the user
# either submits valid options or cancels; CLI mode parses once.
while True:
    if 'exit' in error_kwargs:
        try:
            opt, args = parser.parse_args(opts=opt)
        except UserCancelledError:
            sys.exit(0)
    else:
        opt, args = parser.parse_args()
    break
progress = None
# Console output mode: suppress progress chatter so it doesn't mix with
# the results written to stdout.
if not opt.output:
    opt.quiet = True
# No maximum supplied: use an effectively infinite read-count cap.
if opt.maxreads == None:
    opt.maxreads = 1e+20
progress = ProgressText(quiet=opt.quiet)
from dataset import XLSFileTable, CSVFileTable, TSVFileTable, XLSXFileTable, TXTFileTable, BEDFile, VCFFile
progress.stage("Read SNV data", len(opt.snvs))
snvheaders = filter(None, """
CHROM POS REF ALT
""".split())
snvdata = {}
# extrasnvheaders = []
# usedsnvheaders = set()
snvchroms = defaultdict(set)
for filename in opt.snvs:
base, extn = filename.rsplit('.', 1)
extn = extn.lower()
if extn == 'csv':
snvs = CSVFileTable(filename=filename)
elif extn == 'vcf':
snvs = VCFFile(filename=filename)
elif extn == 'tsv':
snvs = TSVFileTable(filename=filename)
elif extn == 'xls':
snvs = XLSFileTable(filename=filename)
elif extn == 'xlsx':
snvs = XLSXFileTable(filename=filename)
elif extn == 'txt':
snvs = TXTFileTable(filename=filename, headers=snvheaders)
else:
raise RuntimeError("Unexpected SNV file extension: %s" % filename)
for h in snvheaders:
if h not in snvs.headers():
raise RuntimeError(
"Required header: %s missing from SNV file %s" % (h, filename))
for h in snvs.headers():
if h in snvheaders:
continue
# if h not in extrasnvheaders:
# extrasnvheaders.append(h)
for r in snvs:
chr = r[snvheaders[0]].strip()
snvchroms[filename].add(chr)
locus = int(r[snvheaders[1]].strip())
ref = r[snvheaders[2]].strip()
alt = r[snvheaders[3]].strip()
if r.get('INFO:INDEL'):
continue
if len(ref) != 1:
continue
if not re.search(r'^[ACGT](,[ACGT])*$', alt):
continue
# for h in r:
# if r.get(h):
# usedsnvheaders.add(h)
snvkey = (filename, chr, locus, ref, alt)
if snvkey not in snvdata:
snvdata[snvkey] = r
progress.update()
progress.done()
# Map each file's local chromosome labels to canonical chromosome names and
# de-duplicate loci that appear in more than one input file.
chrreg = ChromLabelRegistry()
for snvfile in snvchroms:
    chrreg.add_labels(snvfile,snvchroms[snvfile])
snvdata1 = {}
for (sf, chr, locus, ref, alt), r in snvdata.iteritems():
    chrom = chrreg.label2chrom(sf,chr)
    assert(chrom)
    snvkey = (chrom,locus,ref,alt)
    if snvkey not in snvdata1:
        snvdata1[snvkey] = (chrom,locus,ref,alt,r)
# Register BAM label spaces too so pileups can translate both directions.
for bamfile in opt.alignments:
    chrreg.add_bamlabels(bamfile)
chrreg.determine_chrom_order()
# Final work list: canonical (chrom, locus, ref, alt, row), genome-ordered.
snvdata = sorted(snvdata1.values(),key=lambda s: (chrreg.chrom_order(s[0]),s[1],s[2],s[3]))
progress.message("SNVs: %d\n" % len(snvdata))
# Assemble the output column set: SNV key columns, then per-alignment count
# and score columns.  The `debugging` columns are always computed but are
# stripped from the visible headers unless --full was given.
outheaders = snvheaders + filter(None, """
SNVCountForward
SNVCountReverse
RefCountForward
RefCountReverse
SNVCount
RefCount
GoodReads
%BadRead
R
HomoVarSc
HetSc
HomoRefSc
VarDomSc
RefDomSc
""".split())
debugging = filter(None, """
OtherCountForward
OtherCountReverse
OtherCount
NotHomoVarpV
NotHomoRefpV
NotHetpV
VarDompV
RefDompV
NotHomoVarFDR
NotHomoRefFDR
NotHetFDR
VarDomFDR
RefDomFDR
RemovedDuplicateReads
FilteredSNVLociReads
SNVLociReads
""".split())
# One extra diagnostic column per bad-read category.
debugging.extend(sorted(BadRead.allheaders))
outheaders.extend(debugging)
# The AlignedReads (BAM name) column goes just before the count columns.
pos = outheaders.index("SNVCountForward")
outheaders.insert(pos, 'AlignedReads')
# outheaders1 is the visible subset actually written to the output file.
outheaders1 = copy.copy(outheaders)
if not opt.full:
    for dh in debugging:
        if dh in outheaders1:
            outheaders1.remove(dh)
# Choose the output table writer from the output file's extension; with no
# output file, write plain text to stdout and mark empty cells with "-".
emptysym = None
if opt.output:
    filename = opt.output
    base, extn = filename.rsplit('.', 1)
    extn = extn.lower()
    if extn == 'csv':
        output = CSVFileTable(filename=filename, headers=outheaders1)
    elif extn == 'tsv':
        output = TSVFileTable(filename=filename, headers=outheaders1)
    elif extn in ('xls', 'xlsx'):
        # Both Excel writers take the same arguments; pick the right class.
        excel_factory = XLSFileTable if extn == 'xls' else XLSXFileTable
        output = excel_factory(
            filename=filename, headers=outheaders1, sheet='Results')
    elif extn == 'txt':
        output = TXTFileTable(filename=filename, headers=outheaders1)
    else:
        raise RuntimeError("Unexpected output file extension: %s" % filename)
else:
    output = TXTFileTable(filename=sys.stdout, headers=outheaders1)
    emptysym = "-"
# One output row is produced per (SNV locus, alignment file) pair.
outrows = []
# Choose the per-read filter and the pileup driver (serial vs. threaded).
if opt.filter:
    readfilter = SNVPileupReadFilter()
else:
    readfilter = BasicFilter()
if opt.tpb == 0:
    pileups = SerialPileups(snvdata, opt.alignments, readfilter, chrreg).iterator()
else:
    pileups = ThreadedPileups(snvdata, opt.alignments, readfilter, chrreg, threadsperbam=opt.tpb).iterator()
progress.stage("Count reads per SNV", len(snvdata))
totalsnvs = 0
start = time.time()
for snvchr, snvpos, ref, alt, snvextra in snvdata:
    # The pileup iterator yields loci in the same order as snvdata.
    snvchr1, snvpos1, ref1, alt1, total, reads, badread = pileups.next()
    assert(snvchr == snvchr1 and snvpos == snvpos1)
    # Bucket the good reads by observed nucleotide; si indexes the
    # alignment (BAM) file each read came from.
    goodreads = defaultdict(list)
    for al, pos, base, si in reads:
        goodreads[base].append((si, al))
    # Deduplicate the reads based on the read sequence or the
    # start and end of the alignment or ???
    duplicates_removed = Counter()
    if opt.unique:
        for base in goodreads:
            seen = set()
            retain = list()
            for si, al in goodreads[base]:
                if (si, al.seq) not in seen:
                    retain.append((si, al))
                    seen.add((si, al.seq))
                else:
                    duplicates_removed[si] += 1
            goodreads[base] = retain
    # goodreads now contains the relevant read alignments.
    totalsnvs += 1
    # counts[(nucleotide, strand, file-index)] -> read count.
    counts = defaultdict(int)
    for base in goodreads:
        for si, al in goodreads[base]:
            counts[(base, "R" if al.is_reverse else "F", si)] += 1
    # Skip the locus entirely if any alignment file has fewer than
    # opt.minreads good reads covering it.
    mincounted = 1e+20
    for si, alf in enumerate(opt.alignments):
        counted = sum(map(lambda t: counts[(t[0], t[1], si)], [
            (n, d) for n in 'ACGT' for d in 'FR']))
        mincounted = min(counted, mincounted)
    if mincounted < opt.minreads:
        continue
    for si, alf in enumerate(opt.alignments):
        # Forward/reverse-strand counts for variant, reference and "other"
        # alleles in this alignment file.
        nsnvf = sum(map(lambda nuc: counts[(nuc, "F", si)], map(str.strip,alt.split(','))))
        nsnvr = sum(map(lambda nuc: counts[(nuc, "R", si)], map(str.strip,alt.split(','))))
        nsnv = nsnvr + nsnvf
        nreff = counts[(ref, "F", si)]
        nrefr = counts[(ref, "R", si)]
        nref = nreff + nrefr
        othernucs = set('ACGT') - set([ref] + alt.split(','))
        notherf = sum(map(lambda nuc: counts[(nuc, "F", si)], othernucs))
        notherr = sum(map(lambda nuc: counts[(nuc, "R", si)], othernucs))
        nother = notherf + notherr
        counted = sum(map(lambda t: counts[(t[0], t[1], si)], [
            (n, d) for n in 'ACGT' for d in 'FR']))
        # The -1 placeholders are overwritten later with p-values, FDRs and
        # genotype scores; column order must match outheaders exactly.
        row = [ snvchr, snvpos, ref, alt ] + \
            [ os.path.split(alf)[1].rsplit('.', 1)[0] ] + \
            [nsnvf, nsnvr,
             nreff, nrefr,
             nsnv, nref,
             counted,
             100.0 * (total[si] - badread[si, 'Good']) /
             float(total[si]) if total[si] != 0 else 0.0,
             float(nsnv)/(nsnv+nref) if (nsnv+nref) != 0 else "NA",
             -1, -1, -1, -1, -1,
             notherf, notherr,
             nother,
             -1, -1, -1, -1, -1,
             -1, -1, -1, -1, -1,
             duplicates_removed[si],
             badread[si, 'Good'],
             total[si]]
        for s in sorted(BadRead.allheaders):
            row.append(badread[si, s])
        outrows.append(row)
    progress.update()
progress.done()
if not opt.quiet:
    # Python 2 print statement (this whole script is Python 2).
    print "SNVs/sec: %.2f"%(float(totalsnvs)/(time.time()-start),)
if not opt.quiet:
print "SNVs/sec: %.2f"%(float(totalsnvs)/(time.time()-start),)
# Determine the maxreads value, if percentile, otherwise let the defaultdict take care of it
coverage = defaultdict(list)
maxreads = defaultdict(lambda: int(opt.maxreads))
if 0 < opt.maxreads < 1:
for r in map(lambda r: dict(zip(outheaders,r)),outrows):
coverage[r['AlignedReads']].append(r['GoodReads'])
for al in coverage:
n = len(coverage[al])
percind = int(round(n*opt.maxreads))
maxreads[al] = sorted(coverage[al])[percind]
for i in range(len(outrows)):
#Exctract the counts and rescale if necessary
r = dict(zip(outheaders, outrows[i]))
al,nsnv,nref,nother,counted = map(r.get,["AlignedReads","SNVCount","RefCount","OtherCount","GoodReads"])
if counted > maxreads[al]:
factor = float(maxreads[al])/float(counted)
nsnv = int(round(factor*nsnv))
nref = int(round(factor*nref))
nother = int(round(factor*nother))
#Compute p-values
pcount = 0.5
n = nsnv + nref + nother
nprime = nsnv + nref + nother + 4 * pcount
q = float(nother + 2 * pcount) / (2 * nprime)
nothomoref = binom_test_high(nsnv, n, q)
nothomovar = binom_test_high(nref, n, q)
if nsnv > nref:
nothet = binom_test_high(nsnv, nsnv + nref, 0.5)
refdom = 1.0
vardom = nothet
elif nref > nsnv:
nothet = binom_test_high(nref, nsnv + nref, 0.5)
vardom = 1.0
refdom = nothet
else:
nothet = 1.0
vardom = 1.0
refdom = 1.0
# And store in the output rows...
pos = outheaders.index("NotHomoRefpV")
outrows[i][pos] = nothomoref
pos = outheaders.index("NotHetpV")
outrows[i][pos] = nothet
pos = outheaders.index("NotHomoVarpV")
outrows[i][pos] = nothomovar
pos = outheaders.index("VarDompV")
outrows[i][pos] = vardom
pos = outheaders.index("RefDompV")
outrows[i][pos] = refdom
# Now compute FDR and scores...
pvkeys = filter(lambda h: h.endswith('pV'), outheaders)
fdrkeys = filter(lambda h: h.endswith('FDR'), outheaders)
allpvals = []
n = len(outrows)
for pvk in pvkeys:
pos = outheaders.index(pvk)
allpvals.extend(map(itemgetter(pos), outrows))
# print allpvals
allfdrs = fdr(allpvals)
for j, fdrk in enumerate(fdrkeys):
pos1 = outheaders.index(fdrk)
for i in range(len(outrows)):
outrows[i][pos1] = allfdrs[(j * n) + i]
for i in range(len(outrows)):
r = dict(zip(outheaders, outrows[i]))
homovarsc = max(0.0, min(pvscore(r["NotHetFDR"]), pvscore(
r["NotHomoRefFDR"])) - pvscore(r["NotHomoVarFDR"]))
homorefsc = max(0.0, min(pvscore(r["NotHetFDR"]), pvscore(
r["NotHomoVarFDR"])) - pvscore(r["NotHomoRefFDR"]))
hetsc = max(0.0, min(pvscore(r["NotHomoRefFDR"]), pvscore(
r["NotHomoVarFDR"])) - pvscore(r["NotHetFDR"]))
vardomsc = pvscore(r["VarDomFDR"])
refdomsc = pvscore(r["RefDomFDR"])
pos = outheaders.index("HomoVarSc")
outrows[i][pos] = homovarsc
pos = outheaders.index("HomoRefSc")
outrows[i][pos] = homorefsc
pos = outheaders.index("HetSc")
outrows[i][pos] = hetsc
pos = outheaders.index("VarDomSc")
outrows[i][pos] = vardomsc
pos = outheaders.index("RefDomSc")
outrows[i][pos] = refdomsc
progress.stage('Output results')
output.from_rows(
map(lambda r: dict(zip(outheaders, r + [emptysym] * 50)), outrows))
progress.done()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import skipIf
from heat.engine import clients
from heat.engine import environment
from heat.tests.v1_1 import fakes
from heat.common import exception
from heat.common import template_format
from heat.engine import resources
from heat.engine.resources import instance as instances
from heat.engine import service
from heat.openstack.common.importutils import try_import
from heat.engine import parser
from heat.tests.common import HeatTestCase
from heat.tests import utils
# Instance + volume + attachment; the attachment device path is supplied by
# the test via the '%s' placeholder.
test_template_volumeattach = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Delete",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": "test_KeyName"
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/%s"
}
}
}
}
'''
# The attachment's InstanceId Ref target is supplied via '%s' so tests can
# exercise both valid and dangling references.
test_template_ref = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "%s" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
# Baseline template with no Fn::FindInMap usage -- validates successfully.
test_template_findinmap_valid = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2 KeyPair to' + \
'enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
# Fn::FindInMap referencing an undeclared map/parameter ("DistroArch2AMI",
# "LinuxDistribution") -- must fail validation.
test_template_findinmap_invalid = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2 KeyPair to enable SSH ' + \
'access to the instances",' + \
''' "Type" : "String"
}
},
"Mappings" : {
"AWSInstanceType2Arch" : {
"t1.micro" : { "Arch" : "64" },
"m1.small" : { "Arch" : "64" },
"m1.medium" : { "Arch" : "64" },
"m1.large" : { "Arch" : "64" },
"m1.xlarge" : { "Arch" : "64" },
"m2.xlarge" : { "Arch" : "64" },
"m2.2xlarge" : { "Arch" : "64" },
"m2.4xlarge" : { "Arch" : "64" },
"c1.medium" : { "Arch" : "64" },
"c1.xlarge" : { "Arch" : "64" },
"cc1.4xlarge" : { "Arch" : "64HVM" },
"cc2.8xlarge" : { "Arch" : "64HVM" },
"cg1.4xlarge" : { "Arch" : "64HVM" }
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
''' + \
'"ImageId" : { "Fn::FindInMap" : [ "DistroArch2AMI", { "Ref" : ' + \
'"LinuxDistribution" },' + \
'{ "Fn::FindInMap" : [ "AWSInstanceType2Arch", { "Ref" : ' + \
'"InstanceType" }, "Arch" ] } ] },' + \
'''
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName"}
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdb"
}
}
}
}
'''
# Malformed Resources section: resource attributes appear directly under
# "Resources" instead of under a named resource -- must fail validation.
test_template_invalid_resources = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "AWS CloudFormation Sample Template for xyz.",
"Parameters" : {
"InstanceType" : {
"Description" : "Defined instance type",
"Type" : "String",
"Default" : "node.ee",
"AllowedValues" : ["node.ee", "node.apache", "node.api"],
"ConstraintDescription" : "must be a valid instance type."
}
},
"Resources" : {
"Type" : "AWS::EC2::Instance",
"Metadata" : {
},
"Properties" : {
"ImageId" : { "Ref" : "centos-6.4-20130701-0" },
"InstanceType" : { "Ref" : "InstanceType" }
}
}
}
'''
# Instance with a property ("UnknownProperty") the resource type does not
# declare -- must fail validation.
test_template_invalid_property = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"UnknownProperty": "unknown"
}
}
}
}
'''
# Instance with a declared-but-unimplemented property (SourceDestCheck).
test_template_unimplemented_property = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SourceDestCheck": "false"
}
}
}
}
'''
# "Destroy" is not a valid DeletionPolicy value.
test_template_invalid_deletion_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Destroy",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
# Snapshot DeletionPolicy on an instance (unsupported for this type).
test_template_snapshot_deletion_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Snapshot",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
# Snapshot DeletionPolicy on a volume (supported when volume_backups exist).
test_template_volume_snapshot = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"DeletionPolicy": "Snapshot",
"Properties" : {
"Size" : "6",
"AvailabilityZone" : "nova"
}
}
}
}
'''
# Instance referencing a keypair that is not registered with nova.
test_unregistered_key = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
# Instance referencing an image by name (used for unknown/duplicate image
# lookup tests).
test_template_image = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" }
}
}
}
}
'''
# SecurityGroups together with NetworkInterfaces -- a property conflict.
test_template_invalid_secgroups = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SecurityGroups": [ "default" ],
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
# SecurityGroupIds together with NetworkInterfaces -- a property conflict.
test_template_invalid_secgroupids = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"SecurityGroupIds": [ "default" ],
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
# Minimal instance template used to exercise nova client error handling.
test_template_nova_client_exception = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Resources" : {
"Instance": {
"Type": "AWS::EC2::Instance",
"DeletionPolicy": "Delete",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large"
}
}
}
}
'''
# "AName" is used both as a parameter and as a resource logical name --
# logical names must be unique across the template.
test_template_unique_logical_name = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "test.",
"Parameters" : {
"KeyName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
},
"AName" : {
''' + \
'"Description" : "Name of an existing EC2' + \
'KeyPair to enable SSH access to the instances",' + \
'''
"Type" : "String"
}
},
"Resources" : {
"AName": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "image_name",
"InstanceType": "m1.large",
"KeyName": { "Ref" : "KeyName" },
"NetworkInterfaces": [ "mgmt", "data" ]
}
}
}
}
'''
class validateTest(HeatTestCase):
    """Template validation tests: engine-level validate_template() results
    and per-resource validate() failures.

    Fixes: setUp called resources.initialise() twice; the embedded HOT
    template had lost its YAML nesting; assertTrue(x is None) replaced by
    assertIsNone.
    """

    def setUp(self):
        super(validateTest, self).setUp()
        resources.initialise()
        self.fc = fakes.FakeClient()
        utils.setup_dummy_db()
        self.ctx = utils.dummy_context()

    def test_validate_volumeattach_valid(self):
        t = template_format.parse(test_template_volumeattach % 'vdq')
        stack = parser.Stack(self.ctx, 'test_stack', parser.Template(t))
        volumeattach = stack['MountPoint']
        self.assertIsNone(volumeattach.validate())

    def test_validate_volumeattach_invalid(self):
        t = template_format.parse(test_template_volumeattach % 'sda')
        stack = parser.Stack(self.ctx, 'test_stack', parser.Template(t))
        volumeattach = stack['MountPoint']
        self.assertRaises(exception.StackValidationFailed,
                          volumeattach.validate)

    def test_validate_ref_valid(self):
        t = template_format.parse(test_template_ref % 'WikiDatabase')
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res['Description'], 'test.')

    def test_validate_hot_valid(self):
        t = template_format.parse(
            """
            heat_template_version: 2013-05-23
            description: test.
            resources:
              my_instance:
                type: AWS::EC2::Instance
            """)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res['Description'], 'test.')

    def test_validate_ref_invalid(self):
        t = template_format.parse(test_template_ref % 'WikiDatabasez')
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertNotEqual(res['Description'], 'Successfully validated')

    def test_validate_findinmap_valid(self):
        t = template_format.parse(test_template_findinmap_valid)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res['Description'], 'test.')

    def test_validate_findinmap_invalid(self):
        t = template_format.parse(test_template_findinmap_invalid)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertNotEqual(res['Description'], 'Successfully validated')

    def test_validate_parameters(self):
        t = template_format.parse(test_template_ref % 'WikiDatabase')
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res['Parameters'], {'KeyName': {
            'Type': 'String',
            'Description': 'Name of an existing EC2KeyPair to enable SSH '
                           'access to the instances'}})

    def test_validate_properties(self):
        t = template_format.parse(test_template_invalid_property)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res, {'Error': 'Unknown Property UnknownProperty'})

    def test_invalid_resources(self):
        t = template_format.parse(test_template_invalid_resources)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual({'Error': 'Resources must contain Resource. '
                                   'Found a [string] instead'},
                         res)

    def test_unimplemented_property(self):
        t = template_format.parse(test_template_unimplemented_property)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(
            res,
            {'Error': 'Property SourceDestCheck not implemented yet'})

    def test_invalid_deletion_policy(self):
        t = template_format.parse(test_template_invalid_deletion_policy)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res, {'Error': 'Invalid DeletionPolicy Destroy'})

    def test_snapshot_deletion_policy(self):
        t = template_format.parse(test_template_snapshot_deletion_policy)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(
            res, {'Error': 'Snapshot DeletionPolicy not supported'})

    @skipIf(try_import('cinderclient.v1.volume_backups') is None,
            'unable to import volume_backups')
    def test_volume_snapshot_deletion_policy(self):
        t = template_format.parse(test_template_volume_snapshot)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        engine = service.EngineService('a', 't')
        res = dict(engine.validate_template(None, t))
        self.assertEqual(res, {'Description': u'test.', 'Parameters': {}})

    def test_unregistered_key(self):
        t = template_format.parse(test_unregistered_key)
        template = parser.Template(t)
        params = {'KeyName': 'not_registered'}
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment(params))
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.UserKeyPairMissing, resource.validate)

    def test_unregistered_image(self):
        t = template_format.parse(test_template_image)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment({'KeyName': 'test'}))
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.ImageNotFound, resource.validate)
        self.m.VerifyAll()

    def test_duplicated_image(self):
        t = template_format.parse(test_template_image)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment({'KeyName': 'test'}))

        # Two glance images sharing the same name make the lookup ambiguous.
        class image_type(object):
            def __init__(self, id, name):
                self.id = id
                self.name = name

        image_list = [image_type(id='768b5464-3df5-4abf-be33-63b60f8b99d0',
                                 name='image_name'),
                      image_type(id='a57384f5-690f-48e1-bf46-c4291e6c887e',
                                 name='image_name')]
        self.m.StubOutWithMock(self.fc.images, 'list')
        self.fc.images.list().AndReturn(image_list)
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.NoUniqueImageFound, resource.validate)
        self.m.VerifyAll()

    def test_invalid_security_groups_with_nics(self):
        t = template_format.parse(test_template_invalid_secgroups)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment({'KeyName': 'test'}))
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.ResourcePropertyConflict,
                          resource.validate)

    def test_invalid_security_group_ids_with_nics(self):
        t = template_format.parse(test_template_invalid_secgroupids)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment({'KeyName': 'test'}))
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        resource = stack['Instance']
        self.assertRaises(exception.ResourcePropertyConflict,
                          resource.validate)

    def test_client_exception_from_nova_client(self):
        t = template_format.parse(test_template_nova_client_exception)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template)
        self.m.StubOutWithMock(self.fc.images, 'list')
        self.fc.images.list()\
            .AndRaise(clients.novaclient.exceptions.ClientException(500))
        self.m.StubOutWithMock(instances.Instance, 'nova')
        instances.Instance.nova().AndReturn(self.fc)
        self.m.ReplayAll()
        self.assertRaises(exception.Error, stack.validate)
        self.m.VerifyAll()

    def test_validate_unique_logical_name(self):
        t = template_format.parse(test_template_unique_logical_name)
        template = parser.Template(t)
        stack = parser.Stack(self.ctx, 'test_stack', template,
                             environment.Environment({'AName': 'test',
                                                      'KeyName': 'test'}))
        self.assertRaises(exception.StackValidationFailed, stack.validate)
| |
'''
By Tim Ehrensberger
The base of the functions for the network's training is taken from https://github.com/Zackory/Keras-MNIST-GAN/blob/master/mnist_gan.py by Zackory Erickson
The network architecture is perhaps loosely inspired by https://github.com/aleju/face-generator by Alexander Jung
'''
import os
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from keras.layers import Input, BatchNormalization, Activation, MaxPooling2D, AveragePooling2D
from keras.models import Model, Sequential
from keras.layers.core import Reshape, Dense, Dropout, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D, UpSampling2D
from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.regularizers import l1_l2
#------
# DATA
#------
from keras import backend as K
# Channels-first ('th') layout: images are (channels, height, width).
K.set_image_dim_ordering('th')
import h5py
# Get hdf5 file
# NOTE(review): the backslashes in this Windows path only survive because
# \D, \M, \c are not escape sequences -- prefer a raw string.
hdf5_file = os.path.join("C:\Daten\Maturaarbeit\celeba_processed", "CelebA_64_data.h5")
with h5py.File(hdf5_file, "r") as hf:
    X_train = hf["data"] [()] #[()] makes it read the whole thing
    X_train = X_train.astype(np.float32) / 255  # scale pixel values to [0, 1]
#----------------
# HYPERPARAMETERS
#----------------
randomDim = 100  # dimensionality of the generator's noise input
adam = Adam(lr=0.0002, beta_1=0.5)  # DCGAN-standard optimizer settings
reg = lambda: l1_l2(l1=1e-7, l2=1e-7)  # fresh L1/L2 regularizer per layer
dropout = 0
#-----------
# Generator
#-----------
h = 5  # convolution kernel size used throughout both networks
# Noise vector -> 64x64 RGB image, upsampling 16x16 -> 32x32 -> 64x64.
generator = Sequential()
#In: 100
generator.add(Dense(128 * 16 * 16, input_dim=100, kernel_regularizer=reg()))
generator.add(BatchNormalization())
generator.add(Reshape((128, 16, 16)))
generator.add(LeakyReLU(0.2))
#Out: 128 x 16 x 16
#In: 128 x 16 x 16
generator.add(UpSampling2D(size=(2, 2)))
generator.add(Convolution2D(256, (h, h), padding='same', kernel_regularizer=reg())) #1
generator.add(BatchNormalization(axis=1))  # axis=1: channel axis (channels-first)
generator.add(LeakyReLU(0.2))
#Out: 256 x 32 x 32
#In: 256 x 32 x 32
generator.add(UpSampling2D(size=(2, 2)))
generator.add(Convolution2D(128, (h, h), padding='same', kernel_regularizer=reg())) #2
generator.add(BatchNormalization(axis=1))
generator.add(LeakyReLU(0.2))
#Out: 128 x 64 x 64
#In: 128 x 64 x 64
generator.add(Convolution2D(3, (h, h), padding='same', kernel_regularizer=reg())) #4
generator.add(Activation('sigmoid'))  # sigmoid matches inputs scaled to [0, 1]
#Out: 3 x 64 x 64
generator.compile(loss='binary_crossentropy', optimizer=adam)
#--------------
# Discriminator
#--------------
# 64x64 RGB image -> real/fake probability, downsampling via max-pooling.
discriminator = Sequential()
#In: 3 x 64 x 64
discriminator.add(Convolution2D(64, (h, h), padding='same', input_shape=(3, 64, 64), kernel_regularizer=reg()))
discriminator.add(MaxPooling2D(pool_size=(2, 2)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(dropout))
#Out: 64 x 32 x 32
#In: 64 x 32 x 32
discriminator.add(Convolution2D(128, (h, h), padding='same', kernel_regularizer=reg()))
discriminator.add(MaxPooling2D(pool_size=(2, 2)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(dropout))
#Out: 128 x 16 x 16
#In: 128 x 16 x 16
discriminator.add(Convolution2D(256, (h, h), padding='same', kernel_regularizer=reg()))
discriminator.add(MaxPooling2D(pool_size=(2, 2)))#Try AveragePooling2D?
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(dropout))
#Out: 256 x 8 x 8
#In: 256 x 8 x 8
discriminator.add(Flatten())
discriminator.add(Dense(512))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(dropout))
discriminator.add(Dense(1))
discriminator.add(Activation('sigmoid'))
#Out: 1 (Probability)
discriminator.compile(loss='binary_crossentropy', optimizer=adam)
#-----
# GAN
#-----
# Combined model generator->discriminator. Freezing the discriminator here
# means gan.train_on_batch updates only the generator's weights; the
# standalone `discriminator` model is still trained directly in train().
discriminator.trainable = False
ganInput = Input(shape=(randomDim,))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)
#-----------
# FUNCTIONS
#-----------
# Per-epoch loss histories: appended by train(), plotted by plotLoss().
dLosses = []
gLosses = []
def plotLoss(epoch):
    """Plot the discriminator/generator loss histories and save them as PNG.

    Parameters:
        epoch: epoch number embedded in the output filename
            (images/dcgan_loss_epoch_<epoch>.png).
    """
    assertExists('images')
    plt.figure(figsize=(10, 8))
    plt.plot(dLosses, label='Discriminative loss')
    plt.plot(gLosses, label='Generative loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig('images/dcgan_loss_epoch_%d.png' % epoch)
    # FIX: close the figure — matplotlib keeps every figure alive until
    # explicitly closed, so calling this once per epoch leaked memory.
    plt.close()
# Create a wall of generated images
def plotGeneratedImages(epoch, examples=100, dim=(10, 10), figsize=(10, 10)):
    """Sample `examples` noise vectors, decode them, and save a grid of images.

    Parameters:
        epoch: epoch number embedded in the output filename.
        examples: number of images to generate (must fit in dim[0] * dim[1]).
        dim: (rows, cols) layout of the subplot grid.
        figsize: matplotlib figure size in inches.
    """
    noise = np.random.normal(0, 1, size=[examples, randomDim])
    generatedImages = generator.predict(noise)
    # Channel-first (N, C, H, W) -> channel-last (N, H, W, C) for imshow.
    generatedImages = generatedImages.transpose(0, 2, 3, 1)
    assertExists('images')
    plt.figure(figsize=figsize)
    for i in range(generatedImages.shape[0]):
        plt.subplot(dim[0], dim[1], i + 1)
        plt.imshow(generatedImages[i, :, :, :], interpolation='nearest')
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('images/dcgan_generated_image_epoch_%d.png' % epoch)
    # FIX: close the figure to avoid accumulating open figures over epochs.
    plt.close()
# Save the generator and discriminator networks (and weights) for later use
def savemodels(epoch):
    """Persist both networks as HDF5 snapshots tagged with the epoch number."""
    assertExists('models')
    for label, network in (('generator', generator), ('discriminator', discriminator)):
        network.save('models/dcgan_%s_epoch_%d.h5' % (label, epoch))
def train(epochs=1, batchSize=128, save_interval=1):
    """Adversarial training loop alternating discriminator and generator steps.

    Parameters:
        epochs: number of passes of `batchCount` batches.
        batchSize: real images per step; the discriminator actually sees
            2 * batchSize samples (real + generated).
        save_interval: save image grids and model snapshots every this many
            epochs (the first epoch always saves).
    """
    batchCount = X_train.shape[0] // batchSize
    print('Epochs:', epochs)
    print('Batch size:', batchSize)
    print('Batches per epoch:', batchCount)
    for e in range(1, epochs+1):
        print('-'*15, 'Epoch %d' % e, '-'*15)
        for _ in tqdm(range(batchCount)):
            # Get a random set of input noise and images
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            imageBatch = X_train[np.random.randint(0, X_train.shape[0], size=batchSize)]
            # Generate fake images
            generatedImages = generator.predict(noise)
            X = np.concatenate([imageBatch, generatedImages])
            # Labels for generated and real data
            yDis = np.zeros(2*batchSize)
            # One-sided label smoothing = not exactly 1
            yDis[:batchSize] = 0.95
            # Train discriminator
            discriminator.trainable = True
            dloss = discriminator.train_on_batch(X, yDis)  # here only D is trained
            # Train generator: labels are all ones because G tries to make D
            # classify its output as real.
            noise = np.random.normal(0, 1, size=[batchSize, randomDim])
            yGen = np.ones(batchSize)
            discriminator.trainable = False
            gloss = gan.train_on_batch(noise, yGen)  # here only G is trained because D is not trainable
        # Store loss of most recent batch from this epoch
        dLosses.append(dloss)
        gLosses.append(gloss)
        # plot after every epoch
        if (e == 1 or e % save_interval == 0):
            plotGeneratedImages(e)
            savemodels(e)
    # Plot losses from every epoch
    plotLoss(e)
def assertExists(path):
    """Ensure directory `path` exists, creating it (and parents) if needed.

    FIX: uses makedirs(..., exist_ok=True) instead of an exists() check
    followed by makedirs(), which raced (TOCTOU) when two callers created
    the same directory concurrently. Repeated calls are no-ops.
    """
    os.makedirs(path, exist_ok=True)
if __name__ == '__main__':
    # Train for 100 epochs with mini-batches of 16, saving outputs every epoch.
    train(100, 16, 1)
| |
'''
Copyright (c) <2012> Tarek Galal <tare2.galal@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
from Yowsup.connectionmanager import YowsupConnectionManager
import time, datetime, sys
from Yowsup.Media.downloader import MediaDownloader
from Yowsup.Media.uploader import MediaUploader
from sys import stdout
import os
import hashlib
import base64
from PIL import Image
import StringIO
# Default thumbnail target size (width, height); not referenced below —
# presumably kept for callers of this module. TODO confirm.
size = 100,100
# Python 3 compatibility shim: alias raw_input to input.
if sys.version_info >= (3, 0):
    raw_input = input
class LocationUploaderClient:
    """Yowsup-based WhatsApp client that logs in and sends one location
    message (name + latitude/longitude) to a contact or group.

    NOTE(review): this is Python 2 code (`long`, `StringIO` import,
    `raw_input`) — the shim above only covers raw_input; other py2-isms
    remain.
    """

    def __init__(self, phoneNumber, name, geolong, geolat, keepAlive = False, sendReceipts = False):
        # Target number and the location payload to send once authed.
        self.sendReceipts = sendReceipts
        self.phoneNumber = phoneNumber
        self.name = name
        self.geolong = geolong
        self.geolat = geolat
        # Group JIDs contain a '-' and live on g.us; direct chats on s.whatsapp.net.
        if '-' in phoneNumber:
            self.jid = "%s@g.us" % phoneNumber
        else:
            self.jid = "%s@s.whatsapp.net" % phoneNumber
        # messageId -> [timestamp, payload], used when send receipts arrive.
        self.sentCache = {}
        connectionManager = YowsupConnectionManager()
        connectionManager.setAutoPong(keepAlive)
        self.signalsInterface = connectionManager.getSignalsInterface()
        self.methodsInterface = connectionManager.getMethodsInterface()
        # Wire connection-lifecycle, messaging and media-upload callbacks.
        self.signalsInterface.registerListener("auth_success", self.onAuthSuccess)
        self.signalsInterface.registerListener("auth_fail", self.onAuthFailed)
        self.signalsInterface.registerListener("message_received", self.onMessageReceived)
        self.signalsInterface.registerListener("receipt_messageSent", self.onMessageSent)
        self.signalsInterface.registerListener("presence_updated", self.onPresenceUpdated)
        self.signalsInterface.registerListener("disconnected", self.onDisconnected)
        self.signalsInterface.registerListener("media_uploadRequestSuccess", self.onmedia_uploadRequestSuccess)
        self.signalsInterface.registerListener("media_uploadRequestFailed", self.onmedia_uploadRequestFailed)
        self.signalsInterface.registerListener("media_uploadRequestDuplicate", self.onmedia_uploadRequestDuplicate)
        self.path = ""               # local path of a media file to upload, if any
        self.gotMediaReceipt = False
        self.done = False
        # Slash-commands available in interactive mode.
        self.commandMappings = {"lastseen": lambda: self.methodsInterface.call("presence_request", (self.jid,)),
                                "available": lambda: self.methodsInterface.call("presence_sendAvailable"),
                                "unavailable": lambda: self.methodsInterface.call("presence_sendUnavailable")
                                }
        self.done = False
        #signalsInterface.registerListener("receipt_messageDelivered", lambda jid, messageId: methodsInterface.call("delivered_ack", (jid, messageId)))

    def login(self, username, password):
        """Start asynchronous authentication; onAuthSuccess/onAuthFailed fire later."""
        self.username = username
        self.methodsInterface.call("auth_login", (username, password))
        #while not self.done:
        #    time.sleep(0.5)

    def onAuthSuccess(self, username):
        # Once authenticated, signal readiness and fire the location send.
        print("Authed %s" % username)
        self.methodsInterface.call("ready")
        self.doSendLocation()

    def onAuthFailed(self, username, err):
        print("Auth Failed!")

    def onDisconnected(self, reason):
        print("Disconnected because %s" %reason)

    def onPresenceUpdated(self, jid, lastSeen):
        # lastSeen is seconds ago; convert to an absolute local timestamp.
        formattedDate = datetime.datetime.fromtimestamp(long(time.time()) - lastSeen).strftime('%d-%m-%Y %H:%M')
        self.onMessageReceived(0, jid, "LAST SEEN RESULT: %s"%formattedDate, long(time.time()), False, None, False)

    def onMessageSent(self, jid, messageId):
        # Echo the cached outgoing message once the server confirms it.
        formattedDate = datetime.datetime.fromtimestamp(self.sentCache[messageId][0]).strftime('%d-%m-%Y %H:%M')
        print("%s [%s]:%s"%(self.username, formattedDate, self.sentCache[messageId][1]))
        print(self.getPrompt())

    def onMessageReceived(self, messageId, jid, messageContent, timestamp, wantsReceipt, pushName, isBroadcast):
        # Ignore messages from anyone other than the configured peer.
        if jid[:jid.index('@')] != self.phoneNumber:
            return
        formattedDate = datetime.datetime.fromtimestamp(timestamp).strftime('%d-%m-%Y %H:%M')
        print("%s [%s]:%s"%(jid, formattedDate, messageContent))
        if wantsReceipt and self.sendReceipts:
            self.methodsInterface.call("message_ack", (jid, messageId))
        #print(self.getPrompt())

    def goInteractive(self, jid):
        """Blocking REPL: read lines, run slash-commands or send them as messages."""
        print("Starting Interactive chat with %s" % jid)
        jid = "%s@s.whatsapp.net" % jid
        print(self.getPrompt())
        while True:
            message = raw_input()
            message = message.strip()
            if not len(message):
                continue
            if not self.runCommand(message.strip()):
                msgId = self.methodsInterface.call("message_send", (jid, message))
                self.sentCache[msgId] = [int(time.time()), message]
        # NOTE(review): unreachable after the infinite loop above.
        self.done = True

    def getPrompt(self):
        return "Enter Message or command: (/%s)" % ", /".join(self.commandMappings)

    def onImageReceived(self, messageId, jid, preview, url, size, wantsReceipt, isBroadcast):
        # Download incoming images, then busy-wait up to ~5s for completion.
        print("Image received: Id:%s Jid:%s Url:%s size:%s" %(messageId, jid, url, size))
        downloader = MediaDownloader(self.onDlsuccess, self.onDlerror, self.onDlprogress)
        downloader.download(url)
        if wantsReceipt and self.sendReceipts:
            self.methodsInterface.call("message_ack", (jid, messageId))
        timeout = 10
        t = 0;
        while t < timeout:
            time.sleep(0.5)
            t+=1

    def onDlsuccess(self, path):
        stdout.write("\n")
        stdout.flush()
        print("Image downloded to %s"%path)
        print(self.getPrompt())

    def onDlerror(self):
        stdout.write("\n")
        stdout.flush()
        print("Download Error")
        print(self.getPrompt())

    def onDlprogress(self, progress):
        # \r keeps the progress display on a single console line.
        stdout.write("\r Progress: %s" % progress)
        stdout.flush()

    def onmedia_uploadRequestSuccess(self,_hash, url, resumeFrom):
        # Server granted an upload slot: push the file there.
        print("Request Succ: hash: %s url: %s resume: %s"%(_hash, url, resumeFrom))
        self.uploadImage(url)
        self.gotMediaReceipt = True

    def onmedia_uploadRequestFailed(self,_hash):
        print("Request Fail: hash: %s"%(_hash))
        # NOTE(review): sets gotReceipt, not gotMediaReceipt — looks like a
        # typo in the original; left untouched here.
        self.gotReceipt = True

    def onmedia_uploadRequestDuplicate(self,_hash, url):
        # File already on the server: skip upload, send the existing URL.
        print("Request Dublicate: hash: %s url: %s "%(_hash, url))
        self.doSendImage(url)
        self.gotMediaReceipt = True

    def uploadImage(self, url):
        uploader = MediaUploader(self.jid, self.username, self.onUploadSuccess, self.onError, self.onProgressUpdated)
        uploader.upload(self.path,url)

    def onUploadSuccess(self, url):
        stdout.write("\n")
        stdout.flush()
        print("Upload Succ: url: %s "%( url))
        self.doSendImage(url)

    def onError(self):
        stdout.write("\n")
        stdout.flush()
        print("Upload Fail:")

    def onProgressUpdated(self, progress):
        stdout.write("\r Progress: %s" % progress)
        stdout.flush()

    def doSendLocation(self):
        """Send the configured location message to self.jid and cache it."""
        print("Sending message_location")
        msgId = self.methodsInterface.call("message_locationSend", (self.jid, self.name, self.geolat, self.geolong, None))
        self.sentCache[msgId] = [int(time.time()), self.path]

    def createThumb(self):
        """Create a 64x64 JPEG thumbnail of self.path; return it base64-encoded."""
        THUMBNAIL_SIZE = 64, 64
        thumbnailFile = "thumb.jpg"
        im = Image.open(self.path)
        im.thumbnail(THUMBNAIL_SIZE, Image.ANTIALIAS)
        im.save(thumbnailFile, "JPEG")
        with open(thumbnailFile, "rb") as imageFile:
            raw = base64.b64encode(imageFile.read())
        return raw;
| |
import json
import pytest
from django.core.urlresolvers import reverse
from django.test import TestCase
from apps.core import factories
from apps.core.tests import CoreFixturesTestCase
from apps.explorer.views import DataTableCumulativeView, DataTableSelectionView
from apps.explorer.views.views_selection import GetSearchTermsMixin
class PixelSetSelectionViewTestCase(CoreFixturesTestCase):
    """Tests for the pixel-set selection page (explorer:pixelset_selection).

    FIX: dropped the extraneous ``f`` prefix from the two histogram strings
    that contain no placeholders (flake8/ruff F541); the string values are
    unchanged.
    """

    def setUp(self):
        # Log in as a fully privileged pixeler so permissions don't interfere.
        self.user = factories.PixelerFactory(
            is_active=True,
            is_staff=True,
            is_superuser=True,
        )
        self.client.login(
            username=self.user.username,
            password=factories.PIXELER_PASSWORD,
        )
        self.url = reverse('explorer:pixelset_selection')

    def test_redirects_to_list_view_when_invalid(self):
        """With no selection, the view redirects back to the list page."""
        response = self.client.get(self.url)
        self.assertRedirects(response, reverse('explorer:pixelset_list'))

    def test_displays_message_after_redirect_when_selection_is_empty(self):
        """The redirect carries an error message about the empty selection."""
        response = self.client.get(self.url, follow=True)
        self.assertContains(
            response,
            (
                '<div class="message error">'
                'Cannot explore an empty selection.'
                '</div>'
            ),
            html=True
        )

    def test_renders_pixelset_selection_template(self):
        """A non-empty selection renders the selection template and items."""
        # select 2 pixel sets
        pixel_sets = factories.PixelSetFactory.create_batch(2)
        data = {
            'pixel_sets': [str(p.id) for p in pixel_sets]
        }
        self.client.post(
            reverse('explorer:pixelset_select'), data, follow=True
        )
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'explorer/pixelset_selection.html')
        self.assertContains(
            response,
            '<title>Pixel Sets - Your selection</title>'
        )
        self.assertContains(
            response,
            '<div class="pixelset-item">',
            count=len(pixel_sets)
        )

    def test_renders_distributions_for_each_pixel_set(self):
        """Cumulative and per-pixel-set histogram containers are rendered."""
        # select 2 pixel sets
        pixel_sets = factories.PixelSetFactory.create_batch(2)
        data = {
            'pixel_sets': [str(p.id) for p in pixel_sets]
        }
        self.client.post(
            reverse('explorer:pixelset_select'), data, follow=True
        )
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
        # cumulative distributions
        self.assertContains(
            response,
            '<div class="histogram" id="values-histogram">'
        )
        self.assertContains(
            response,
            '<div class="histogram" id="scores-histogram">'
        )
        self.assertContains(
            response,
            f'<div class="histogram" id="values-histogram-{pixel_sets[0].id}">'
        )
        self.assertContains(
            response,
            f'<div class="histogram" id="scores-histogram-{pixel_sets[0].id}">'
        )
        self.assertContains(
            response,
            f'<div class="histogram" id="values-histogram-{pixel_sets[1].id}">'
        )
        self.assertContains(
            response,
            f'<div class="histogram" id="scores-histogram-{pixel_sets[1].id}">'
        )
class DataTableSelectionViewTestCase(TestCase):
    """Checks the abstract contract of DataTableSelectionView."""

    def test_get_headers_must_be_implemented(self):
        """A subclass that omits get_headers() must raise NotImplementedError."""
        class IncompleteSelectionView(DataTableSelectionView):
            pass

        with pytest.raises(NotImplementedError):
            IncompleteSelectionView().get_headers()
class PixelSetSelectionValuesViewTestCase(GetSearchTermsMixin,
                                          CoreFixturesTestCase):
    """Tests for the AJAX endpoint that serves a pixel set's *values* as a
    DataTable-style JSON payload (cols/rows)."""

    def setUp(self):
        # Log in as a fully privileged pixeler so permissions don't interfere.
        self.user = factories.PixelerFactory(
            is_active=True,
            is_staff=True,
            is_superuser=True,
        )
        self.client.login(
            username=self.user.username,
            password=factories.PIXELER_PASSWORD,
        )
        # One pixel set with two pixels, to exercise filtering below.
        self.pixel_set = factories.PixelSetFactory()
        self.pixels = factories.PixelFactory.create_batch(
            2,
            pixel_set=self.pixel_set
        )
        self.url = reverse(
            'explorer:pixelset_selection_values',
            kwargs={'pk': str(self.pixel_set.id)}
        )

    def test_returns_bad_request_when_not_ajax(self):
        """The endpoint only answers XMLHttpRequest calls (400 otherwise)."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 400)

    def test_returns_json(self):
        """An AJAX GET returns JSON with id/value columns and one row per pixel."""
        response = self.client.get(
            self.url,
            data={},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        data = json.loads(response.content)
        cols = data['cols']
        self.assertEqual(cols[0]['label'], 'id')
        self.assertEqual(cols[1]['label'], 'value')
        rows = data['rows']
        self.assertEqual(len(rows), 2)

    def test_filters_by_omics_units(self):
        """Searching for one pixel's omics-unit identifier narrows the rows."""
        session = self.client.session
        # select pixel set, otherwise we cannot set omics units
        self.client.post(
            reverse('explorer:pixelset_select'),
            {'pixel_sets': [self.pixel_set.id]},
            follow=True
        )
        response = self.client.get(self.url)
        # No search terms stored in the session yet.
        self.assertIsNone(self.get_search_terms(session, default=None))
        selected_pixel = self.pixels[0]
        # set search terms in session
        response = self.client.post(reverse('explorer:pixelset_selection'), {
            'search_terms': selected_pixel.omics_unit.reference.identifier,
        }, follow=True)
        self.assertRedirects(response, reverse('explorer:pixelset_selection'))
        response = self.client.get(
            self.url,
            data={},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        data = json.loads(response.content)
        cols = data['cols']
        self.assertEqual(cols[0]['label'], 'id')
        self.assertEqual(cols[1]['label'], 'value')
        rows = data['rows']
        # Only the matching pixel remains.
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]['c'][0]['v'], str(selected_pixel.id))
        self.assertEqual(rows[0]['c'][1]['v'], selected_pixel.value)

    def test_filters_by_term_in_description(self):
        """Searching for words from a reference description narrows the rows."""
        session = self.client.session
        # select pixel set, otherwise we cannot set omics units
        self.client.post(
            reverse('explorer:pixelset_select'),
            {'pixel_sets': [self.pixel_set.id]},
            follow=True
        )
        response = self.client.get(self.url)
        self.assertIsNone(self.get_search_terms(session, default=None))
        selected_pixel = self.pixels[0]
        description = selected_pixel.omics_unit.reference.description
        first_words = description.split(' ')[:2]
        # set search terms in session
        response = self.client.post(reverse('explorer:pixelset_selection'), {
            # here, we pass a single term made of two words to hopefully match
            # only one omics unit (randomly generated values are... sometimes
            # annoying).
            'search_terms': ' '.join(first_words),
        }, follow=True)
        self.assertRedirects(response, reverse('explorer:pixelset_selection'))
        response = self.client.get(
            self.url,
            data={},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        data = json.loads(response.content)
        cols = data['cols']
        self.assertEqual(cols[0]['label'], 'id')
        self.assertEqual(cols[1]['label'], 'value')
        rows = data['rows']
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]['c'][0]['v'], str(selected_pixel.id))
        self.assertEqual(rows[0]['c'][1]['v'], selected_pixel.value)
class PixelSetSelectionQualityScoresViewTestCase(GetSearchTermsMixin,
                                                 CoreFixturesTestCase):
    """Tests for the AJAX endpoint serving a pixel set's *quality scores*
    as a DataTable-style JSON payload (mirror of the values endpoint)."""

    def setUp(self):
        # Log in as a fully privileged pixeler so permissions don't interfere.
        self.user = factories.PixelerFactory(
            is_active=True,
            is_staff=True,
            is_superuser=True,
        )
        self.client.login(
            username=self.user.username,
            password=factories.PIXELER_PASSWORD,
        )
        # One pixel set with two pixels, to exercise filtering below.
        self.pixel_set = factories.PixelSetFactory()
        self.pixels = factories.PixelFactory.create_batch(
            2,
            pixel_set=self.pixel_set
        )
        self.url = reverse(
            'explorer:pixelset_selection_quality_scores',
            kwargs={'pk': str(self.pixel_set.id)}
        )

    def test_returns_bad_request_when_not_ajax(self):
        """The endpoint only answers XMLHttpRequest calls (400 otherwise)."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 400)

    def test_returns_json(self):
        """An AJAX GET returns JSON with id/quality_score columns, one row per pixel."""
        response = self.client.get(
            self.url,
            data={},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        data = json.loads(response.content)
        cols = data['cols']
        self.assertEqual(cols[0]['label'], 'id')
        self.assertEqual(cols[1]['label'], 'quality_score')
        rows = data['rows']
        self.assertEqual(len(rows), 2)

    def test_filters_by_omics_units(self):
        """Searching for one pixel's omics-unit identifier narrows the rows."""
        session = self.client.session
        # select pixel set, otherwise we cannot set omics units
        self.client.post(
            reverse('explorer:pixelset_select'),
            {'pixel_sets': [self.pixel_set.id]},
            follow=True
        )
        response = self.client.get(self.url)
        # No search terms stored in the session yet.
        self.assertIsNone(self.get_search_terms(session, default=None))
        selected_pixel = self.pixels[0]
        # set search terms in session
        response = self.client.post(reverse('explorer:pixelset_selection'), {
            'search_terms': selected_pixel.omics_unit.reference.identifier,
        }, follow=True)
        self.assertRedirects(response, reverse('explorer:pixelset_selection'))
        response = self.client.get(
            self.url,
            data={},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        data = json.loads(response.content)
        cols = data['cols']
        self.assertEqual(cols[0]['label'], 'id')
        self.assertEqual(cols[1]['label'], 'quality_score')
        rows = data['rows']
        # Only the matching pixel remains.
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]['c'][0]['v'], str(selected_pixel.id))
        self.assertEqual(rows[0]['c'][1]['v'], selected_pixel.quality_score)

    def test_filters_by_term_in_description(self):
        """Searching for the first description word narrows the rows."""
        session = self.client.session
        # select pixel set, otherwise we cannot set omics units
        self.client.post(
            reverse('explorer:pixelset_select'),
            {'pixel_sets': [self.pixel_set.id]},
            follow=True
        )
        response = self.client.get(self.url)
        self.assertIsNone(self.get_search_terms(session, default=None))
        selected_pixel = self.pixels[0]
        description = selected_pixel.omics_unit.reference.description
        first_word = description.split(' ')[0]
        # set search terms in session
        response = self.client.post(reverse('explorer:pixelset_selection'), {
            'search_terms': first_word,
        }, follow=True)
        self.assertRedirects(response, reverse('explorer:pixelset_selection'))
        response = self.client.get(
            self.url,
            data={},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        data = json.loads(response.content)
        cols = data['cols']
        self.assertEqual(cols[0]['label'], 'id')
        self.assertEqual(cols[1]['label'], 'quality_score')
        rows = data['rows']
        self.assertEqual(len(rows), 1)
        self.assertEqual(rows[0]['c'][0]['v'], str(selected_pixel.id))
        self.assertEqual(rows[0]['c'][1]['v'], selected_pixel.quality_score)
class DataTableCumulativeViewTestCase(TestCase):
    """Checks the abstract contract of DataTableCumulativeView."""

    def test_get_headers_must_be_implemented(self):
        """A subclass that omits get_headers() must raise NotImplementedError."""
        class IncompleteCumulativeView(DataTableCumulativeView):
            pass

        with pytest.raises(NotImplementedError):
            IncompleteCumulativeView().get_headers()
class PixelSetSelectionCumulativeQualityScoresViewTestCase(CoreFixturesTestCase):  # noqa
    """Tests for the AJAX endpoint serving cumulative *quality scores*
    across the currently selected pixel sets."""

    def setUp(self):
        # Log in as a fully privileged pixeler so permissions don't interfere.
        self.user = factories.PixelerFactory(
            is_active=True,
            is_staff=True,
            is_superuser=True,
        )
        self.client.login(
            username=self.user.username,
            password=factories.PIXELER_PASSWORD,
        )
        # One pixel set with two pixels.
        self.pixel_set = factories.PixelSetFactory()
        self.pixels = factories.PixelFactory.create_batch(
            2,
            pixel_set=self.pixel_set
        )
        self.url = reverse(
            'explorer:pixelset_selection_cumulative_quality_scores'
        )

    def test_returns_bad_request_when_not_ajax(self):
        """The endpoint only answers XMLHttpRequest calls (400 otherwise)."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 400)

    def test_returns_json(self):
        """With a selection, an AJAX GET returns one row per pixel."""
        # select 1 pixel set
        data = {
            'pixel_sets': [self.pixel_set.id]
        }
        self.client.post(
            reverse('explorer:pixelset_select'), data, follow=True
        )
        response = self.client.get(
            self.url,
            data={},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        data = json.loads(response.content)
        cols = data['cols']
        self.assertEqual(cols[0]['label'], 'id')
        self.assertEqual(cols[1]['label'], 'quality_score')
        rows = data['rows']
        self.assertEqual(len(rows), 2)

    def test_no_selected_pixel_sets_returns_empty(self):
        """Without a selection, the payload keeps its columns but has no rows."""
        response = self.client.get(
            self.url,
            data={},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        data = json.loads(response.content)
        cols = data['cols']
        self.assertEqual(cols[0]['label'], 'id')
        self.assertEqual(cols[1]['label'], 'quality_score')
        rows = data['rows']
        self.assertEqual(len(rows), 0)
class PixelSetSelectionCumulativeValuesViewTestCase(CoreFixturesTestCase):
    """Tests for the AJAX endpoint serving cumulative *values* across the
    currently selected pixel sets (mirror of the quality-scores endpoint)."""

    def setUp(self):
        # Log in as a fully privileged pixeler so permissions don't interfere.
        self.user = factories.PixelerFactory(
            is_active=True,
            is_staff=True,
            is_superuser=True,
        )
        self.client.login(
            username=self.user.username,
            password=factories.PIXELER_PASSWORD,
        )
        # One pixel set with two pixels.
        self.pixel_set = factories.PixelSetFactory()
        self.pixels = factories.PixelFactory.create_batch(
            2,
            pixel_set=self.pixel_set
        )
        self.url = reverse('explorer:pixelset_selection_cumulative_values')

    def test_returns_bad_request_when_not_ajax(self):
        """The endpoint only answers XMLHttpRequest calls (400 otherwise)."""
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 400)

    def test_returns_json(self):
        """With a selection, an AJAX GET returns one row per pixel."""
        # select 1 pixel set
        data = {
            'pixel_sets': [self.pixel_set.id]
        }
        self.client.post(
            reverse('explorer:pixelset_select'), data, follow=True
        )
        response = self.client.get(
            self.url,
            data={},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        data = json.loads(response.content)
        cols = data['cols']
        self.assertEqual(cols[0]['label'], 'id')
        self.assertEqual(cols[1]['label'], 'value')
        rows = data['rows']
        self.assertEqual(len(rows), 2)

    def test_no_selected_pixel_sets_returns_empty(self):
        """Without a selection, the payload keeps its columns but has no rows."""
        response = self.client.get(
            self.url,
            data={},
            HTTP_X_REQUESTED_WITH='XMLHttpRequest'
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
        data = json.loads(response.content)
        cols = data['cols']
        self.assertEqual(cols[0]['label'], 'id')
        self.assertEqual(cols[1]['label'], 'value')
        rows = data['rows']
        self.assertEqual(len(rows), 0)
class PixelSetSelectionClearViewTestCase(GetSearchTermsMixin,
                                         CoreFixturesTestCase):
    """Tests for the endpoint that clears the stored selection search terms."""

    def setUp(self):
        # Log in as a fully privileged pixeler so permissions don't interfere.
        self.user = factories.PixelerFactory(
            is_active=True,
            is_staff=True,
            is_superuser=True,
        )
        self.client.login(
            username=self.user.username,
            password=factories.PIXELER_PASSWORD,
        )
        self.url = reverse('explorer:pixelset_selection_clear')

    def test_clear_search_terms_in_session(self):
        """POSTing to the clear URL empties the session's search terms."""
        # select 2 pixel sets
        pixel_sets = factories.PixelSetFactory.create_batch(2)
        data = {
            'pixel_sets': [str(p.id) for p in pixel_sets]
        }
        self.client.post(
            reverse('explorer:pixelset_select'), data, follow=True
        )
        search_terms = ['a-omics-unit']
        response = self.client.post(reverse('explorer:pixelset_selection'), {
            'search_terms': search_terms,
        }, follow=True)
        # The terms are now stored in the session.
        self.assertEqual(
            self.get_search_terms(self.client.session, default=None),
            search_terms
        )
        # now let's clear the search terms
        response = self.client.post(self.url)
        self.assertRedirects(response, reverse('explorer:pixelset_selection'))
        self.assertEqual(
            self.get_search_terms(self.client.session, default=None),
            []
        )
| |
import numpy as np
import logging
import pytest
from brainiak.isc import (isc, isfc, bootstrap_isc, permutation_isc,
squareform_isfc, timeshift_isc,
phaseshift_isc)
from scipy.spatial.distance import squareform
logger = logging.getLogger(__name__)
# Create simple simulated data with high intersubject correlation
def simulated_timeseries(n_subjects, n_TRs, n_voxels=30,
                         noise=1, data_type='array',
                         random_state=None):
    """Generate toy time series: one shared signal plus per-subject noise.

    Returns a list of per-subject arrays when data_type == 'list', or a
    stacked array of shape (n_TRs, n_voxels, n_subjects) — or (n_TRs,
    n_subjects) when n_voxels is falsy (single ROI).
    """
    rng = np.random.RandomState(random_state)
    # Draw shape: 2-D for multi-voxel data, 1-D for a single ROI.
    draw_shape = (n_TRs, n_voxels) if n_voxels else (n_TRs,)
    signal = rng.randn(*draw_shape)
    # Re-seed so the noise stream is decoupled from the signal stream
    # (same draw order as before, so results are reproducible).
    rng = np.random.RandomState(rng.randint(0, 2**32 - 1))
    data = []
    for _ in np.arange(n_subjects):
        data.append(signal + rng.randn(*draw_shape) * noise)
    if data_type == 'array':
        data = np.dstack(data) if n_voxels else np.column_stack(data)
    return data
# Create 3 voxel simulated data with correlated time series
def correlated_timeseries(n_subjects, n_TRs, noise=0,
                          random_state=None):
    """Simulate (n_TRs, 3, n_subjects) data where voxels 0 and 1 carry the
    same signal for every subject and voxel 2 is (nearly) uncorrelated.

    Fixes vs. the previous version:
    * every draw comes from the seeded RandomState, so a given
      ``random_state`` is fully reproducible (previously the loop and the
      noise used the global ``np.random`` and ignored the seed);
    * the repeat count follows ``n_subjects`` instead of a hard-coded 20,
      so subject counts other than 20 no longer crash on concatenate.
    """
    prng = np.random.RandomState(random_state)
    signal = prng.randn(n_TRs)
    # Rejection-sample the third voxel until its across-subject correlations
    # are small (max pairwise r < .3) and centered (mean |r| < .001).
    correlated = True
    while correlated:
        uncorrelated = prng.randn(n_TRs,
                                  n_subjects)[:, np.newaxis, :]
        unc_max = np.amax(squareform(np.corrcoef(
            uncorrelated[:, 0, :].T), checks=False))
        unc_mean = np.mean(squareform(np.corrcoef(
            uncorrelated[:, 0, :].T), checks=False))
        if unc_max < .3 and np.abs(unc_mean) < .001:
            correlated = False
    # Voxels 0 and 1: the shared signal, replicated across subjects.
    data = np.repeat(np.column_stack((signal, signal))[..., np.newaxis],
                     n_subjects, axis=2)
    data = np.concatenate((data, uncorrelated), axis=1)
    data = data + prng.randn(n_TRs, 3, n_subjects) * noise
    return data
# Compute ISCs using different input types
# List of subjects with one voxel/ROI
def test_isc_input():
    """ISC must be identical for list and array inputs, with and without voxels."""
    # Toy time series parameters
    n_subjects, n_TRs, n_voxels = 20, 60, 30
    random_state = 42
    logger.info("Testing ISC inputs")
    # Single voxel/ROI: list of subjects vs. stacked array
    single_roi_list = simulated_timeseries(n_subjects, n_TRs,
                                           n_voxels=None, data_type='list',
                                           random_state=random_state)
    iscs_from_list = isc(single_roi_list, pairwise=False, summary_statistic=None)
    single_roi_array = simulated_timeseries(n_subjects, n_TRs,
                                            n_voxels=None, data_type='array',
                                            random_state=random_state)
    iscs_from_array = isc(single_roi_array, pairwise=False, summary_statistic=None)
    # Both input forms must yield the same result
    assert np.array_equal(iscs_from_list, iscs_from_array)
    # Multiple voxels/ROIs: list of subjects vs. stacked array
    multi_voxel_list = simulated_timeseries(n_subjects, n_TRs,
                                            n_voxels=n_voxels, data_type='list',
                                            random_state=random_state)
    iscs_from_list = isc(multi_voxel_list, pairwise=False, summary_statistic=None)
    multi_voxel_array = simulated_timeseries(n_subjects, n_TRs,
                                             n_voxels=n_voxels, data_type='array',
                                             random_state=random_state)
    iscs_from_array = isc(multi_voxel_array, pairwise=False, summary_statistic=None)
    assert np.array_equal(iscs_from_list, iscs_from_array)
    logger.info("Finished testing ISC inputs")
# Check pairwise and leave-one-out, and summary statistics for ISC
def test_isc_options():
    """Check output shapes for leave-one-out vs pairwise ISC and summaries."""
    # Toy time series parameters
    n_subjects, n_TRs, n_voxels = 20, 60, 30
    random_state = 42
    logger.info("Testing ISC options")
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array',
                                random_state=random_state)
    # Leave-one-out: one ISC row per subject
    iscs_loo = isc(data, pairwise=False, summary_statistic=None)
    assert iscs_loo.shape == (n_subjects, n_voxels)
    # Just two subjects: result collapses to a single voxel vector
    iscs_loo = isc(data[..., :2], pairwise=False, summary_statistic=None)
    assert iscs_loo.shape == (n_voxels,)
    # Pairwise: one row per subject pair
    iscs_pw = isc(data, pairwise=True, summary_statistic=None)
    assert iscs_pw.shape == (n_subjects*(n_subjects-1)/2, n_voxels)
    # Check summary statistics
    isc_mean = isc(data, pairwise=False, summary_statistic='mean')
    assert isc_mean.shape == (n_voxels,)
    isc_median = isc(data, pairwise=False, summary_statistic='median')
    assert isc_median.shape == (n_voxels,)
    # Unsupported summary statistic must be rejected
    with pytest.raises(ValueError):
        isc(data, pairwise=False, summary_statistic='min')
    logger.info("Finished testing ISC options")
# Make sure ISC recovers correlations of 1 and less than 1
def test_isc_output():
    """ISC should give r~1 on the shared-signal voxels and r<1 on the noise voxel."""
    logger.info("Testing ISC outputs")
    data = correlated_timeseries(20, 60, noise=0,
                                 random_state=42)
    # Same expectations for leave-one-out and pairwise approaches
    for use_pairwise in (False, True):
        iscs = isc(data, pairwise=use_pairwise)
        assert np.allclose(iscs[:, :2], 1., rtol=1e-05)
        assert np.all(iscs[:, -1] < 1.)
    logger.info("Finished testing ISC outputs")
# Check for proper handling of NaNs in ISC
def test_isc_nans():
    """Exercise tolerate_nans=True/False/fraction and check NaN propagation.

    The expected NaN counts (60, 42, 28, ...) are tied to the exact
    parameters below (20 subjects, 30 voxels) — do not change one without
    the other.
    """
    # Set parameters for toy time series data
    n_subjects = 20
    n_TRs = 60
    n_voxels = 30
    random_state = 42
    logger.info("Testing ISC options")
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array',
                                random_state=random_state)
    # Inject NaNs into data
    data[0, 0, 0] = np.nan
    # Don't tolerate NaNs, should lose zeroeth voxel
    iscs_loo = isc(data, pairwise=False, tolerate_nans=False)
    assert np.sum(np.isnan(iscs_loo)) == n_subjects
    # Tolerate all NaNs, only subject with NaNs yields NaN
    iscs_loo = isc(data, pairwise=False, tolerate_nans=True)
    assert np.sum(np.isnan(iscs_loo)) == 1
    # Pairwise approach shouldn't care
    iscs_pw_T = isc(data, pairwise=True, tolerate_nans=True)
    iscs_pw_F = isc(data, pairwise=True, tolerate_nans=False)
    assert np.allclose(iscs_pw_T, iscs_pw_F, equal_nan=True)
    # Every pair involving the NaN subject is NaN: n_subjects - 1 pairs.
    assert (np.sum(np.isnan(iscs_pw_T)) ==
            np.sum(np.isnan(iscs_pw_F)) ==
            n_subjects - 1)
    # Set proportion of nans to reject (70% and 90% non-NaN)
    data[0, 0, :] = np.nan
    data[0, 1, :n_subjects - int(n_subjects * .7)] = np.nan
    data[0, 2, :n_subjects - int(n_subjects * .9)] = np.nan
    iscs_loo_T = isc(data, pairwise=False, tolerate_nans=True)
    iscs_loo_F = isc(data, pairwise=False, tolerate_nans=False)
    iscs_loo_95 = isc(data, pairwise=False, tolerate_nans=.95)
    iscs_loo_90 = isc(data, pairwise=False, tolerate_nans=.90)
    iscs_loo_80 = isc(data, pairwise=False, tolerate_nans=.8)
    iscs_loo_70 = isc(data, pairwise=False, tolerate_nans=.7)
    iscs_loo_60 = isc(data, pairwise=False, tolerate_nans=.6)
    # Thresholds above the actual non-NaN fraction behave like False;
    # thresholds at or below it behave like True.
    assert (np.sum(np.isnan(iscs_loo_F)) ==
            np.sum(np.isnan(iscs_loo_95)) == 60)
    assert (np.sum(np.isnan(iscs_loo_80)) ==
            np.sum(np.isnan(iscs_loo_90)) == 42)
    assert (np.sum(np.isnan(iscs_loo_T)) ==
            np.sum(np.isnan(iscs_loo_60)) ==
            np.sum(np.isnan(iscs_loo_70)) == 28)
    # Equivalent thresholds must also agree per subject, not just in total.
    assert np.array_equal(np.sum(np.isnan(iscs_loo_F), axis=0),
                          np.sum(np.isnan(iscs_loo_95), axis=0))
    assert np.array_equal(np.sum(np.isnan(iscs_loo_80), axis=0),
                          np.sum(np.isnan(iscs_loo_90), axis=0))
    assert np.all((np.array_equal(
                       np.sum(np.isnan(iscs_loo_T), axis=0),
                       np.sum(np.isnan(iscs_loo_60), axis=0)),
                   np.array_equal(
                       np.sum(np.isnan(iscs_loo_T), axis=0),
                       np.sum(np.isnan(iscs_loo_70), axis=0)),
                   np.array_equal(
                       np.sum(np.isnan(iscs_loo_60), axis=0),
                       np.sum(np.isnan(iscs_loo_70), axis=0))))
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array',
                                random_state=random_state)
    # Make sure voxel with NaNs across all subjects is always removed
    data[0, 0, :] = np.nan
    iscs_loo_T = isc(data, pairwise=False, tolerate_nans=True)
    iscs_loo_F = isc(data, pairwise=False, tolerate_nans=False)
    assert np.allclose(iscs_loo_T, iscs_loo_F, equal_nan=True)
    assert (np.sum(np.isnan(iscs_loo_T)) ==
            np.sum(np.isnan(iscs_loo_F)) ==
            n_subjects)
    iscs_pw_T = isc(data, pairwise=True, tolerate_nans=True)
    iscs_pw_F = isc(data, pairwise=True, tolerate_nans=False)
    assert np.allclose(iscs_pw_T, iscs_pw_F, equal_nan=True)
    assert (np.sum(np.isnan(iscs_pw_T)) ==
            np.sum(np.isnan(iscs_pw_F)) ==
            n_subjects * (n_subjects - 1) / 2)
# Test one-sample bootstrap test
def test_bootstrap_isc():
    """Exercise the one-sample bootstrap hypothesis test on toy ISC data."""
    # Parameters for the simulated time series
    n_subjects = 20
    n_TRs = 60
    n_voxels = 30
    random_state = 42
    n_bootstraps = 10

    logger.info("Testing bootstrap hypothesis test")
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array',
                                random_state=random_state)

    # Bootstrap on leave-one-out ISCs, then on pairwise ISCs; in both
    # cases the null distribution has one row per bootstrap sample
    for pw in (False, True):
        iscs = isc(data, pairwise=pw, summary_statistic=None)
        observed, ci, p, distribution = bootstrap_isc(
            iscs, pairwise=pw,
            summary_statistic='median',
            n_bootstraps=n_bootstraps,
            ci_percentile=95)
        assert distribution.shape == (n_bootstraps, n_voxels)

    # Identical seeds must reproduce the distribution; seed None must not
    iscs = isc(data, pairwise=False, summary_statistic=None)
    distributions = []
    for seed in [42, 42, None]:
        observed, ci, p, distribution = bootstrap_isc(
            iscs, pairwise=False,
            summary_statistic='median',
            n_bootstraps=n_bootstraps,
            ci_percentile=95,
            random_state=seed)
        distributions.append(distribution)
    assert np.array_equal(distributions[0], distributions[1])
    assert not np.array_equal(distributions[1], distributions[2])

    # p-values should separate the two correlated voxels (significant)
    # from the uncorrelated one
    data = correlated_timeseries(20, 60, noise=.5,
                                 random_state=42)
    for pw in (False, True):
        iscs = isc(data, pairwise=pw)
        observed, ci, p, distribution = bootstrap_isc(iscs, pairwise=pw)
        assert np.all(iscs[:, :2] > .5)
        assert np.all(iscs[:, -1] < .5)
        assert p[0] < .05 and p[1] < .05
        assert p[2] > .01

    # The bootstrap's observed value must equal a direct ISC summary
    for pw in (False, True):
        iscs = isc(data, pairwise=pw)
        observed, ci, p, distribution = bootstrap_isc(
            iscs, pairwise=pw, summary_statistic='median')
        assert np.array_equal(observed, isc(data, pairwise=pw,
                                            summary_statistic='median'))

    logger.info("Finished testing bootstrap hypothesis test")
# Test permutation test with group assignments
def test_permutation_isc():
    """Exercise permutation_isc: two-sample (group) tests in both pairwise
    and leave-one-out approaches, one-sample tests, small-sample exact
    tests with unequal groups, p-value sanity checks, and agreement of the
    observed statistic with a direct ISC computation."""
    # Set parameters for toy time series data
    n_subjects = 20
    n_TRs = 60
    n_voxels = 30
    random_state = 42
    group_assignment = [1] * 10 + [2] * 10
    logger.info("Testing permutation test")
    # Create dataset with two groups in pairwise approach; the groups
    # differ only in noise level (1 vs. 5)
    data = np.dstack((simulated_timeseries(10, n_TRs, n_voxels=n_voxels,
                                           noise=1, data_type='array',
                                           random_state=3),
                      simulated_timeseries(10, n_TRs, n_voxels=n_voxels,
                                           noise=5, data_type='array',
                                           random_state=4)))
    iscs = isc(data, pairwise=True, summary_statistic=None)
    observed, p, distribution = permutation_isc(
        iscs,
        group_assignment=group_assignment,
        pairwise=True,
        summary_statistic='mean',
        n_permutations=200)
    # Create data with two groups in leave-one-out approach
    data_1 = simulated_timeseries(10, n_TRs, n_voxels=n_voxels,
                                  noise=1, data_type='array',
                                  random_state=3)
    data_2 = simulated_timeseries(10, n_TRs, n_voxels=n_voxels,
                                  noise=10, data_type='array',
                                  random_state=4)
    # Leave-one-out ISCs are computed per group, then stacked
    iscs = np.vstack((isc(data_1, pairwise=False, summary_statistic=None),
                      isc(data_2, pairwise=False, summary_statistic=None)))
    observed, p, distribution = permutation_isc(
        iscs,
        group_assignment=group_assignment,
        pairwise=False,
        summary_statistic='mean',
        n_permutations=200)
    # One-sample leave-one-out permutation test
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array',
                                random_state=random_state)
    iscs = isc(data, pairwise=False, summary_statistic=None)
    observed, p, distribution = permutation_isc(iscs,
                                                pairwise=False,
                                                summary_statistic='median',
                                                n_permutations=200)
    # One-sample pairwise permutation test
    iscs = isc(data, pairwise=True, summary_statistic=None)
    observed, p, distribution = permutation_isc(iscs,
                                                pairwise=True,
                                                summary_statistic='median',
                                                n_permutations=200)
    # Small one-sample leave-one-out test; n_permutations exceeds the
    # number of distinct sign flips, so an exact test may be used
    data = simulated_timeseries(12, n_TRs,
                                n_voxels=n_voxels, data_type='array',
                                random_state=random_state)
    iscs = isc(data, pairwise=False, summary_statistic=None)
    observed, p, distribution = permutation_isc(iscs, pairwise=False,
                                                summary_statistic='median',
                                                n_permutations=10000)
    # Small two-sample pairwise exact test (and unequal groups: 3 vs. 4)
    data = np.dstack((simulated_timeseries(3, n_TRs, n_voxels=n_voxels,
                                           noise=1, data_type='array',
                                           random_state=3),
                      simulated_timeseries(4, n_TRs, n_voxels=n_voxels,
                                           noise=50, data_type='array',
                                           random_state=4)))
    iscs = isc(data, pairwise=True, summary_statistic=None)
    group_assignment = [1, 1, 1, 2, 2, 2, 2]
    observed, p, distribution = permutation_isc(
        iscs,
        group_assignment=group_assignment,
        pairwise=True,
        summary_statistic='mean',
        n_permutations=10000)
    # Small two-sample leave-one-out exact test (and unequal groups)
    data_1 = simulated_timeseries(3, n_TRs, n_voxels=n_voxels,
                                  noise=1, data_type='array',
                                  random_state=3)
    data_2 = simulated_timeseries(4, n_TRs, n_voxels=n_voxels,
                                  noise=50, data_type='array',
                                  random_state=4)
    iscs = np.vstack((isc(data_1, pairwise=False, summary_statistic=None),
                      isc(data_2, pairwise=False, summary_statistic=None)))
    group_assignment = [1, 1, 1, 2, 2, 2, 2]
    observed, p, distribution = permutation_isc(
        iscs,
        group_assignment=group_assignment,
        pairwise=False,
        summary_statistic='mean',
        n_permutations=10000)
    # Check output p-values: the first two voxels are strongly correlated
    # (significant), the last one is not
    data = correlated_timeseries(20, 60, noise=.5,
                                 random_state=42)
    iscs = isc(data, pairwise=False)
    observed, p, distribution = permutation_isc(iscs, pairwise=False)
    assert np.all(iscs[:, :2] > .5)
    assert np.all(iscs[:, -1] < .5)
    assert p[0] < .05 and p[1] < .05
    assert p[2] > .01
    iscs = isc(data, pairwise=True)
    observed, p, distribution = permutation_isc(iscs, pairwise=True)
    assert np.all(iscs[:, :2] > .5)
    assert np.all(iscs[:, -1] < .5)
    assert p[0] < .05 and p[1] < .05
    assert p[2] > .01
    # Check that ISC computation and permutation observed are same
    iscs = isc(data, pairwise=False)
    observed, p, distribution = permutation_isc(iscs, pairwise=False,
                                                summary_statistic='median')
    assert np.allclose(observed, isc(data, pairwise=False,
                                     summary_statistic='median'),
                       rtol=1e-03)
    # Check that ISC computation and permutation observed are same (pairwise)
    iscs = isc(data, pairwise=True)
    observed, p, distribution = permutation_isc(iscs, pairwise=True,
                                                summary_statistic='mean')
    assert np.allclose(observed, isc(data, pairwise=True,
                                     summary_statistic='mean'),
                       rtol=1e-03)
    logger.info("Finished testing permutaton test")
def test_timeshift_isc():
    """Exercise the circular time-shift randomization test for ISC."""
    # Parameters for the simulated time series
    n_subjects = 20
    n_TRs = 60
    n_voxels = 30
    logger.info("Testing circular time-shift")

    # One-sample tests on fresh simulated data: leave-one-out/median,
    # pairwise/median, then leave-one-out/mean
    for pw, stat in ((False, 'median'), (True, 'median'), (False, 'mean')):
        data = simulated_timeseries(n_subjects, n_TRs,
                                    n_voxels=n_voxels, data_type='array')
        observed, p, distribution = timeshift_isc(data, pairwise=pw,
                                                  summary_statistic=stat,
                                                  n_shifts=200)

    # p-values should separate the two correlated voxels (significant)
    # from the uncorrelated one
    data = correlated_timeseries(20, 60, noise=.5,
                                 random_state=42)
    for pw in (False, True):
        iscs = isc(data, pairwise=pw)
        observed, p, distribution = timeshift_isc(data, pairwise=pw)
        assert np.all(iscs[:, :2] > .5)
        assert np.all(iscs[:, -1] < .5)
        assert p[0] < .05 and p[1] < .05
        assert p[2] > .01

    # The observed statistic must agree with a direct ISC computation
    for pw, stat in ((False, 'median'), (True, 'mean')):
        iscs = isc(data, pairwise=pw)
        observed, p, distribution = timeshift_isc(data, pairwise=pw,
                                                  summary_statistic=stat)
        assert np.allclose(observed, isc(data, pairwise=pw,
                                         summary_statistic=stat),
                           rtol=1e-03)

    logger.info("Finished testing circular time-shift")
# Phase randomization test
def test_phaseshift_isc():
    """Exercise the phase-randomization test for ISC in both pairwise and
    leave-one-out approaches, plus p-value and observed-statistic checks."""
    # Set parameters for toy time series data
    n_subjects = 20
    n_TRs = 60
    n_voxels = 30
    logger.info("Testing phase randomization")
    # Phase randomization one-sample test, pairwise
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array')
    observed, p, distribution = phaseshift_isc(data, pairwise=True,
                                               summary_statistic='median',
                                               n_shifts=200)
    # Phase randomization one-sample test, leave-one-out
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array')
    observed, p, distribution = phaseshift_isc(data, pairwise=False,
                                               summary_statistic='mean',
                                               n_shifts=200)
    # Check output p-values: first two voxels are correlated (significant),
    # the last one is uncorrelated (not significant)
    data = correlated_timeseries(20, 60, noise=.5,
                                 random_state=42)
    iscs = isc(data, pairwise=False)
    observed, p, distribution = phaseshift_isc(data, pairwise=False)
    assert np.all(iscs[:, :2] > .5)
    assert np.all(iscs[:, -1] < .5)
    assert p[0] < .05 and p[1] < .05
    assert p[2] > .01
    iscs = isc(data, pairwise=True)
    observed, p, distribution = phaseshift_isc(data, pairwise=True)
    assert np.all(iscs[:, :2] > .5)
    assert np.all(iscs[:, -1] < .5)
    assert p[0] < .05 and p[1] < .05
    assert p[2] > .01
    # Check that ISC computation and phase-shift observed are same
    iscs = isc(data, pairwise=False)
    observed, p, distribution = phaseshift_isc(data, pairwise=False,
                                               summary_statistic='median')
    assert np.allclose(observed, isc(data, pairwise=False,
                                     summary_statistic='median'),
                       rtol=1e-03)
    # Check that ISC computation and phase-shift observed are same (pairwise)
    iscs = isc(data, pairwise=True)
    observed, p, distribution = phaseshift_isc(data, pairwise=True,
                                               summary_statistic='mean')
    assert np.allclose(observed, isc(data, pairwise=True,
                                     summary_statistic='mean'),
                       rtol=1e-03)
    logger.info("Finished testing phase randomization")
# Test ISFC
def test_isfc_options():
    """Exercise isfc(): vectorized vs. square output shapes, two-subject
    edge case, pairwise approach, summary statistics, ISC/ISFC diagonal
    agreement, and the targets argument (including invalid shapes)."""
    # Set parameters for toy time series data
    n_subjects = 20
    n_TRs = 60
    n_voxels = 30
    logger.info("Testing ISFC options")
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array')
    # Default vectorized output: condensed off-diagonal ISFCs plus ISCs
    isfcs, iscs = isfc(data, pairwise=False, summary_statistic=None)
    assert isfcs.shape == (n_subjects, n_voxels * (n_voxels - 1) / 2)
    assert iscs.shape == (n_subjects, n_voxels)
    # Without vectorized upper triangle (square, redundant matrices)
    isfcs = isfc(data, pairwise=False, summary_statistic=None,
                 vectorize_isfcs=False)
    assert isfcs.shape == (n_subjects, n_voxels, n_voxels)
    # Just two subjects: the subject dimension is dropped
    isfcs, iscs = isfc(data[..., :2], pairwise=False, summary_statistic=None)
    assert isfcs.shape == (n_voxels * (n_voxels - 1) / 2,)
    assert iscs.shape == (n_voxels,)
    isfcs = isfc(data[..., :2], pairwise=False, summary_statistic=None,
                 vectorize_isfcs=False)
    assert isfcs.shape == (n_voxels, n_voxels)
    # ISFC with pairwise approach: one row per subject pair
    isfcs, iscs = isfc(data, pairwise=True, summary_statistic=None)
    assert isfcs.shape == (n_subjects * (n_subjects - 1) / 2,
                           n_voxels * (n_voxels - 1) / 2)
    assert iscs.shape == (n_subjects * (n_subjects - 1) / 2,
                          n_voxels)
    isfcs = isfc(data, pairwise=True, summary_statistic=None,
                 vectorize_isfcs=False)
    assert isfcs.shape == (n_subjects * (n_subjects - 1) / 2,
                           n_voxels, n_voxels)
    # ISFC with summary statistics (just check they run)
    isfcs, iscs = isfc(data, pairwise=True, summary_statistic='mean')
    isfcs, iscs = isfc(data, pairwise=True, summary_statistic='median')
    # Check output values on data with known correlation structure:
    # voxels 0 and 1 are correlated, voxel 2 is not
    data = correlated_timeseries(20, 60, noise=.5,
                                 random_state=42)
    isfcs = isfc(data, pairwise=False, vectorize_isfcs=False)
    assert np.all(isfcs[:, 0, 1] > .5) and np.all(isfcs[:, 1, 0] > .5)
    assert np.all(isfcs[:, :2, 2] < .5) and np.all(isfcs[:, 2, :2] < .5)
    isfcs = isfc(data, pairwise=True, vectorize_isfcs=False)
    assert np.all(isfcs[:, 0, 1] > .5) and np.all(isfcs[:, 1, 0] > .5)
    assert np.all(isfcs[:, :2, 2] < .5) and np.all(isfcs[:, 2, :2] < .5)
    # Check that ISC and ISFC diagonal are identical
    iscs = isc(data, pairwise=False)
    isfcs = isfc(data, pairwise=False, vectorize_isfcs=False)
    for s in np.arange(len(iscs)):
        assert np.allclose(isfcs[s, ...].diagonal(), iscs[s, :], rtol=1e-03)
    isfcs, iscs_v = isfc(data, pairwise=False)
    assert np.allclose(iscs, iscs_v, rtol=1e-03)
    # Check that ISC and ISFC diagonal are identical (pairwise)
    iscs = isc(data, pairwise=True)
    isfcs = isfc(data, pairwise=True, vectorize_isfcs=False)
    for s in np.arange(len(iscs)):
        assert np.allclose(isfcs[s, ...].diagonal(), iscs[s, :], rtol=1e-03)
    isfcs, iscs_v = isfc(data, pairwise=True)
    assert np.allclose(iscs, iscs_v, rtol=1e-03)
    # Generate 'targets' data and use for ISFC
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array')
    n_targets = 15
    targets_data = simulated_timeseries(n_subjects, n_TRs,
                                        n_voxels=n_targets,
                                        data_type='array')
    isfcs = isfc(data, targets=targets_data, pairwise=False,
                 vectorize_isfcs=False)
    assert isfcs.shape == (n_subjects, n_voxels, n_targets)
    # Ensure 'square' output enforced (vectorization ignored with targets)
    isfcs = isfc(data, targets=targets_data, pairwise=False,
                 vectorize_isfcs=True)
    assert isfcs.shape == (n_subjects, n_voxels, n_targets)
    # Check list input for targets
    targets_data = simulated_timeseries(n_subjects, n_TRs,
                                        n_voxels=n_targets,
                                        data_type='list')
    isfcs = isfc(data, targets=targets_data, pairwise=False,
                 vectorize_isfcs=False)
    assert isfcs.shape == (n_subjects, n_voxels, n_targets)
    # Check that mismatching subjects / TRs breaks targets
    targets_data = simulated_timeseries(n_subjects, n_TRs,
                                        n_voxels=n_targets,
                                        data_type='array')
    with pytest.raises(ValueError):
        isfcs = isfc(data, targets=targets_data[..., :-1],
                     pairwise=False, vectorize_isfcs=False)
        # NOTE(review): this assert is unreachable when isfc raises as
        # expected -- it appears to be leftover from a copy-paste
        assert isfcs.shape == (n_subjects, n_voxels, n_targets)
    with pytest.raises(ValueError):
        isfcs = isfc(data, targets=targets_data[:-1, ...],
                     pairwise=False, vectorize_isfcs=False)
    # Check targets for only 2 subjects
    isfcs = isfc(data[..., :2], targets=targets_data[..., :2],
                 pairwise=False, summary_statistic=None)
    assert isfcs.shape == (2, n_voxels, n_targets)
    isfcs = isfc(data[..., :2], targets=targets_data[..., :2],
                 pairwise=True, summary_statistic=None)
    assert isfcs.shape == (2, n_voxels, n_targets)
    # Check that supplying targets enforces leave-one-out: even with
    # pairwise=True the output has one row per subject
    isfcs_pw = isfc(data, targets=targets_data, pairwise=True,
                    vectorize_isfcs=False, tolerate_nans=False)
    assert isfcs_pw.shape == (n_subjects, n_voxels, n_targets)
    logger.info("Finished testing ISFC options")
# Check for proper handling of NaNs in ISFC
def test_isfc_nans():
    """Check NaN handling in isfc() across leave-one-out and pairwise
    approaches, square and vectorized output, fractional tolerate_nans
    thresholds, all-subject NaN voxels, and NaNs in the targets array.

    Fixes two typos in the original assertions: the final pairwise
    NaN-count check compared isfcs_pw_T against itself (a no-op), and one
    unpacking bound the tolerate_nans=False ISCs to iscs_pw_T, clobbering
    the tolerate_nans=True result.
    """
    # Set parameters for toy time series data
    n_subjects = 20
    n_TRs = 60
    n_voxels = 30
    random_state = 42
    logger.info("Testing ISC options")
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array',
                                random_state=random_state)

    # Inject a single NaN for one subject/voxel/TR
    data[0, 0, 0] = np.nan

    # Don't tolerate NaNs, should lose zeroeth voxel: its row and column
    # of the square matrix (2 * n_voxels - 1 cells) for every subject
    isfcs_loo = isfc(data, pairwise=False, vectorize_isfcs=False,
                     tolerate_nans=False)
    assert np.sum(np.isnan(isfcs_loo)) == n_subjects * (n_voxels * 2 - 1)

    # With vectorized ISFCs only the off-diagonal entries remain
    isfcs_loo, iscs_loo = isfc(data, pairwise=False, vectorize_isfcs=True,
                               tolerate_nans=False)
    assert np.sum(np.isnan(isfcs_loo)) == n_subjects * (n_voxels - 1)

    # Tolerate all NaNs, only subject with NaNs yields NaN
    isfcs_loo = isfc(data, pairwise=False, vectorize_isfcs=False,
                     tolerate_nans=True)
    assert np.sum(np.isnan(isfcs_loo)) == n_voxels * 2 - 1
    isfcs_loo, iscs_loo = isfc(data, pairwise=False, vectorize_isfcs=True,
                               tolerate_nans=True)
    assert np.sum(np.isnan(isfcs_loo)) == n_voxels - 1

    # Pairwise approach shouldn't care about tolerate_nans
    isfcs_pw_T = isfc(data, pairwise=True, vectorize_isfcs=False,
                      tolerate_nans=True)
    isfcs_pw_F = isfc(data, pairwise=True, vectorize_isfcs=False,
                      tolerate_nans=False)
    assert np.allclose(isfcs_pw_T, isfcs_pw_F, equal_nan=True)
    assert (np.sum(np.isnan(isfcs_pw_T)) ==
            np.sum(np.isnan(isfcs_pw_F)) ==
            (n_voxels * 2 - 1) * (n_subjects - 1))
    isfcs_pw_T, iscs_pw_T = isfc(data, pairwise=True, vectorize_isfcs=True,
                                 tolerate_nans=True)
    # BUG FIX: the second target was originally iscs_pw_T, which
    # overwrote the tolerate_nans=True ISCs with the False run's
    isfcs_pw_F, iscs_pw_F = isfc(data, pairwise=True, vectorize_isfcs=True,
                                 tolerate_nans=False)
    assert np.allclose(isfcs_pw_T, isfcs_pw_F, equal_nan=True)
    assert (np.sum(np.isnan(isfcs_pw_T)) ==
            np.sum(np.isnan(isfcs_pw_F)) ==
            (n_voxels - 1) * (n_subjects - 1))

    # Set proportion of nans to reject (70% and 90% non-NaN)
    data[0, 0, :] = np.nan
    data[0, 1, :n_subjects - int(n_subjects * .7)] = np.nan
    data[0, 2, :n_subjects - int(n_subjects * .9)] = np.nan
    isfcs_loo_T = isfc(data, pairwise=False, vectorize_isfcs=False,
                       tolerate_nans=True)
    isfcs_loo_F = isfc(data, pairwise=False, vectorize_isfcs=False,
                       tolerate_nans=False)
    isfcs_loo_95 = isfc(data, pairwise=False, vectorize_isfcs=False,
                        tolerate_nans=.95)
    isfcs_loo_90 = isfc(data, pairwise=False, vectorize_isfcs=False,
                        tolerate_nans=.90)
    isfcs_loo_80 = isfc(data, pairwise=False, vectorize_isfcs=False,
                        tolerate_nans=.8)
    isfcs_loo_70 = isfc(data, pairwise=False, vectorize_isfcs=False,
                        tolerate_nans=.7)
    isfcs_loo_60 = isfc(data, pairwise=False, vectorize_isfcs=False,
                        tolerate_nans=.6)
    # Thresholds pair up: False/.95, .8/.90, and True/.6/.7 give the
    # same NaN counts for this NaN pattern
    assert (np.sum(np.isnan(isfcs_loo_F)) ==
            np.sum(np.isnan(isfcs_loo_95)) == 3420)
    assert (np.sum(np.isnan(isfcs_loo_80)) ==
            np.sum(np.isnan(isfcs_loo_90)) == 2430)
    assert (np.sum(np.isnan(isfcs_loo_T)) ==
            np.sum(np.isnan(isfcs_loo_60)) ==
            np.sum(np.isnan(isfcs_loo_70)) == 1632)
    assert np.array_equal(np.sum(np.isnan(isfcs_loo_F), axis=0),
                          np.sum(np.isnan(isfcs_loo_95), axis=0))
    assert np.array_equal(np.sum(np.isnan(isfcs_loo_80), axis=0),
                          np.sum(np.isnan(isfcs_loo_90), axis=0))
    assert np.all((np.array_equal(
                       np.sum(np.isnan(isfcs_loo_T), axis=0),
                       np.sum(np.isnan(isfcs_loo_60), axis=0)),
                   np.array_equal(
                       np.sum(np.isnan(isfcs_loo_T), axis=0),
                       np.sum(np.isnan(isfcs_loo_70), axis=0)),
                   np.array_equal(
                       np.sum(np.isnan(isfcs_loo_60), axis=0),
                       np.sum(np.isnan(isfcs_loo_70), axis=0))))

    # Same thresholds with vectorized output (ISCs discarded)
    isfcs_loo_T, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
                          tolerate_nans=True)
    isfcs_loo_F, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
                          tolerate_nans=False)
    isfcs_loo_95, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
                           tolerate_nans=.95)
    isfcs_loo_90, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
                           tolerate_nans=.90)
    isfcs_loo_80, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
                           tolerate_nans=.8)
    isfcs_loo_70, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
                           tolerate_nans=.7)
    isfcs_loo_60, _ = isfc(data, pairwise=False, vectorize_isfcs=True,
                           tolerate_nans=.6)
    assert (np.sum(np.isnan(isfcs_loo_F)) ==
            np.sum(np.isnan(isfcs_loo_95)) == 1680)
    assert (np.sum(np.isnan(isfcs_loo_80)) ==
            np.sum(np.isnan(isfcs_loo_90)) == 1194)
    assert (np.sum(np.isnan(isfcs_loo_T)) ==
            np.sum(np.isnan(isfcs_loo_60)) ==
            np.sum(np.isnan(isfcs_loo_70)) == 802)
    assert np.array_equal(np.sum(np.isnan(isfcs_loo_F), axis=0),
                          np.sum(np.isnan(isfcs_loo_95), axis=0))
    assert np.array_equal(np.sum(np.isnan(isfcs_loo_80), axis=0),
                          np.sum(np.isnan(isfcs_loo_90), axis=0))
    assert np.all((np.array_equal(
                       np.sum(np.isnan(isfcs_loo_T), axis=0),
                       np.sum(np.isnan(isfcs_loo_60), axis=0)),
                   np.array_equal(
                       np.sum(np.isnan(isfcs_loo_T), axis=0),
                       np.sum(np.isnan(isfcs_loo_70), axis=0)),
                   np.array_equal(
                       np.sum(np.isnan(isfcs_loo_60), axis=0),
                       np.sum(np.isnan(isfcs_loo_70), axis=0))))

    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array',
                                random_state=random_state)

    # Make sure voxel with NaNs across all subjects is always removed
    data[0, 0, :] = np.nan
    isfcs_loo_T = isfc(data, pairwise=False, vectorize_isfcs=False,
                       tolerate_nans=True)
    isfcs_loo_F = isfc(data, pairwise=False, vectorize_isfcs=False,
                       tolerate_nans=False)
    assert np.allclose(isfcs_loo_T, isfcs_loo_F, equal_nan=True)
    assert (np.sum(np.isnan(isfcs_loo_T)) ==
            np.sum(np.isnan(isfcs_loo_F)) ==
            1180)
    isfcs_pw_T = isfc(data, pairwise=True, vectorize_isfcs=False,
                      tolerate_nans=True)
    isfcs_pw_F = isfc(data, pairwise=True, vectorize_isfcs=False,
                      tolerate_nans=False)
    assert np.allclose(isfcs_pw_T, isfcs_pw_F, equal_nan=True)
    # BUG FIX: the second operand originally compared isfcs_pw_T with
    # itself, so the isfcs_pw_F count was never checked; the preceding
    # allclose(equal_nan=True) guarantees the counts match
    assert (np.sum(np.isnan(isfcs_pw_T)) ==
            np.sum(np.isnan(isfcs_pw_F)) ==
            11210)

    # Check for NaN-handling in targets
    n_targets = 15
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array',
                                random_state=random_state)
    targets_data = simulated_timeseries(n_subjects, n_TRs,
                                        n_voxels=n_targets,
                                        data_type='array')

    # Inject NaNs into targets_data
    targets_data[0, 0, 0] = np.nan

    # Don't tolerate NaNs, should lose zeroeth voxel
    isfcs_loo = isfc(data, targets=targets_data, pairwise=False,
                     vectorize_isfcs=False, tolerate_nans=False)
    assert np.sum(np.isnan(isfcs_loo)) == (n_subjects - 1) * (n_targets * 2)

    # Single NaN in targets will get averaged out with tolerate
    isfcs_loo = isfc(data, targets=targets_data, pairwise=False,
                     vectorize_isfcs=False, tolerate_nans=True)
    assert np.sum(np.isnan(isfcs_loo)) == 0
def test_squareform_isfc():
    """Round-trip square (redundant) and condensed ISFC representations."""
    # Parameters for the simulated time series
    n_subjects = 20
    n_TRs = 60
    n_voxels = 30
    random_state = 42
    logger.info("Testing ISC options")
    data = simulated_timeseries(n_subjects, n_TRs,
                                n_voxels=n_voxels, data_type='array',
                                random_state=random_state)

    # Generate square (redundant) ISFC matrices
    square = isfc(data, vectorize_isfcs=False)
    assert square.shape == (n_subjects, n_voxels, n_voxels)

    # Condense into vectorized off-diagonal ISFCs plus the ISC diagonal
    condensed, diagonal = squareform_isfc(square)
    assert condensed.shape == (n_subjects, n_voxels * (n_voxels - 1) / 2)
    assert diagonal.shape == (n_subjects, n_voxels)

    # Re-expand and confirm the round trip is lossless
    assert np.array_equal(square, squareform_isfc(condensed, diagonal))

    # The recovered diagonal must match the ISC function's output
    assert np.allclose(isc(data), diagonal, rtol=1e-03)

    # Repeat for two subjects, where the subject dimension is dropped
    square = isfc(data[..., :2], vectorize_isfcs=False)
    assert square.shape == (n_voxels, n_voxels)
    condensed, diagonal = squareform_isfc(square)
    assert condensed.shape == (n_voxels * (n_voxels - 1) / 2,)
    assert diagonal.shape == (n_voxels,)
    assert np.array_equal(square, squareform_isfc(condensed, diagonal))
if __name__ == '__main__':
    # Run the full ISC/ISFC test battery in sequence when executed as a
    # script (rather than through pytest)
    test_isc_input()
    test_isc_options()
    test_isc_output()
    test_isc_nans()
    test_bootstrap_isc()
    test_permutation_isc()
    test_timeshift_isc()
    test_phaseshift_isc()
    test_isfc_options()
    test_isfc_nans()
    test_squareform_isfc()
    logger.info("Finished all ISC tests")
| |
#!/usr/bin/env python2.7
#
# Copyright (c) 2016, Daniel Bolgheroni.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import cmd
import signal
import shlex
from time import sleep
from pyfirmata import Arduino, serial
from conf import Config
class Sh(cmd.Cmd):
    """Interactive shell exposing the relay-channel commands.

    Channel commands read the channel number as the first shlex token of
    the argument line and look it up in the module-level ``channels``
    dict; an unknown number prints "no channel". The four single-channel
    commands (up/down/toggle/reset) share one dispatch helper.
    """
    prompt = 'rswtch> '
    intro = 'type \'help\' to see available commands'

    def default(self, line):
        # Unknown command: report and keep the shell running
        print(line + ": not found")

    def do_EOF(self, line):
        # ^D terminates the shell
        exit(0)

    # overwrite help, since commands are simple, do not need independent
    # help for each command
    def do_help(self, line):
        print("{0:<16} {1}".format("COMMAND", "DESCRIPTION"))
        print("{0:<16} {1}".format("annotate n \"c\"", "annotate c in channel n (use quotes)"))
        print("{0:<16} {1}".format("down n", "turn off the n channel"))
        print("{0:<16} {1}".format("help", "this help"))
        print("{0:<16} {1}".format("reset n", "turn the n channel off and on again after 2 seconds"))
        print("{0:<16} {1}".format("status", "display the status of all channels, including annotations"))
        print("{0:<16} {1}".format("toggle n", "turn the n channel off if its on, and vice-versa"))
        print("{0:<16} {1}".format("up n", "turn on the n channel"))

    ### helpers

    @staticmethod
    def _first_token(line, posix=False):
        """Return the first shlex token of *line* (the channel number)."""
        return shlex.shlex(line, posix=posix).get_token()

    def _apply(self, line, action):
        """Apply *action* to the channel named by *line*'s first token.

        Shared error handling for all single-channel commands: a channel
        number not present in ``channels`` prints "no channel".
        """
        c = self._first_token(line)
        try:
            action(channels[c])
        except KeyError:
            print("no channel")

    ### commands

    # up
    def do_up(self, line):
        self._apply(line, lambda ch: ch.up())

    # down
    def do_down(self, line):
        self._apply(line, lambda ch: ch.down())

    # toggle
    def do_toggle(self, line):
        self._apply(line, lambda ch: ch.toggle())

    # reset
    def do_reset(self, line):
        self._apply(line, lambda ch: ch.reset())

    # status
    def do_status(self, line):
        status()

    def do_annotate(self, line):
        # posix mode strips the quotes around the annotation text
        parser = shlex.shlex(line, posix=True)
        c = parser.get_token()
        try:
            channels[c].annotation = parser.get_token()
        except KeyError:
            print("no channel")

    # quit
    def do_quit(self, line):
        exit(0)

    # handle ^C
    @staticmethod
    def handle_sigint(signum, frame):
        exit(0)
class Channel():
    """A single relay channel driven through a Firmata pin.

    The relay module uses inverted logic: writing 1 drives the channel
    off and writing 0 drives it on.
    """

    def __init__(self, pin, boardname):
        self.__pin = pin
        self.boardname = boardname
        self.annotation = None
        # channels come up enabled by default
        self.__pin.write(0)

    def up(self):
        """Switch the channel on."""
        self.__pin.write(0)

    def down(self):
        """Switch the channel off."""
        self.__pin.write(1)

    def toggle(self):
        """Flip the channel: on if currently off, off if currently on."""
        self.__pin.write(1 if self.__pin.read() == 0 else 0)

    def reset(self):
        """Power-cycle the channel with a two-second pause."""
        self.down()
        sleep(2)
        self.up()

    @property
    def status(self):
        """Human-readable state: 'up' when the pin reads 0, else 'down'."""
        if self.__pin.read() == 0:
            return 'up'
        return 'down'
def status():
    """Print a status table for the four module-level channels.

    Channel annotations default to None, and format specs that carry a
    precision (the ``.40`` in ``{3:<40.40}``) raise TypeError for None
    (object.__format__ rejects non-empty specs), so an unannotated
    channel would crash the command; fall back to an empty string.
    """
    row = "{0:>2} {1:<6} {2:<20.20} {3:<40.40}"
    print(row.format("CH", "STATUS", "BOARD", "ANNOTATION"))
    for label, ch in (("1", ch1), ("2", ch2), ("3", ch3), ("4", ch4)):
        print(row.format(label, ch.status, ch.boardname, ch.annotation or ""))
if __name__ == '__main__':
    # Command line: the serial device is required; -v prints the board's
    # Firmata firmware version and exits, -f selects a config file.
    opts = argparse.ArgumentParser()
    opts.add_argument("-v", action="store_true",
                      help="shows board Firmata firmware version")
    opts.add_argument("-f",
                      help="specify config file")
    opts.add_argument("dev", help="serial device")
    args = opts.parse_args()

    # init Firmata module
    try:
        board = Arduino(args.dev)
    except serial.serialutil.SerialException:
        print("could not open port {0}".format(args.dev))
        exit(1)

    # try to get board firmata version
    # this fails most of the times
    if args.v:
        v = board.get_firmata_version()
        try:
            # v may be None/absent if the board did not reply, hence the
            # NameError/TypeError handler below
            print("{0}.{1}".format(v[0], v[1]))
            exit(0)
        except (NameError, TypeError):
            print("could not get board firmata version")
            exit(1)

    # handle configuration file
    if args.f:
        config = Config(args.f)
    else:
        config = Config()

    # turn off board led
    led = board.get_pin('d:13:o')
    led.write(0)

    # configuring pins: digital outputs 9/8/7/6 map to channels 1-4
    ch1 = Channel(board.get_pin('d:9:o'), config.get_boardname(1))
    ch2 = Channel(board.get_pin('d:8:o'), config.get_boardname(2))
    ch3 = Channel(board.get_pin('d:7:o'), config.get_boardname(3))
    ch4 = Channel(board.get_pin('d:6:o'), config.get_boardname(4))
    channels = {'1': ch1, '2': ch2, '3': ch3, '4': ch4}

    # start shell; SIGINT exits cleanly instead of dropping a traceback
    signal.signal(signal.SIGINT, Sh.handle_sigint)
    Sh().cmdloop()
| |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Keep it Python2.5 compatible for GAE.
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# This code is meant to work on Python 2.4 and above only.
#
# TODO(robinson): Helpers for verbose, common checks like seeing if a
# descriptor's cpp_type is CPPTYPE_MESSAGE.
"""Contains a metaclass and helper functions used to create
protocol message classes from Descriptor objects at runtime.
Recall that a metaclass is the "type" of a class.
(A class is to a metaclass what an instance is to a class.)
In this case, we use the GeneratedProtocolMessageType metaclass
to inject all the useful functionality into the classes
output by the protocol compiler at compile-time.
The upshot of all this is that the real implementation
details for ALL pure-Python protocol buffers are *here in
this file*.
"""
__author__ = 'robinson@google.com (Will Robinson)'
import sys
if sys.version_info[0] < 3:
try:
from io import StringIO as BytesIO
except ImportError:
from io import StringIO as BytesIO
import copyreg as copyreg
else:
from io import BytesIO
import copyreg
import struct
import weakref
# We use "as" to avoid name collisions with variables.
from google.protobuf.internal import containers
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import enum_type_wrapper
from google.protobuf.internal import message_listener as message_listener_mod
from google.protobuf.internal import type_checkers
from google.protobuf.internal import wire_format
from google.protobuf import descriptor as descriptor_mod
from google.protobuf import message as message_mod
from google.protobuf import text_format
_FieldDescriptor = descriptor_mod.FieldDescriptor
def NewMessage(bases, descriptor, dictionary):
  """Prepares the class dictionary before a message class is created.

  Mutates `dictionary` in place (adding nested-extension class attributes
  and slots for the descriptor's fields) and returns `bases` unchanged.
  """
  _AddClassAttributesForNestedExtensions(descriptor, dictionary)
  _AddSlots(descriptor, dictionary)
  return bases
def InitMessage(descriptor, cls):
  """Injects all runtime protocol-message functionality into cls.

  Called once the class object exists: attaches per-field helpers,
  properties, message methods, private helpers and pickle support.

  Args:
    descriptor: Descriptor object for this message type.
    cls: The just-created generated message class.
  """
  cls._decoders_by_tag = {}
  cls._extensions_by_name = {}
  cls._extensions_by_number = {}
  if (descriptor.has_options and
      descriptor.GetOptions().message_set_wire_format):
    # MessageSet wire format needs a dedicated item decoder keyed by its tag.
    cls._decoders_by_tag[decoder.MESSAGE_SET_ITEM_TAG] = (
        decoder.MessageSetItemDecoder(cls._extensions_by_number))

  # Attach stuff to each FieldDescriptor for quick lookup later on.
  for field in descriptor.fields:
    _AttachFieldHelpers(cls, field)

  _AddEnumValues(descriptor, cls)
  _AddInitMethod(descriptor, cls)
  _AddPropertiesForFields(descriptor, cls)
  _AddPropertiesForExtensions(descriptor, cls)
  _AddStaticMethods(cls)
  _AddMessageMethods(descriptor, cls)
  _AddPrivateHelperMethods(descriptor, cls)
  # Pickle via serialized state so instances round-trip through copy/pickle.
  copyreg.pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
# Stateless helpers for GeneratedProtocolMessageType below.
# Outside clients should not access these directly.
#
# I opted not to make any of these methods on the metaclass, to make it more
# clear that I'm not really using any state there and to keep clients from
# thinking that they have direct access to these construction helpers.
def _PropertyName(proto_field_name):
"""Returns the name of the public property attribute which
clients can use to get and (in some cases) set the value
of a protocol message field.
Args:
proto_field_name: The protocol message field name, exactly
as it appears (or would appear) in a .proto file.
"""
# TODO(robinson): Escape Python keywords (e.g., yield), and test this support.
# nnorwitz makes my day by writing:
# """
# FYI. See the keyword module in the stdlib. This could be as simple as:
#
# if keyword.iskeyword(proto_field_name):
# return proto_field_name + "_"
# return proto_field_name
# """
# Kenton says: The above is a BAD IDEA. People rely on being able to use
# getattr() and setattr() to reflectively manipulate field values. If we
# rename the properties, then every such user has to also make sure to apply
# the same transformation. Note that currently if you name a field "yield",
# you can still access it just fine using getattr/setattr -- it's not even
# that cumbersome to do so.
# TODO(kenton): Remove this method entirely if/when everyone agrees with my
# position.
return proto_field_name
def _VerifyExtensionHandle(message, extension_handle):
  """Verify that the given extension handle is valid.

  Checks are ordered from cheapest to most specific; each failure mode
  raises with a distinct message.

  Raises:
    KeyError: If the handle is not a FieldDescriptor, is not an
      extension, lacks a containing type, or extends a message type
      other than |message|'s.
  """
  if not isinstance(extension_handle, _FieldDescriptor):
    raise KeyError('HasExtension() expects an extension handle, got: %s' %
                   extension_handle)

  if not extension_handle.is_extension:
    raise KeyError('"%s" is not an extension.' % extension_handle.full_name)

  if not extension_handle.containing_type:
    raise KeyError('"%s" is missing a containing_type.'
                   % extension_handle.full_name)

  # Identity comparison: the handle must extend exactly this descriptor.
  if extension_handle.containing_type is not message.DESCRIPTOR:
    raise KeyError('Extension "%s" extends message type "%s", but this '
                   'message is of type "%s".' %
                   (extension_handle.full_name,
                    extension_handle.containing_type.full_name,
                    message.DESCRIPTOR.full_name))
def _AddSlots(message_descriptor, dictionary):
"""Adds a __slots__ entry to dictionary, containing the names of all valid
attributes for this message type.
Args:
message_descriptor: A Descriptor instance describing this message type.
dictionary: Class dictionary to which we'll add a '__slots__' entry.
"""
dictionary['__slots__'] = ['_cached_byte_size',
'_cached_byte_size_dirty',
'_fields',
'_unknown_fields',
'_is_present_in_parent',
'_listener',
'_listener_for_children',
'__weakref__',
'_oneofs']
def _IsMessageSetExtension(field):
  """Returns True iff |field| is a MessageSet-style extension field."""
  if not field.is_extension:
    return False
  parent = field.containing_type
  if not (parent.has_options and
          parent.GetOptions().message_set_wire_format):
    return False
  # A MessageSet item is an optional message extension scoped to itself.
  return (field.type == _FieldDescriptor.TYPE_MESSAGE and
          field.message_type == field.extension_scope and
          field.label == _FieldDescriptor.LABEL_OPTIONAL)
def _AttachFieldHelpers(cls, field_descriptor):
  """Attaches per-field serialization helpers.

  Stores an encoder, sizer and default-value constructor directly on
  field_descriptor (as _encoder/_sizer/_default_constructor), and
  registers the matching decoder(s) in cls._decoders_by_tag, keyed by
  the serialized tag bytes.
  """
  is_repeated = (field_descriptor.label == _FieldDescriptor.LABEL_REPEATED)
  is_packed = (field_descriptor.has_options and
               field_descriptor.GetOptions().packed)

  if _IsMessageSetExtension(field_descriptor):
    # MessageSet items use their own special wire format.
    field_encoder = encoder.MessageSetItemEncoder(field_descriptor.number)
    sizer = encoder.MessageSetItemSizer(field_descriptor.number)
  else:
    # Normal fields: pick encoder/sizer factories by declared type.
    field_encoder = type_checkers.TYPE_TO_ENCODER[field_descriptor.type](
        field_descriptor.number, is_repeated, is_packed)
    sizer = type_checkers.TYPE_TO_SIZER[field_descriptor.type](
        field_descriptor.number, is_repeated, is_packed)

  field_descriptor._encoder = field_encoder
  field_descriptor._sizer = sizer
  field_descriptor._default_constructor = _DefaultValueConstructorForField(
      field_descriptor)

  def AddDecoder(wiretype, is_packed):
    # Registers a decoder under the tag formed from field number + wiretype.
    tag_bytes = encoder.TagBytes(field_descriptor.number, wiretype)
    cls._decoders_by_tag[tag_bytes] = (
        type_checkers.TYPE_TO_DECODER[field_descriptor.type](
            field_descriptor.number, is_repeated, is_packed,
            field_descriptor, field_descriptor._default_constructor))

  AddDecoder(type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field_descriptor.type],
             False)

  if is_repeated and wire_format.IsTypePackable(field_descriptor.type):
    # To support wire compatibility of adding packed = true, add a decoder for
    # packed values regardless of the field's options.
    AddDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, True)
def _AddClassAttributesForNestedExtensions(descriptor, dictionary):
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in list(extension_dict.items()):
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddEnumValues(descriptor, cls):
  """Sets class-level attributes for all enum fields defined in this message.

  Also exporting a class-level object that can name enum values.

  Args:
    descriptor: Descriptor object for this message type.
    cls: Class we're constructing for this message type.
  """
  for enum_type in descriptor.enum_types:
    # The wrapper object provides Name()/Value()-style lookup for the
    # whole enum type (exposed as an attribute named after the enum).
    setattr(cls, enum_type.name, enum_type_wrapper.EnumTypeWrapper(enum_type))
    for enum_value in enum_type.values:
      # Each enum value is also exposed directly as an int class attribute.
      setattr(cls, enum_value.name, enum_value.number)
def _DefaultValueConstructorForField(field):
  """Returns a function which returns a default value for a field.

  Args:
    field: FieldDescriptor object for this field.

  The returned function has one argument:
    message: Message instance containing this field, or a weakref proxy
      of same.

  That function in turn returns a default value for this field.  The default
  value may refer back to |message| via a weak reference.
  """
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    if field.has_default_value and field.default_value != []:
      raise ValueError('Repeated field default value not empty list: %s' % (
          field.default_value))
    if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
      # We can't look at _concrete_class yet since it might not have
      # been set.  (Depends on order in which we initialize the classes.)
      # The closure therefore reads field.message_type lazily at call time;
      # the unused local copy of message_type that used to live here has
      # been removed.
      def MakeRepeatedMessageDefault(message):
        return containers.RepeatedCompositeFieldContainer(
            message._listener_for_children, field.message_type)
      return MakeRepeatedMessageDefault
    else:
      type_checker = type_checkers.GetTypeChecker(field)
      def MakeRepeatedScalarDefault(message):
        return containers.RepeatedScalarFieldContainer(
            message._listener_for_children, type_checker)
      return MakeRepeatedScalarDefault

  if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    # _concrete_class may not yet be initialized: capture the message type
    # now, and resolve the concrete class when the default is first needed.
    message_type = field.message_type
    def MakeSubMessageDefault(message):
      result = message_type._concrete_class()
      result._SetListener(message._listener_for_children)
      return result
    return MakeSubMessageDefault

  def MakeScalarDefault(message):
    # TODO(protobuf-team): This may be broken since there may not be
    # default_value.  Combine with has_default_value somehow.
    return field.default_value
  return MakeScalarDefault
def _AddInitMethod(message_descriptor, cls):
  """Adds an __init__ method to cls.

  The generated __init__ accepts keyword arguments matching field names;
  repeated and composite values are copied into freshly constructed
  containers, scalars go through the property setters for type checking.
  (The unused local `fields = message_descriptor.fields` has been removed.)
  """
  def init(self, **kwargs):
    # Byte-size cache starts clean unless field values were passed in.
    self._cached_byte_size = 0
    self._cached_byte_size_dirty = len(kwargs) > 0
    self._fields = {}
    # Contains a mapping from oneof field descriptors to the descriptor
    # of the currently set field in that oneof field.
    self._oneofs = {}
    # _unknown_fields is () when empty for efficiency, and will be turned into
    # a list if fields are added.
    self._unknown_fields = ()
    self._is_present_in_parent = False
    self._listener = message_listener_mod.NullMessageListener()
    self._listener_for_children = _Listener(self)
    for field_name, field_value in list(kwargs.items()):
      field = _GetFieldByName(message_descriptor, field_name)
      if field is None:
        # NOTE(review): _GetFieldByName raises ValueError on unknown names,
        # so this branch looks unreachable; kept to preserve behavior.
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (message_descriptor.name, field_name))
      if field.label == _FieldDescriptor.LABEL_REPEATED:
        # Copy the provided values into a fresh typed container.
        copy = field._default_constructor(self)
        if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:  # Composite
          for val in field_value:
            copy.add().MergeFrom(val)
        else:  # Scalar
          copy.extend(field_value)
        self._fields[field] = copy
      elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        # Singular submessage: merge into a freshly constructed default.
        copy = field._default_constructor(self)
        copy.MergeFrom(field_value)
        self._fields[field] = copy
      else:
        # Singular scalar: route through the property setter so type
        # checking and "has"-bit bookkeeping happen as a side effect.
        setattr(self, field_name, field_value)
  init.__module__ = None
  init.__doc__ = None
  cls.__init__ = init
def _GetFieldByName(message_descriptor, field_name):
"""Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
"""
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError('Protocol message has no "%s" field.' % field_name)
def _AddPropertiesForFields(descriptor, cls):
  """Adds properties for all fields in this protocol message type."""
  for field_descriptor in descriptor.fields:
    _AddPropertiesForField(field_descriptor, cls)

  if not descriptor.is_extendable:
    return
  # _ExtensionDict is just an adaptor with no state so we allocate a new one
  # every time it is accessed.
  cls.Extensions = property(lambda self: _ExtensionDict(self))
def _AddPropertiesForField(field, cls):
  """Adds a public property for a protocol message field.
  Clients can use this property to get and (in the case
  of non-repeated scalar fields) directly set the value
  of a protocol message field.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  # Catch it if we add other types that we should
  # handle specially here.
  assert _FieldDescriptor.MAX_CPPTYPE == 10

  # Expose the wire number as e.g. MY_FIELD_FIELD_NUMBER on the class.
  setattr(cls, field.name.upper() + "_FIELD_NUMBER", field.number)

  # Dispatch on the field shape: repeated, composite, or plain scalar.
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    _AddPropertiesForRepeatedField(field, cls)
  elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    _AddPropertiesForNonRepeatedCompositeField(field, cls)
  else:
    _AddPropertiesForNonRepeatedScalarField(field, cls)
def _AddPropertiesForRepeatedField(field, cls):
  """Adds a public property for a "repeated" protocol message field.  Clients
  can use this property to get the value of the field, which will be either a
  _RepeatedScalarFieldContainer or _RepeatedCompositeFieldContainer (see
  below).

  Note that when clients add values to these containers, we perform
  type-checking in the case of repeated scalar fields, and we also set any
  necessary "has" bits as a side-effect.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)

  def getter(self):
    # Lazily create the container on first access so default construction
    # of the message stays cheap.
    field_value = self._fields.get(field)
    if field_value is None:
      # Construct a new object to represent this field.
      field_value = field._default_constructor(self)

      # Atomically check if another thread has preempted us and, if not, swap
      # in the new object we just created.  If someone has preempted us, we
      # take that object and discard ours.
      # WARNING:  We are relying on setdefault() being atomic.  This is true
      #   in CPython but we haven't investigated others.  This warning appears
      #   in several other locations in this file.
      field_value = self._fields.setdefault(field, field_value)
    return field_value
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  # We define a setter just so we can throw an exception with a more
  # helpful error message.
  def setter(self, new_value):
    raise AttributeError('Assignment not allowed to repeated field '
                         '"%s" in protocol message object.' % proto_field_name)

  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedScalarField(field, cls):
  """Adds a public property for a nonrepeated, scalar protocol message field.
  Clients can use this property to get and directly set the value of the field.
  Note that when the client sets the value of a field by using this property,
  all necessary "has" bits are set as a side-effect, and we also perform
  type-checking.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)
  type_checker = type_checkers.GetTypeChecker(field)
  default_value = field.default_value
  # (Removed an unused local `valid_values = set()` that was never read.)

  def getter(self):
    # TODO(protobuf-team): This may be broken since there may not be
    # default_value.  Combine with has_default_value somehow.
    return self._fields.get(field, default_value)
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  def field_setter(self, new_value):
    # pylint: disable=protected-access
    self._fields[field] = type_checker.CheckValue(new_value)
    # Check _cached_byte_size_dirty inline to improve performance, since scalar
    # setters are called frequently.
    if not self._cached_byte_size_dirty:
      self._Modified()

  if field.containing_oneof is not None:
    def setter(self, new_value):
      field_setter(self, new_value)
      # Also record this field as the active member of its oneof.
      self._UpdateOneofState(field)
  else:
    setter = field_setter

  setter.__module__ = None
  setter.__doc__ = 'Setter for %s.' % proto_field_name

  # Add a property to encapsulate the getter/setter.
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
  """Adds a public property for a nonrepeated, composite protocol message field.
  A composite field is a "group" or "message" field.

  Clients can use this property to get the value of the field, but cannot
  assign to the property directly.

  Args:
    field: A FieldDescriptor for this field.
    cls: The class we're constructing.
  """
  # TODO(robinson): Remove duplication with similar method
  # for non-repeated scalars.
  proto_field_name = field.name
  property_name = _PropertyName(proto_field_name)

  # TODO(komarek): Can anyone explain to me why we cache the message_type this
  # way, instead of referring to field.message_type inside of getter(self)?
  # What if someone sets message_type later on (which makes for simpler
  # dyanmic proto descriptor and class creation code).
  message_type = field.message_type

  def getter(self):
    # Lazily construct the submessage on first access.
    field_value = self._fields.get(field)
    if field_value is None:
      # Construct a new object to represent this field.
      field_value = message_type._concrete_class()  # use field.message_type?
      # Oneof members get a listener that also updates the oneof state on
      # modification; other fields use the shared child listener.
      field_value._SetListener(
          _OneofListener(self, field)
          if field.containing_oneof is not None
          else self._listener_for_children)

      # Atomically check if another thread has preempted us and, if not, swap
      # in the new object we just created.  If someone has preempted us, we
      # take that object and discard ours.
      # WARNING:  We are relying on setdefault() being atomic.  This is true
      #   in CPython but we haven't investigated others.  This warning appears
      #   in several other locations in this file.
      field_value = self._fields.setdefault(field, field_value)
    return field_value
  getter.__module__ = None
  getter.__doc__ = 'Getter for %s.' % proto_field_name

  # We define a setter just so we can throw an exception with a more
  # helpful error message.
  def setter(self, new_value):
    raise AttributeError('Assignment not allowed to composite field '
                         '"%s" in protocol message object.' % proto_field_name)

  # Add a property to encapsulate the getter.
  doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
  setattr(cls, property_name, property(getter, setter, doc=doc))
def _AddPropertiesForExtensions(descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = descriptor.extensions_by_name
for extension_name, extension_field in list(extension_dict.items()):
constant_name = extension_name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, extension_field.number)
def _AddStaticMethods(cls):
  """Adds RegisterExtension and FromString static methods to cls."""
  # TODO(robinson): This probably needs to be thread-safe(?)
  def RegisterExtension(extension_handle):
    """Registers |extension_handle| as an extension of this message type."""
    extension_handle.containing_type = cls.DESCRIPTOR
    _AttachFieldHelpers(cls, extension_handle)

    # Try to insert our extension, failing if an extension with the same number
    # already exists.
    actual_handle = cls._extensions_by_number.setdefault(
        extension_handle.number, extension_handle)
    if actual_handle is not extension_handle:
      raise AssertionError(
          'Extensions "%s" and "%s" both try to extend message type "%s" with '
          'field number %d.' %
          (extension_handle.full_name, actual_handle.full_name,
           cls.DESCRIPTOR.full_name, extension_handle.number))

    cls._extensions_by_name[extension_handle.full_name] = extension_handle

    handle = extension_handle  # avoid line wrapping
    if _IsMessageSetExtension(handle):
      # MessageSet extension.  Also register under type name.
      cls._extensions_by_name[
          extension_handle.message_type.full_name] = extension_handle

  cls.RegisterExtension = staticmethod(RegisterExtension)

  def FromString(s):
    """Parses and returns a new message of this type from bytes |s|."""
    message = cls()
    message.MergeFromString(s)
    return message

  cls.FromString = staticmethod(FromString)
def _IsPresent(item):
  """Given a (FieldDescriptor, value) tuple from _fields, return true if the
  value should be included in the list returned by ListFields()."""
  field, value = item
  if field.label == _FieldDescriptor.LABEL_REPEATED:
    # Repeated fields count as present only when non-empty.
    return bool(value)
  if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
    # Submessages track their own presence bit.
    return value._is_present_in_parent
  # A scalar stored in _fields was explicitly set, hence present.
  return True
def _AddListFieldsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def ListFields(self):
    """Returns set/present (descriptor, value) pairs sorted by field number."""
    return sorted(
        (item for item in self._fields.items() if _IsPresent(item)),
        key=lambda item: item[0].number)
  cls.ListFields = ListFields
def _AddHasFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  # HasField() is only legal on singular fields and oneofs; build the
  # lookup table of valid names once, at class-creation time.
  singular_fields = {}
  for field in message_descriptor.fields:
    if field.label != _FieldDescriptor.LABEL_REPEATED:
      singular_fields[field.name] = field
  # Fields inside oneofs are never repeated (enforced by the compiler).
  for field in message_descriptor.oneofs:
    singular_fields[field.name] = field

  def HasField(self, field_name):
    try:
      field = singular_fields[field_name]
    except KeyError:
      raise ValueError(
          'Protocol message has no singular "%s" field.' % field_name)

    if isinstance(field, descriptor_mod.OneofDescriptor):
      # Oneof name: recurse on whichever member field is currently set.
      try:
        return HasField(self, self._oneofs[field].name)
      except KeyError:
        # No member of the oneof is set.
        return False
    else:
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        # Submessages carry an explicit presence bit.
        value = self._fields.get(field)
        return value is not None and value._is_present_in_parent
      else:
        # Scalars are present exactly when stored in _fields.
        return field in self._fields

  cls.HasField = HasField
def _AddClearFieldMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def ClearField(self, field_name):
    try:
      field = message_descriptor.fields_by_name[field_name]
    except KeyError:
      try:
        # The name may refer to a oneof; clearing a oneof clears whichever
        # member field is currently set (if any).
        field = message_descriptor.oneofs_by_name[field_name]
        if field in self._oneofs:
          field = self._oneofs[field]
        else:
          # Oneof exists but nothing is set: nothing to clear.
          return
      except KeyError:
        raise ValueError('Protocol message has no "%s" field.' % field_name)

    if field in self._fields:
      # Note:  If the field is a sub-message, its listener will still point
      #   at us.  That's fine, because the worst than can happen is that it
      #   will call _Modified() and invalidate our byte size.  Big deal.
      del self._fields[field]

      # Keep the oneof bookkeeping in sync with the deleted member.
      if self._oneofs.get(field.containing_oneof, None) is field:
        del self._oneofs[field.containing_oneof]

    # Always call _Modified() -- even if nothing was changed, this is
    # a mutating method, and thus calling it should cause the field to become
    # present in the parent message.
    self._Modified()

  cls.ClearField = ClearField
def _AddClearExtensionMethod(cls):
  """Helper for _AddMessageMethods()."""
  def ClearExtension(self, extension_handle):
    """Removes the stored value for an extension, if any; marks modified."""
    _VerifyExtensionHandle(self, extension_handle)

    # Similar to ClearField(), above.
    self._fields.pop(extension_handle, None)
    self._Modified()
  cls.ClearExtension = ClearExtension
def _AddClearMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def Clear(self):
# Clear fields.
self._fields = {}
self._unknown_fields = ()
self._Modified()
cls.Clear = Clear
def _AddHasExtensionMethod(cls):
  """Helper for _AddMessageMethods()."""
  def HasExtension(self, extension_handle):
    """Returns True iff the given singular extension is set on this message."""
    _VerifyExtensionHandle(self, extension_handle)
    if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
      raise KeyError('"%s" is repeated.' % extension_handle.full_name)

    if extension_handle.cpp_type != _FieldDescriptor.CPPTYPE_MESSAGE:
      # Scalar extensions are present exactly when stored in _fields.
      return extension_handle in self._fields
    # Composite extensions additionally track their own presence bit.
    value = self._fields.get(extension_handle)
    return value is not None and value._is_present_in_parent
  cls.HasExtension = HasExtension
def _AddEqualsMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def __eq__(self, other):
    """Messages are equal iff same descriptor, same fields, same unknowns."""
    if (not isinstance(other, message_mod.Message) or
        other.DESCRIPTOR != self.DESCRIPTOR):
      return False

    if self is other:
      return True

    if self.ListFields() != other.ListFields():
      return False

    # Sort unknown fields because their order shouldn't affect equality test.
    return sorted(self._unknown_fields) == sorted(other._unknown_fields)

  cls.__eq__ = __eq__
def _AddStrMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def __str__(self):
    """Renders the message in protobuf text format."""
    return text_format.MessageToString(self)
  cls.__str__ = __str__
def _AddUnicodeMethod(unused_message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def __unicode__(self):
    """Renders the message as UTF-8 text (Python 2 __unicode__ protocol)."""
    return text_format.MessageToString(self, as_utf8=True).decode('utf-8')
  cls.__unicode__ = __unicode__
def _AddSetListenerMethod(cls):
"""Helper for _AddMessageMethods()."""
def SetListener(self, listener):
if listener is None:
self._listener = message_listener_mod.NullMessageListener()
else:
self._listener = listener
cls._SetListener = SetListener
def _BytesForNonRepeatedElement(value, field_number, field_type):
  """Returns the number of bytes needed to serialize a non-repeated element.
  The returned byte count includes space for tag information and any
  other additional space associated with serializing value.

  Args:
    value: Value we're serializing.
    field_number: Field number of this value.  (Since the field number
      is stored as part of a varint-encoded tag, this has an impact
      on the total bytes required to serialize the value).
    field_type: The type of the field.  One of the TYPE_* constants
      within FieldDescriptor.

  Raises:
    message_mod.EncodeError: If field_type is not a recognized type.
  """
  try:
    size_fn = type_checkers.TYPE_TO_BYTE_SIZE_FN[field_type]
  except KeyError:
    raise message_mod.EncodeError('Unrecognized field type: %d' % field_type)
  return size_fn(field_number, value)
def _AddByteSizeMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def ByteSize(self):
if not self._cached_byte_size_dirty:
return self._cached_byte_size
size = 0
for field_descriptor, field_value in self.ListFields():
size += field_descriptor._sizer(field_value)
for tag_bytes, value_bytes in self._unknown_fields:
size += len(tag_bytes) + len(value_bytes)
self._cached_byte_size = size
self._cached_byte_size_dirty = False
self._listener_for_children.dirty = False
return size
cls.ByteSize = ByteSize
def _AddSerializeToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializeToString(self):
# Check if the message has all of its required fields set.
errors = []
if not self.IsInitialized():
raise message_mod.EncodeError(
'Message %s is missing required fields: %s' % (
self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors())))
return self.SerializePartialToString()
cls.SerializeToString = SerializeToString
def _AddSerializePartialToStringMethod(message_descriptor, cls):
"""Helper for _AddMessageMethods()."""
def SerializePartialToString(self):
out = BytesIO()
self._InternalSerialize(out.write)
return out.getvalue()
cls.SerializePartialToString = SerializePartialToString
def InternalSerialize(self, write_bytes):
for field_descriptor, field_value in self.ListFields():
field_descriptor._encoder(write_bytes, field_value)
for tag_bytes, value_bytes in self._unknown_fields:
write_bytes(tag_bytes)
write_bytes(value_bytes)
cls._InternalSerialize = InternalSerialize
def _AddMergeFromStringMethod(message_descriptor, cls):
  """Helper for _AddMessageMethods()."""
  def MergeFromString(self, serialized):
    """Merges wire-format bytes |serialized| into this message.

    Raises:
      message_mod.DecodeError: On truncated or malformed input.
    """
    length = len(serialized)
    try:
      if self._InternalParse(serialized, 0, length) != length:
        # The only reason _InternalParse would return early is if it
        # encountered an end-group tag.
        raise message_mod.DecodeError('Unexpected end-group tag.')
    except (IndexError, TypeError):
      # Now ord(buf[p:p+1]) == ord('') gets TypeError.
      raise message_mod.DecodeError('Truncated message.')
    except struct.error as e:
      raise message_mod.DecodeError(e)
    return length   # Return this for legacy reasons.
  cls.MergeFromString = MergeFromString

  # Bind frequently-used helpers to locals so the hot parse loop below
  # avoids repeated global/attribute lookups.
  local_ReadTag = decoder.ReadTag
  local_SkipField = decoder.SkipField
  decoders_by_tag = cls._decoders_by_tag

  def InternalParse(self, buffer, pos, end):
    self._Modified()
    field_dict = self._fields
    unknown_field_list = self._unknown_fields
    while pos != end:
      (tag_bytes, new_pos) = local_ReadTag(buffer, pos)
      field_decoder = decoders_by_tag.get(tag_bytes)
      if field_decoder is None:
        # Unknown tag: skip the value but preserve its raw bytes so they
        # round-trip on reserialization.
        value_start_pos = new_pos
        new_pos = local_SkipField(buffer, new_pos, end, tag_bytes)
        if new_pos == -1:
          # End-group tag encountered: stop and let the caller decide.
          return pos
        if not unknown_field_list:
          # Upgrade the () sentinel to a real list on first unknown field.
          unknown_field_list = self._unknown_fields = []
        unknown_field_list.append((tag_bytes, buffer[value_start_pos:new_pos]))
        pos = new_pos
      else:
        pos = field_decoder(buffer, new_pos, end, self, field_dict)
    return pos
  cls._InternalParse = InternalParse
def _AddIsInitializedMethod(message_descriptor, cls):
  """Adds the IsInitialized and FindInitializationError methods to the
  protocol message class."""

  required_fields = [field for field in message_descriptor.fields
                     if field.label == _FieldDescriptor.LABEL_REQUIRED]

  def IsInitialized(self, errors=None):
    """Checks if all required fields of a message are set.

    Args:
      errors:  A list which, if provided, will be populated with the field
               paths of all missing required fields.

    Returns:
      True iff the specified message has all required fields set.
    """

    # Performance is critical so we avoid HasField() and ListFields().

    for field in required_fields:
      # A required submessage must be both stored and marked present.
      if (field not in self._fields or
          (field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE and
           not self._fields[field]._is_present_in_parent)):
        if errors is not None:
          errors.extend(self.FindInitializationErrors())
        return False

    for field, value in list(self._fields.items()):   # dict can change size!
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.label == _FieldDescriptor.LABEL_REPEATED:
          # Every element of a repeated submessage must itself be initialized.
          for element in value:
            if not element.IsInitialized():
              if errors is not None:
                errors.extend(self.FindInitializationErrors())
              return False
        elif value._is_present_in_parent and not value.IsInitialized():
          if errors is not None:
            errors.extend(self.FindInitializationErrors())
          return False

    return True

  cls.IsInitialized = IsInitialized

  def FindInitializationErrors(self):
    """Finds required fields which are not initialized.

    Returns:
      A list of strings.  Each string is a path to an uninitialized field from
      the top-level message, e.g. "foo.bar[5].baz".
    """

    errors = []  # simplify things

    for field in required_fields:
      if not self.HasField(field.name):
        errors.append(field.name)

    for field, value in self.ListFields():
      if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        if field.is_extension:
          # Extensions are reported by fully-qualified name in parentheses.
          name = "(%s)" % field.full_name
        else:
          name = field.name

        if field.label == _FieldDescriptor.LABEL_REPEATED:
          for i in range(len(value)):
            element = value[i]
            prefix = "%s[%d]." % (name, i)
            sub_errors = element.FindInitializationErrors()
            errors += [ prefix + error for error in sub_errors ]
        else:
          prefix = name + "."
          sub_errors = value.FindInitializationErrors()
          errors += [ prefix + error for error in sub_errors ]

    return errors

  cls.FindInitializationErrors = FindInitializationErrors
def _AddMergeFromMethod(cls):
  """Helper for _AddMessageMethods()."""
  # Hoist descriptor constants to locals for the per-field loop below.
  LABEL_REPEATED = _FieldDescriptor.LABEL_REPEATED
  CPPTYPE_MESSAGE = _FieldDescriptor.CPPTYPE_MESSAGE

  def MergeFrom(self, msg):
    """Merges the contents of |msg| (a message of the same type) into self."""
    if not isinstance(msg, cls):
      raise TypeError(
          "Parameter to MergeFrom() must be instance of same class: "
          "expected %s got %s." % (cls.__name__, type(msg).__name__))

    assert msg is not self
    self._Modified()

    fields = self._fields

    for field, value in list(msg._fields.items()):
      if field.label == LABEL_REPEATED:
        # Append the source elements into a (possibly new) container.
        field_value = fields.get(field)
        if field_value is None:
          # Construct a new object to represent this field.
          field_value = field._default_constructor(self)
          fields[field] = field_value
        field_value.MergeFrom(value)
      elif field.cpp_type == CPPTYPE_MESSAGE:
        if value._is_present_in_parent:
          # Recursively merge submessages that are actually present.
          field_value = fields.get(field)
          if field_value is None:
            # Construct a new object to represent this field.
            field_value = field._default_constructor(self)
            fields[field] = field_value
          field_value.MergeFrom(value)
      else:
        # Scalars: the source value simply overwrites the destination.
        self._fields[field] = value

    if msg._unknown_fields:
      if not self._unknown_fields:
        self._unknown_fields = []
      self._unknown_fields.extend(msg._unknown_fields)

  cls.MergeFrom = MergeFrom
def _AddWhichOneofMethod(message_descriptor, cls):
def WhichOneof(self, oneof_name):
"""Returns the name of the currently set field inside a oneof, or None."""
try:
field = message_descriptor.oneofs_by_name[oneof_name]
except KeyError:
raise ValueError(
'Protocol message has no oneof "%s" field.' % oneof_name)
nested_field = self._oneofs.get(field, None)
if nested_field is not None and self.HasField(nested_field.name):
return nested_field.name
else:
return None
cls.WhichOneof = WhichOneof
def _AddMessageMethods(message_descriptor, cls):
  """Adds implementations of all Message methods to cls."""
  _AddListFieldsMethod(message_descriptor, cls)
  _AddHasFieldMethod(message_descriptor, cls)
  _AddClearFieldMethod(message_descriptor, cls)
  if message_descriptor.is_extendable:
    # Extension-related methods only exist on extendable message types.
    _AddClearExtensionMethod(cls)
    _AddHasExtensionMethod(cls)
  _AddClearMethod(message_descriptor, cls)
  _AddEqualsMethod(message_descriptor, cls)
  _AddStrMethod(message_descriptor, cls)
  _AddUnicodeMethod(message_descriptor, cls)
  _AddSetListenerMethod(cls)
  _AddByteSizeMethod(message_descriptor, cls)
  _AddSerializeToStringMethod(message_descriptor, cls)
  _AddSerializePartialToStringMethod(message_descriptor, cls)
  _AddMergeFromStringMethod(message_descriptor, cls)
  _AddIsInitializedMethod(message_descriptor, cls)
  _AddMergeFromMethod(cls)
  _AddWhichOneofMethod(message_descriptor, cls)
def _AddPrivateHelperMethods(message_descriptor, cls):
  """Adds implementation of private helper methods to cls."""

  def Modified(self):
    """Sets the _cached_byte_size_dirty bit to true,
    and propagates this to our listener iff this was a state change.
    """

    # Note:  Some callers check _cached_byte_size_dirty before calling
    #   _Modified() as an extra optimization.  So, if this method is ever
    #   changed such that it does stuff even when _cached_byte_size_dirty is
    #   already true, the callers need to be updated.
    if not self._cached_byte_size_dirty:
      self._cached_byte_size_dirty = True
      self._listener_for_children.dirty = True
      self._is_present_in_parent = True
      self._listener.Modified()

  def _UpdateOneofState(self, field):
    """Sets field as the active field in its containing oneof.

    Will also delete currently active field in the oneof, if it is different
    from the argument. Does not mark the message as modified.
    """
    # setdefault returns the previously active field if one was set.
    other_field = self._oneofs.setdefault(field.containing_oneof, field)
    if other_field is not field:
      del self._fields[other_field]
      self._oneofs[field.containing_oneof] = field

  cls._Modified = Modified
  # SetInParent is the public alias of _Modified (marks presence in parent).
  cls.SetInParent = Modified
  cls._UpdateOneofState = _UpdateOneofState
class _Listener(object):
"""MessageListener implementation that a parent message registers with its
child message.
In order to support semantics like:
foo.bar.baz.qux = 23
assert foo.HasField('bar')
...child objects must have back references to their parents.
This helper class is at the heart of this support.
"""
def __init__(self, parent_message):
"""Args:
parent_message: The message whose _Modified() method we should call when
we receive Modified() messages.
"""
# This listener establishes a back reference from a child (contained) object
# to its parent (containing) object. We make this a weak reference to avoid
# creating cyclic garbage when the client finishes with the 'parent' object
# in the tree.
if isinstance(parent_message, weakref.ProxyType):
self._parent_message_weakref = parent_message
else:
self._parent_message_weakref = weakref.proxy(parent_message)
# As an optimization, we also indicate directly on the listener whether
# or not the parent message is dirty. This way we can avoid traversing
# up the tree in the common case.
self.dirty = False
def Modified(self):
if self.dirty:
return
try:
# Propagate the signal to our parents iff this is the first field set.
self._parent_message_weakref._Modified()
except ReferenceError:
# We can get here if a client has kept a reference to a child object,
# and is now setting a field on it, but the child's parent has been
# garbage-collected. This is not an error.
pass
class _OneofListener(_Listener):
    """Special listener implementation for setting composite oneof fields."""

    def __init__(self, parent_message, field):
        """Args:
          parent_message: The message whose _Modified() method we should call when
            we receive Modified() messages.
          field: The descriptor of the field being set in the parent message.
        """
        super(_OneofListener, self).__init__(parent_message)
        # Remembered so Modified() can activate this field in its oneof.
        self._field = field

    def Modified(self):
        """Also updates the state of the containing oneof in the parent message."""
        try:
            # Activate our field in its oneof before the normal dirty
            # propagation; both calls go through the parent weakref.
            self._parent_message_weakref._UpdateOneofState(self._field)
            super(_OneofListener, self).Modified()
        except ReferenceError:
            # Parent was garbage-collected; nothing to update.
            pass
# TODO(robinson): Move elsewhere? This file is getting pretty ridiculous...
# TODO(robinson): Unify error handling of "unknown extension" crap.
# TODO(robinson): Support iteritems()-style iteration over all
# extensions with the "has" bits turned on?
class _ExtensionDict(object):
    """Dict-like container for supporting an indexable "Extensions"
    field on proto instances.
    Note that in all cases we expect extension handles to be
    FieldDescriptors.
    """

    def __init__(self, extended_message):
        """extended_message: Message instance for which we are the Extensions dict.
        """
        self._extended_message = extended_message

    def __getitem__(self, extension_handle):
        """Returns the current value of the given extension handle."""
        _VerifyExtensionHandle(self._extended_message, extension_handle)
        result = self._extended_message._fields.get(extension_handle)
        if result is not None:
            return result
        # Not yet present: lazily build a default value for repeated and
        # composite extensions (they must live in _fields so that mutations
        # stick); plain scalars are returned without being inserted.
        if extension_handle.label == _FieldDescriptor.LABEL_REPEATED:
            result = extension_handle._default_constructor(self._extended_message)
        elif extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
            result = extension_handle.message_type._concrete_class()
            try:
                result._SetListener(self._extended_message._listener_for_children)
            except ReferenceError:
                pass
        else:
            # Singular scalar -- just return the default without inserting into the
            # dict.
            return extension_handle.default_value
        # Atomically check if another thread has preempted us and, if not, swap
        # in the new object we just created. If someone has preempted us, we
        # take that object and discard ours.
        # WARNING: We are relying on setdefault() being atomic. This is true
        # in CPython but we haven't investigated others. This warning appears
        # in several other locations in this file.
        result = self._extended_message._fields.setdefault(
            extension_handle, result)
        return result

    def __eq__(self, other):
        # Two extension dicts are equal iff their messages carry the same
        # set of extension fields with equal values.
        if not isinstance(other, self.__class__):
            return False
        my_fields = self._extended_message.ListFields()
        other_fields = other._extended_message.ListFields()
        # Get rid of non-extension fields.
        my_fields = [field for field in my_fields if field.is_extension]
        other_fields = [field for field in other_fields if field.is_extension]
        return my_fields == other_fields

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Mutable container; must not be used as a dict key.
        raise TypeError('unhashable object')

    # Note that this is only meaningful for non-repeated, scalar extension
    # fields. Note also that we may have to call _Modified() when we do
    # successfully set a field this way, to set any necssary "has" bits in the
    # ancestors of the extended message.
    def __setitem__(self, extension_handle, value):
        """If extension_handle specifies a non-repeated, scalar extension
        field, sets the value of that field.
        """
        _VerifyExtensionHandle(self._extended_message, extension_handle)
        if (extension_handle.label == _FieldDescriptor.LABEL_REPEATED or
                extension_handle.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE):
            raise TypeError(
                'Cannot assign to extension "%s" because it is a repeated or '
                'composite type.' % extension_handle.full_name)
        # It's slightly wasteful to lookup the type checker each time,
        # but we expect this to be a vanishingly uncommon case anyway.
        type_checker = type_checkers.GetTypeChecker(
            extension_handle)
        # pylint: disable=protected-access
        self._extended_message._fields[extension_handle] = (
            type_checker.CheckValue(value))
        self._extended_message._Modified()

    def _FindExtensionByName(self, name):
        """Tries to find a known extension with the specified name.
        Args:
          name: Extension full name.
        Returns:
          Extension field descriptor.
        """
        # Returns None when the name is unknown.
        return self._extended_message._extensions_by_name.get(name, None)
| |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
import unittest
from datetime import datetime
from cStringIO import StringIO
import trac.tests.compat
from trac.core import Component, TracError, implements
from trac.perm import PermissionError
from trac.resource import ResourceNotFound
from trac.test import Mock, MockRequest
from trac.util.datefmt import utc
from trac.versioncontrol.api import (
Changeset, DbRepositoryProvider, IRepositoryConnector, Node, NoSuchNode,
Repository, RepositoryManager)
from trac.versioncontrol.web_ui.browser import BrowserModule
from trac.web.tests.api import RequestHandlerPermissionsTestCaseBase
class MockRepositoryConnector(Component):
    """Connector providing fake 'mock' repositories for the tests below."""

    implements(IRepositoryConnector)

    def get_supported_types(self):
        yield 'mock', 8

    def get_repository(self, repos_type, repos_dir, params):
        # A repository named 'raise' simulates a broken connector.
        if params['name'] == 'raise':
            raise TracError("")

        def get_changeset(rev):
            return Mock(Changeset, repos, rev, 'message', 'author',
                        datetime(2001, 1, 1, tzinfo=utc))

        def get_node(path, rev):
            # Paths containing 'missing' do not exist; paths containing
            # 'file' are files, everything else is a directory.
            if 'missing' in path:
                raise NoSuchNode(path, rev)
            kind = Node.FILE if 'file' in path else Node.DIRECTORY
            return Mock(Node, repos, path, rev, kind,
                        created_path=path, created_rev=rev,
                        get_entries=lambda: iter([]),
                        get_properties=lambda: {},
                        get_content=lambda: StringIO('content'),
                        get_content_length=lambda: 7,
                        get_content_type=lambda: 'application/octet-stream')

        repos = Mock(Repository, params['name'], params, self.log,
                     get_youngest_rev=lambda: 1,
                     get_changeset=get_changeset,
                     get_node=get_node,
                     previous_rev=lambda rev, path='': None,
                     next_rev=lambda rev, path='': None)
        return repos
class BrowserModulePermissionsTestCase(RequestHandlerPermissionsTestCaseBase):
    """Fine-grained permission tests for the source browser.

    Four mock repositories are registered: '(default)', 'allow', 'deny' and
    'raise'.  The authz policy below makes everything under 'allow*'
    repositories visible to anonymous and everything under 'deny*' hidden,
    with path-level overrides: paths matching '*deny*' inside an 'allow*'
    repository are denied, and paths matching '*allow*' inside a 'deny*'
    repository are allowed.

    NOTE(review): this module targets Python 2 (cStringIO, `unicode`,
    iterator `.next()`).
    """
    authz_policy = """\
[repository:*allow*@*/source:*deny*]
anonymous = !BROWSER_VIEW, !FILE_VIEW
[repository:*deny*@*/source:*allow*]
anonymous = BROWSER_VIEW, FILE_VIEW
[repository:*allow*@*]
anonymous = BROWSER_VIEW, FILE_VIEW
[repository:*deny*@*]
anonymous = !BROWSER_VIEW, !FILE_VIEW
"""

    def setUp(self):
        super(BrowserModulePermissionsTestCase, self).setUp(BrowserModule)
        provider = DbRepositoryProvider(self.env)
        provider.add_repository('(default)', '/', 'mock')
        provider.add_repository('allow', '/', 'mock')
        provider.add_repository('deny', '/', 'mock')
        provider.add_repository('raise', '/', 'mock')

    def tearDown(self):
        # Reset cached repositories so test cases stay independent.
        RepositoryManager(self.env).reload_repositories()
        super(BrowserModulePermissionsTestCase, self).tearDown()

    def test_get_navigation_items_with_browser_view(self):
        """Nav entry stays as long as at least one repository remains."""
        self.grant_perm('anonymous', 'BROWSER_VIEW')
        provider = DbRepositoryProvider(self.env)
        req = MockRequest(self.env, path_info='/')
        self.assertEqual('browser', self.get_navigation_items(req).next()[1])
        provider.remove_repository('allow')
        self.assertEqual('browser', self.get_navigation_items(req).next()[1])
        provider.remove_repository('deny')
        self.assertEqual('browser', self.get_navigation_items(req).next()[1])
        provider.remove_repository('(default)')
        # Only 'raise' is left, which fails to load, so no nav item.
        self.assertEqual([], list(self.get_navigation_items(req)))

    def test_get_navigation_items_without_browser_view(self):
        """Without BROWSER_VIEW the authz policy alone drives visibility."""
        provider = DbRepositoryProvider(self.env)
        req = MockRequest(self.env, path_info='/')
        self.assertEqual('browser', self.get_navigation_items(req).next()[1])
        provider.remove_repository('(default)')
        self.assertEqual('browser', self.get_navigation_items(req).next()[1])
        provider.remove_repository('deny')
        self.assertEqual('browser', self.get_navigation_items(req).next()[1])
        provider.remove_repository('allow')
        self.assertEqual([], list(self.get_navigation_items(req)))

    def test_repository_with_browser_view(self):
        self.grant_perm('anonymous', 'BROWSER_VIEW')
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/')
        rv = self.process_request(req)
        self.assertEqual('', rv[1]['repos'].name)
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/allow')
        rv = self.process_request(req)
        self.assertEqual('allow', rv[1]['repos'].name)
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/deny')
        # The 'deny' repository must raise with full resource context.
        try:
            self.process_request(req)
            self.fail('PermissionError not raised')
        except PermissionError as e:
            self.assertEqual('BROWSER_VIEW', e.action)
            self.assertEqual('source', e.resource.realm)
            self.assertEqual('/', e.resource.id)
            self.assertEqual('repository', e.resource.parent.realm)
            self.assertEqual('deny', e.resource.parent.id)
        DbRepositoryProvider(self.env).remove_repository('(default)')
        req = MockRequest(self.env, path_info='/browser/')
        rv = self.process_request(req)
        self.assertEqual(None, rv[1]['repos'])
        req = MockRequest(self.env, path_info='/browser/blah-blah-file')
        try:
            self.process_request(req)
            self.fail('ResourceNotFound not raised')
        except ResourceNotFound as e:
            self.assertEqual('No node blah-blah-file', unicode(e))

    def test_repository_without_browser_view(self):
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/')
        rv = self.process_request(req)
        # cannot view default repository but don't raise PermissionError
        self.assertEqual(None, rv[1]['repos'])
        req = MockRequest(self.env, path_info='/browser/allow')
        rv = self.process_request(req)
        self.assertEqual('allow', rv[1]['repos'].name)
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/deny')
        try:
            self.process_request(req)
            self.fail('PermissionError not raised')
        except PermissionError as e:
            self.assertEqual('BROWSER_VIEW', e.action)
            self.assertEqual('source', e.resource.realm)
            self.assertEqual('/', e.resource.id)
            self.assertEqual('repository', e.resource.parent.realm)
            self.assertEqual('deny', e.resource.parent.id)
        DbRepositoryProvider(self.env).remove_repository('(default)')
        req = MockRequest(self.env, path_info='/browser/')
        rv = self.process_request(req)
        self.assertEqual(None, rv[1]['repos'])
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/blah-blah-file')
        # Without a default repository the error carries no resource.
        try:
            self.process_request(req)
            self.fail('PermissionError not raised')
        except PermissionError as e:
            self.assertEqual('BROWSER_VIEW', e.action)
            self.assertEqual(None, e.resource)

    def test_node_with_file_view(self):
        """FILE_VIEW checks in the default repository, per-path overrides."""
        self.grant_perm('anonymous', 'BROWSER_VIEW', 'FILE_VIEW')
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/file')
        rv = self.process_request(req)
        self.assertEqual('', rv[1]['repos'].name)
        self.assertEqual('file', rv[1]['path'])
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/allow-file')
        rv = self.process_request(req)
        self.assertEqual('', rv[1]['repos'].name)
        self.assertEqual('allow-file', rv[1]['path'])
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/deny-file')
        try:
            self.process_request(req)
            self.fail('PermissionError not raised')
        except PermissionError as e:
            self.assertEqual('FILE_VIEW', e.action)
            self.assertEqual('source', e.resource.realm)
            self.assertEqual('deny-file', e.resource.id)
            self.assertEqual('repository', e.resource.parent.realm)
            self.assertEqual('', e.resource.parent.id)

    def test_node_in_allowed_repos_with_file_view(self):
        self.grant_perm('anonymous', 'BROWSER_VIEW', 'FILE_VIEW')
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/allow/file')
        rv = self.process_request(req)
        self.assertEqual('allow', rv[1]['repos'].name)
        self.assertEqual('file', rv[1]['path'])
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/allow/allow-file')
        rv = self.process_request(req)
        self.assertEqual('allow', rv[1]['repos'].name)
        self.assertEqual('allow-file', rv[1]['path'])
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/allow/deny-file')
        # '*deny*' path override wins inside an allowed repository.
        try:
            self.process_request(req)
            self.fail('PermissionError not raised')
        except PermissionError as e:
            self.assertEqual('FILE_VIEW', e.action)
            self.assertEqual('source', e.resource.realm)
            self.assertEqual('deny-file', e.resource.id)
            self.assertEqual('repository', e.resource.parent.realm)
            self.assertEqual('allow', e.resource.parent.id)

    def test_node_in_denied_repos_with_file_view(self):
        self.grant_perm('anonymous', 'BROWSER_VIEW', 'FILE_VIEW')
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/deny/allow-file')
        rv = self.process_request(req)
        self.assertEqual('deny', rv[1]['repos'].name)
        self.assertEqual('allow-file', rv[1]['path'])
        # All other paths in the denied repository remain forbidden.
        for path in ('file', 'deny-file'):
            req = MockRequest(self.env, authname='anonymous',
                              path_info='/browser/deny/' + path)
            try:
                self.process_request(req)
                self.fail('PermissionError not raised (path: %r)' % path)
            except PermissionError as e:
                self.assertEqual('FILE_VIEW', e.action)
                self.assertEqual('source', e.resource.realm)
                self.assertEqual(path, e.resource.id)
                self.assertEqual('repository', e.resource.parent.realm)
                self.assertEqual('deny', e.resource.parent.id)

    def test_missing_node_with_browser_view(self):
        """Missing nodes yield ResourceNotFound regardless of authz."""
        self.grant_perm('anonymous', 'BROWSER_VIEW')
        req = MockRequest(self.env, path_info='/browser/allow/missing')
        self.assertRaises(ResourceNotFound, self.process_request, req)
        req = MockRequest(self.env, path_info='/browser/deny/missing')
        self.assertRaises(ResourceNotFound, self.process_request, req)
        req = MockRequest(self.env, path_info='/browser/missing')
        self.assertRaises(ResourceNotFound, self.process_request, req)

    def test_missing_node_without_browser_view(self):
        req = MockRequest(self.env, path_info='/browser/allow/missing')
        self.assertRaises(ResourceNotFound, self.process_request, req)
        req = MockRequest(self.env, path_info='/browser/deny/missing')
        self.assertRaises(ResourceNotFound, self.process_request, req)
        req = MockRequest(self.env, path_info='/browser/missing')
        self.assertRaises(ResourceNotFound, self.process_request, req)

    def test_repository_index_with_hidden_default_repos(self):
        """A hidden default repository is excluded from the index listing."""
        self.grant_perm('anonymous', 'BROWSER_VIEW', 'FILE_VIEW')
        provider = DbRepositoryProvider(self.env)
        provider.modify_repository('(default)', {'hidden': 'enabled'})
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/')
        template, data, content_type = self.process_request(req)
        self.assertEqual(None, data['repos'])
        repo_data = data['repo']  # for repository index
        self.assertEqual('allow', repo_data['repositories'][0][0])
        self.assertEqual('raise', repo_data['repositories'][1][0])
        self.assertEqual(2, len(repo_data['repositories']))

    def test_node_in_hidden_default_repos(self):
        """Hidden default repository nodes stay reachable by direct URL."""
        self.grant_perm('anonymous', 'BROWSER_VIEW', 'FILE_VIEW')
        provider = DbRepositoryProvider(self.env)
        provider.modify_repository('(default)', {'hidden': 'enabled'})
        req = MockRequest(self.env, path_info='/browser/blah-blah-file')
        template, data, content_type = self.process_request(req)
        self.assertEqual('', data['reponame'])
        self.assertEqual('blah-blah-file', data['path'])

    def test_no_viewable_repositories_with_browser_view(self):
        self.grant_perm('anonymous', 'BROWSER_VIEW')
        provider = DbRepositoryProvider(self.env)
        provider.remove_repository('allow')
        provider.remove_repository('(default)')
        provider.remove_repository('raise')
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/')
        try:
            self.process_request(req)
            self.fail('ResourceNotFound not raised')
        except ResourceNotFound as e:
            self.assertEqual('No viewable repositories', unicode(e))
        # 'allow' was removed, so its path now resolves against the
        # (removed) default repository and is simply not found.
        req = MockRequest(self.env, path_info='/browser/allow/')
        try:
            self.process_request(req)
            self.fail('ResourceNotFound not raised')
        except ResourceNotFound as e:
            self.assertEqual('No node allow', unicode(e))
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/deny/')
        try:
            self.process_request(req)
            self.fail('PermissionError not raised')
        except PermissionError as e:
            self.assertEqual('BROWSER_VIEW', e.action)
            self.assertEqual('source', e.resource.realm)
            self.assertEqual('/', e.resource.id)
            self.assertEqual('repository', e.resource.parent.realm)
            self.assertEqual('deny', e.resource.parent.id)
        provider.remove_repository('deny')
        req = MockRequest(self.env, path_info='/browser/')
        try:
            self.process_request(req)
            self.fail('ResourceNotFound not raised')
        except ResourceNotFound as e:
            self.assertEqual('No viewable repositories', unicode(e))
        req = MockRequest(self.env, path_info='/browser/deny/')
        try:
            self.process_request(req)
            self.fail('ResourceNotFound not raised')
        except ResourceNotFound as e:
            self.assertEqual('No node deny', unicode(e))

    def test_no_viewable_repositories_without_browser_view(self):
        provider = DbRepositoryProvider(self.env)
        provider.remove_repository('allow')
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/')
        try:
            self.process_request(req)
            self.fail('PermissionError not raised')
        except PermissionError as e:
            self.assertEqual('BROWSER_VIEW', e.action)
            self.assertEqual(None, e.resource)
        provider.remove_repository('deny')
        provider.remove_repository('(default)')
        req = MockRequest(self.env, authname='anonymous',
                          path_info='/browser/')
        try:
            self.process_request(req)
            self.fail('PermissionError not raised')
        except PermissionError as e:
            self.assertEqual('BROWSER_VIEW', e.action)
            self.assertEqual(None, e.resource)
def suite():
    """Return the test suite for this module."""
    suite = unittest.TestSuite()
    # unittest.makeSuite() is deprecated and removed in Python 3.13;
    # loadTestsFromTestCase() is the supported equivalent (and exists in 2.7).
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(
        BrowserModulePermissionsTestCase))
    return suite
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| |
import datetime
from django.contrib.auth.models import User
from django.contrib.auth.hashers import make_password
from django.conf import settings
from money import Money
import factory
from ..models import (MangoPayNaturalUser, MangoPayBankAccount,
MangoPayLegalUser, MangoPayWallet,
MangoPayCardRegistration, MangoPayCard,
MangoPayRefund, MangoPayPayIn, MangoPayPage,
MangoPayPayOut, MangoPayDocument, MangoPayTransfer)
from ..constants import (IDENTITY_PROOF, BUSINESS, BANK_WIRE, CARD_WEB,
BA_BIC_IBAN, BA_US, BA_OTHER)
# Dotted path of the user factory; projects with a custom user model can
# override it via the AUTH_USER_MODEL_FACTORY Django setting.
user_model_factory = getattr(
    settings,
    "AUTH_USER_MODEL_FACTORY",
    "mangopay.tests.factories.UserFactory")
class UserFactory(factory.DjangoModelFactory):
    """Default factory for Django's built-in User model.

    NOTE(review): `factory.DjangoModelFactory` moved to
    `factory.django.DjangoModelFactory` in factory_boy 2.4+ — confirm the
    pinned version still exposes the old path.
    """
    class Meta:
        model = User

    # Sequence keeps usernames unique across instances.
    username = factory.Sequence(lambda n: 'username{0}'.format(n))
    first_name = "Sven"
    last_name = "Svensons"
    is_active = True
    is_superuser = False
    is_staff = False
    email = "swede@swedishman.com"
    # Hashed up front so the instance can authenticate with "password".
    password = make_password("password")
class MangoPayNaturalUserFactory(factory.DjangoModelFactory):
    """Base factory for a MangoPay natural (individual) user."""
    class Meta:
        model = MangoPayNaturalUser

    # mangopay_id is None until the user is registered with the API.
    mangopay_id = None
    user = factory.SubFactory(user_model_factory)
    birthday = datetime.date(1989, 10, 20)
    country_of_residence = "US"
    nationality = "SE"
    # Fields required only for regular (higher KYC) authentication.
    address = None
    occupation = None
    income_range = None
class LightAuthenticationMangoPayNaturalUserFactory(MangoPayNaturalUserFactory):
    """Natural user with light authentication: no address/occupation/income."""
    pass
class RegularAuthenticationMangoPayNaturalUserFactory(MangoPayNaturalUserFactory):
    """Natural user with the extra fields regular authentication requires."""
    address = "Somewhere over the rainbow"
    occupation = "Cobbler"
    income_range = 1
class MangoPayLegalUserFactory(factory.DjangoModelFactory):
    """Base factory for a MangoPay legal (business) user."""
    class Meta:
        model = MangoPayLegalUser

    type = BUSINESS
    mangopay_id = None
    user = factory.SubFactory(user_model_factory)
    # Birthday/nationality describe the legal representative.
    birthday = datetime.date(1989, 10, 20)
    country_of_residence = "US"
    nationality = "SE"
    address = None
    business_name = "FundedByMe AB"
    generic_business_email = "hello@fundedbyme.com"
    first_name = "Arno"
    last_name = "Smit"
    # Note: field name spelling ("headquaters") mirrors the model field.
    headquaters_address = None
    email = None
class LightAuthenticationMangoPayLegalUserFactory(
        MangoPayLegalUserFactory):
    """Legal user with light authentication: no address or contact email."""
    pass
class RegularAuthenticationMangoPayLegalUserFactory(
        MangoPayLegalUserFactory):
    """Legal user with the extra fields regular authentication requires."""
    address = "Hammerby Sjostad 3"
    headquaters_address = "Sveavagen 1"
    email = "arno.smit@fundedbyme.com"
class MangoPayBankAccountFactory(factory.DjangoModelFactory):
    """Base bank-account factory; subclasses set the per-type fields."""
    class Meta:
        model = MangoPayBankAccount

    mangopay_user = factory.SubFactory(MangoPayNaturalUserFactory)
    mangopay_id = None
    address = "Hundred Acre Wood"
class MangoPayIBANBankAccountFactory(MangoPayBankAccountFactory):
    """European (BIC/IBAN) bank account."""
    account_type = BA_BIC_IBAN
    iban = "SE3550000000054910000003"
    country = "SE"
    bic = "DABAIE2D"
class MangoPayOTHERBankAccountFactory(MangoPayBankAccountFactory):
    """Bank account of type OTHER (plain account number + BIC)."""
    account_type = BA_OTHER
    account_number = "66112231"
    country = "SY"
    bic = "DABAIE2D"
class MangoPayUSBankAccountFactory(MangoPayBankAccountFactory):
    """US bank account (ABA routing number + deposit account type)."""
    country = "US"
    account_type = BA_US
    account_number = "3327586"
    aba = "021000089"
    deposit_account_type = "CHECKING"
class MangoPayCardFactory(factory.DjangoModelFactory):
    """Unregistered payment card: all remote-side fields still unset."""
    class Meta:
        model = MangoPayCard

    mangopay_id = None
    expiration_date = None
    alias = None
    is_active = False
    is_valid = None
class MangoPayCardRegistrationFactory(factory.DjangoModelFactory):
    """Links a user to a card through the card-registration flow."""
    class Meta:
        model = MangoPayCardRegistration

    mangopay_id = None
    mangopay_user = factory.SubFactory(MangoPayNaturalUserFactory)
    mangopay_card = factory.SubFactory(MangoPayCardFactory)
class MangoPayDocumentFactory(factory.DjangoModelFactory):
    """KYC document (identity proof by default), not yet submitted."""
    class Meta:
        model = MangoPayDocument

    mangopay_id = None
    mangopay_user = factory.SubFactory(MangoPayNaturalUserFactory)
    type = IDENTITY_PROOF
    status = None
    refused_reason_message = None
class MangoPayPageFactory(factory.DjangoModelFactory):
    """Single page (image file) attached to a KYC document."""
    class Meta:
        model = MangoPayPage

    document = factory.SubFactory(MangoPayDocumentFactory)
    file = "fake_file.jpg"
class MangoPayWalletFactory(factory.DjangoModelFactory):
    """EUR wallet owned by a natural user."""
    class Meta:
        model = MangoPayWallet

    mangopay_id = None
    mangopay_user = factory.SubFactory(MangoPayNaturalUserFactory)
    currency = "EUR"
class MangoPayPayOutFactory(factory.DjangoModelFactory):
    """Pay-out (wallet -> bank account) with zero amounts, not executed."""
    class Meta:
        model = MangoPayPayOut

    mangopay_id = None
    mangopay_user = factory.SubFactory(MangoPayNaturalUserFactory)
    mangopay_wallet = factory.SubFactory(MangoPayWalletFactory)
    mangopay_bank_account = factory.SubFactory(MangoPayBankAccountFactory)
    execution_date = None
    status = None
    debited_funds = Money(0, "EUR")
    fees = Money(0, "EUR")
class MangoPayPayInAbstractFactory(factory.DjangoModelFactory):
    """Shared pay-in fields; abstract, so never instantiated directly."""
    class Meta:
        model = MangoPayPayIn
        abstract = True

    mangopay_id = None
    execution_date = None
    status = None
    result_code = None
    mangopay_user = factory.SubFactory(MangoPayNaturalUserFactory)
    mangopay_wallet = factory.SubFactory(MangoPayWalletFactory)
class MangoPayPayInFactory(MangoPayPayInAbstractFactory):
    """Card (CARD_WEB) pay-in; optionally pre-populated with refunds."""
    class Meta:
        model = MangoPayPayIn

    mangopay_card = factory.SubFactory(MangoPayCardFactory)
    secure_mode_redirect_url = None
    type = CARD_WEB

    @factory.post_generation
    def mangopay_refunds(self, create, extracted, **kwargs):
        # Hook to attach refunds passed as
        # MangoPayPayInFactory(mangopay_refunds=[...]).
        if not create:
            # Simple build, do nothing.
            return
        if extracted:
            # A list of refunds were passed in, use them
            for mangopay_refund in extracted:
                self.mangopay_refunds.add(mangopay_refund)
class MangoPayPayInBankWireFactory(MangoPayPayInAbstractFactory):
    """Bank-wire pay-in; reference and account set by the API later."""
    class Meta:
        model = MangoPayPayIn

    wire_reference = None
    mangopay_bank_account = None
    type = BANK_WIRE
class MangoPayRefundFactory(factory.DjangoModelFactory):
    """Refund tied to a card pay-in, not yet executed."""
    class Meta:
        model = MangoPayRefund

    mangopay_id = None
    mangopay_user = factory.SubFactory(MangoPayNaturalUserFactory)
    mangopay_pay_in = factory.SubFactory(MangoPayPayInFactory)
    execution_date = None
    status = None
    result_code = None
class MangoPayTransferFactory(factory.DjangoModelFactory):
    """Zero-amount wallet-to-wallet transfer, not yet executed."""
    class Meta:
        model = MangoPayTransfer

    mangopay_id = None
    mangopay_debited_wallet = factory.SubFactory(MangoPayWalletFactory)
    mangopay_credited_wallet = factory.SubFactory(MangoPayWalletFactory)
    debited_funds = Money(0, "EUR")
    execution_date = None
    status = None
    result_code = None
| |
#!/usr/bin/env python
"""Compute the RBP features."""
from __future__ import print_function
import argparse
import re
import subprocess as sp
import sys
import uuid
from os import mkdir
from shutil import rmtree
import numpy as np
import pandas as pd
from rnacommender import fasta_utils
from rnacommender import pfam_utils
__author__ = "Gianluca Corrado"
__copyright__ = "Copyright 2016, Gianluca Corrado"
__license__ = "MIT"
__maintainer__ = "Gianluca Corrado"
__email__ = "gianluca.corrado@unitn.it"
__status__ = "Production"
class RBPVectorizer():
"""Compute the RBP features."""
    def __init__(self, fasta_ref, fasta_sel, output, include_all_sel=False,
                 verbose=True):
        """
        Constructor.

        Parameters
        ----------
        fasta_ref : str
            Fasta file containing the reference sequences. The similarity will
            be computed against the reference sequences.
        fasta_sel : str
            Fasta file containing the selected sequences. The similarity will
            be computed for the selected sequences. (This might be the same
            file as fasta_ref).
        output : str
            Name of the output file. The output file is an HDF5 containing a
            pandas DataFrame, in which the columns are the selected sequence
            names and the rows are the reference sequence names.
        include_all_sel : bool (default: False)
            Includes all the selected sequences even when they have zero
            similarity with all the reference sequences. If a sequence is both
            in the reference and selected sets, and it has zero similarity
            with all the reference proteins except itself, it will be included
            only if this flag is set.
        verbose : bool (default : True)
            Print information to STDOUT.
        """
        self.fasta_ref = fasta_ref
        self.fasta_sel = fasta_sel
        self.output = output
        # Pfam scan output paths; filled in by _pfam_scan().
        self.pfam_scan_ref = None
        self.pfam_scan_sel = None
        self.include_all_sel = include_all_sel
        self.verbose = verbose
        # Per-run scratch directory; uuid4 avoids collisions between runs.
        self._temp_fold = "temp_" + str(uuid.uuid4())
        # Sub-folders of the scratch directory used by the pipeline stages.
        self._dom_ref_fold = "%s/domains_ref" % self._temp_fold
        self._dom_sel_fold = "%s/domains_sel" % self._temp_fold
        self._seeds_fold = "%s/seeds" % self._temp_fold
        self._mod_fold = "%s/mod" % self._temp_fold
        self._fisher_ref_fold = "%s/fisher_scores_ref" % self._temp_fold
        self._fisher_sel_fold = "%s/fisher_scores_sel" % self._temp_fold
def _pfam_scan(self):
"""Scan the sequences against the Pfam database."""
if self.verbose:
print("Scanning RBP sequences against Pfam...")
sys.stdout.flush()
if self.fasta_ref != self.fasta_sel:
self.pfam_scan_ref = "%s/pfam_scan_ref.txt" % self._temp_fold
self.pfam_scan_sel = "%s/pfam_scan_sel.txt" % self._temp_fold
nf_ref = open(self.pfam_scan_ref, "w")
nf_ref.write(pfam_utils.search_header())
nf_sel = open(self.pfam_scan_sel, "w")
nf_sel.write(pfam_utils.search_header())
fasta_ref = fasta_utils.import_fasta(self.fasta_ref)
fasta_sel = fasta_utils.import_fasta(self.fasta_sel)
to_scan = sorted(set(fasta_ref.keys() + fasta_sel.keys()))
for rbp in to_scan:
if self.verbose:
print(rbp, end=', ')
sys.stdout.flush()
if rbp in fasta_ref.keys() and rbp in fasta_sel.keys():
try:
assert fasta_ref[rbp] == fasta_sel[rbp]
except AssertionError:
print('%s: sequence mismatch between ref and sel')
seq = fasta_ref[rbp]
text = pfam_utils.sequence_search(rbp, seq)
nf_ref.write(text)
nf_sel.write(text)
elif rbp in fasta_ref.keys():
seq = fasta_ref[rbp]
text = pfam_utils.sequence_search(rbp, seq)
nf_ref.write(text)
else:
seq = fasta_sel[rbp]
text = pfam_utils.sequence_search(rbp, seq)
nf_sel.write(text)
nf_ref.close()
nf_sel.close()
else:
pfam_scan_file = "%s/pfam_scan.txt" % self._temp_fold
self.pfam_scan_ref = pfam_scan_file
self.pfam_scan_sel = pfam_scan_file
nf = open(pfam_scan_file, "w")
nf.write(pfam_utils.search_header())
fasta = fasta_utils.import_fasta(self.fasta_ref)
for rbp in sorted(fasta.keys()):
if self.verbose:
print(rbp, end=', ')
sys.stdout.flush()
seq = fasta[rbp]
text = pfam_utils.sequence_search(rbp, seq)
nf.write(text)
nf.close()
if self.verbose:
print("Done.\n")
sys.stdout.flush()
    def _overlapping_domains(self):
        """Compute the set of domains contributing to the similarity.

        Returns
        -------
        dom_list : list of str
            Sorted Pfam accessions (version suffix stripped) that occur in
            both the reference and selected scans — or, when the scans are
            shared, accessions hit by more than one distinct sequence.
        """
        if self.verbose:
            print("Determining domain list...", end=' ')
            sys.stdout.flush()
        if self.pfam_scan_ref != self.pfam_scan_sel:
            data_ref = pfam_utils.read_pfam_output(self.pfam_scan_ref)
            # 'PFxxxxx.12' -> 'PFxxxxx': drop the Pfam version suffix.
            doms_ref = set(a.split('.')[0] for a in data_ref["hmm_acc"])
            data_sel = pfam_utils.read_pfam_output(self.pfam_scan_sel)
            doms_sel = set(a.split('.')[0] for a in data_sel["hmm_acc"])
            # Only domains present in BOTH sets can contribute similarity.
            dom_list = sorted(list(doms_ref & doms_sel))
        else:
            data = pfam_utils.read_pfam_output(self.pfam_scan_ref)
            data["hmm_acc"] = [a.split('.')[0] for a in data["hmm_acc"]]
            # One row per (sequence, domain) pair, then count sequences per
            # domain; domains hit by a single sequence cannot overlap.
            rbp_dom = data[["seq_id", "hmm_acc"]].drop_duplicates()
            group = rbp_dom.groupby("hmm_acc").count()
            # NOTE(review): group[group > 1] NaN-masks counts <= 1 and
            # dropna() removes them — verify this still holds on the
            # installed pandas version.
            doms = (group[group > 1]).dropna().index
            dom_list = sorted(list(doms))
        if self.verbose:
            print("Done.\n")
            sys.stdout.flush()
        return dom_list
    def _prepare_domains(self, dom_list):
        """Select domain subsequences from the entire protein sequences.

        For each domain accession in dom_list, writes one fasta file per
        domain (in the ref and sel domain folders) containing the matching
        subsequence of every protein that hit that domain.
        """
        def prepare_domains(fasta_dic, dom_list, pfam_scan, out_folder):
            # One output fasta per domain accession.
            out_file_dic = {}
            for acc in dom_list:
                out_file_dic[acc] = open("%s/%s.fa" % (out_folder, acc), "w")
            f = open(pfam_scan)
            # Skip the header line written by pfam_utils.search_header().
            f.readline()
            for line in f:
                split = line.split()
                rbp = split[0]
                start = int(split[3])
                stop = int(split[4])
                acc = split[5].split('.')[0]
                if acc in out_file_dic.keys():
                    # NOTE(review): Pfam coordinates are 1-based inclusive,
                    # but [start:stop] slices 0-based — confirm the intended
                    # residue window is preserved downstream.
                    out_file_dic[acc].write(
                        ">%s:%i-%i\n%s\n" % (rbp, start, stop,
                                             fasta_dic[rbp][start:stop]))
            f.close()
            for acc in dom_list:
                out_file_dic[acc].close()

        if self.verbose:
            print("Preparing fasta files with domain sequences...", end=' ')
            sys.stdout.flush()
        mkdir(self._dom_ref_fold)
        fasta_ref = fasta_utils.import_fasta(self.fasta_ref)
        prepare_domains(fasta_ref, dom_list, self.pfam_scan_ref,
                        self._dom_ref_fold)
        mkdir(self._dom_sel_fold)
        fasta_sel = fasta_utils.import_fasta(self.fasta_sel)
        prepare_domains(fasta_sel, dom_list, self.pfam_scan_sel,
                        self._dom_sel_fold)
        if self.verbose:
            print("Done.\n")
            sys.stdout.flush()
def _download_seeds(self, dom_list):
"""Download seed sequences for the needed domains."""
if self.verbose:
print("Downloading domain seeds from http://pfam.xfam.org/...",
end=' ')
sys.stdout.flush()
mkdir(self._seeds_fold)
for acc in dom_list:
seed = pfam_utils.download_seed_seqs(acc)
if seed is not None:
nf = open("%s/%s.fa" % (self._seeds_fold, acc), "w")
nf.write(seed)
nf.close()
if self.verbose:
print("Done.\n")
sys.stdout.flush()
    def _build_models(self, dom_list):
        """Wrapper for SAM 3.5 buildmodel.

        Trains one HMM per domain from its seed fasta, writing the model
        files into the mod folder.  Requires the SAM 3.5 'buildmodel'
        binary on PATH.
        """
        if self.verbose:
            print("Building HMM models...")
            sys.stdout.flush()
        mkdir(self._mod_fold)
        for acc in dom_list:
            # shell=True with interpolated paths: safe here because all
            # components come from our own uuid-based temp folder and Pfam
            # accessions, but do not reuse with untrusted input.
            cmd = "buildmodel %s/%s -train %s/%s.fa -randseed 0" % (
                self._mod_fold, acc, self._seeds_fold, acc)
            sp.check_call(cmd, shell=True)
        if self.verbose:
            print("Done.\n")
            sys.stdout.flush()
    def _compute_fisher_scores(self, dom_list):
        """Wrapper for SAM 3.5 get_fisher_scores.

        Scores each domain fasta against its trained HMM and stores the
        raw tool output as <fisher_fold>/<acc>.txt, for both the reference
        and selected domain folders.
        """
        def get_fisher_scores(dom_list, mod_fold, dom_fold, fisher_fold):
            for acc in dom_list:
                cmd = "get_fisher_scores run -i %s/%s.mod -db %s/%s.fa" % (
                    mod_fold, acc, dom_fold, acc)
                # NOTE(review): check_output returns bytes on Python 3 —
                # this write assumes Python 2 str output; confirm runtime.
                fisher = sp.check_output(cmd, shell=True)
                nf = open("%s/%s.txt" % (fisher_fold, acc), "w")
                nf.write(fisher)
                nf.close()

        if self.verbose:
            print("Computing Fisher scores...")
            sys.stdout.flush()
        mkdir(self._fisher_ref_fold)
        get_fisher_scores(dom_list, self._mod_fold, self._dom_ref_fold,
                          self._fisher_ref_fold)
        mkdir(self._fisher_sel_fold)
        get_fisher_scores(dom_list, self._mod_fold, self._dom_sel_fold,
                          self._fisher_sel_fold)
        if self.verbose:
            print("Done.\n")
            sys.stdout.flush()
def _ekm(self, dom_list):
"""Compute the empirical kernel map from the Fisher scores."""
def process_seg(e):
"""Process segment of a SAM 3.5 get_fisher_scores output file."""
seg = e.split()
c = seg[0].split(':')[0]
m = map(float, seg[3:])
return c, m
def read_sam_file(samfile):
"""Read a SAM 3.5 get_fisher_scores output file."""
print(samfile) # debug
f = open(samfile)
data = f.read()
f.close()
columns = []
m = []
split = re.split(">A ", data)[1:]
for e in split:
c, m_ = process_seg(e)
columns.append(c)
m.append(m_)
m = np.matrix(m)
print(m.shape) # debug
print(columns) # debug
df = pd.DataFrame(data=m.T, columns=columns)
return df
def dom_features(fisher_fold, dom_list, names=None):
"""Compute the features with respect to a domain type."""
dfs = []
for acc in dom_list:
df = read_sam_file("%s/%s.txt" % (fisher_fold, acc))
df = df.groupby(df.columns, axis=1).mean()
dfs.append(df)
con = pd.concat(dfs, ignore_index=True)
if names is not None:
add = sorted(list(set(names) - set(con.columns)))
fil = sorted(list(set(names) - set(add)))
con = con[fil]
for c in add:
con[c] = np.zeros(len(con.index), dtype='float64')
con = con[names]
con = con.fillna(0.0)
return con
if self.verbose:
print("Computing Fisher scores...", end=' ')
sys.stdout.flush()
ref_names = fasta_utils.seq_names(self.fasta_ref)
ref = dom_features(self._fisher_ref_fold, dom_list, names=ref_names)
ekm_ref = ref.T.dot(ref)
ekm_ref.index = ekm_ref.columns
if self.include_all_sel:
sel_names = fasta_utils.seq_names(self.fasta_sel)
sel = dom_features(self._fisher_sel_fold, dom_list,
names=sel_names)
else:
sel = dom_features(self._fisher_sel_fold, dom_list)
ekm_sel = sel.T.dot(sel)
ekm_sel.index = ekm_sel.columns
ekm = ref.T.dot(sel)
for rs in ekm.columns:
for rr in ekm.index:
if ekm_ref[rr][rr] > 0 and ekm_sel[rs][rs] > 0:
ekm[rs][rr] /= np.sqrt(ekm_ref[rr][rr] * ekm_sel[rs][rs])
if self.include_all_sel:
# needed if a protein that is in ref and sel has no
# domains from dom_list
for rs in ekm.columns:
for rr in ekm.index:
if rr == rs:
ekm[rs][rr] = 1.0
store = pd.io.pytables.HDFStore(self.output)
store["features"] = ekm
store.close()
if self.verbose:
print("Done.\n")
print("RBP features saved to %s" % self.output)
sys.stdout.flush()
    def vectorize(self):
        """Produce the RBP features.

        Runs the full pipeline (Pfam scan, domain selection, seed download,
        HMM compilation, Fisher scoring, empirical kernel map) and stores
        the result in the HDF file given at construction time. All
        intermediate files live in a temporary folder that is removed at
        the end.
        """
        # create a temporary folder
        mkdir(self._temp_fold)
        # scan the RBP sequences against Pfam
        self._pfam_scan()
        # determine the accession numbers of the pfam domains needed for
        # computing the features
        dom_list = self._overlapping_domains()
        # prepare fasta file with the sequence of the domains
        self._prepare_domains(dom_list)
        # download the alignment of the seeds from pfam and convert it to fasta
        self._download_seeds(dom_list)
        # compile the models using SAM 3.5
        self._build_models(dom_list)
        # compute fisher scores using SAM 3.5
        self._compute_fisher_scores(dom_list)
        # compute the empirical kernel map
        self._ekm(dom_list)
        # remove the temporary folder
        rmtree(self._temp_fold)
if __name__ == '__main__':
    # Command-line entry point: compute the RBP features for the two given
    # fasta files and store them in the requested HDF file.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('fasta_ref', metavar='fasta_ref', type=str,
                        help="""Fasta file containing the reference \
                        RBP sequences.""")
    parser.add_argument('fasta_sel', metavar='fasta_sel', type=str,
                        help="""Fasta file containing the selected RBP \
                        sequences.""")
    parser.add_argument('output', metavar='output', type=str,
                        help="""File name of the HDF Store to save the RBP \
                        features.""")
    parser.add_argument('--all-sel', dest='all_sel', action='store_true',
                        default=False,
                        help="""Return one vector for each selected RBP (even \
                        if the similarity is null with all the reference \
                        RBPs).""")
    parser.add_argument('--quiet', dest='quiet', action='store_true',
                        default=False,
                        help="""Do not print information at STDOUT.""")
    args = parser.parse_args()
    # verbose output unless --quiet was passed
    v = RBPVectorizer(fasta_ref=args.fasta_ref,
                      fasta_sel=args.fasta_sel,
                      include_all_sel=args.all_sel,
                      output=args.output,
                      verbose=(not args.quiet))
    v.vectorize()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import os
import os.path
import time
import boto
from config import *
import amis
import labs
import vpc
def _create_tags(conn, instance, current, lab_tag, user, amibuild, amikey, script):
"""Create instance tags"""
print "Creating instance tags for: {0}-{1}...".format(user, instance['NAME'])
tags = {'Name': '{0}-{1}'.format(user, instance['NAME']),
'Lab': '{0}'.format(lab_tag),
'Trainer': '{0}'.format(TRAINER),
'User': '{0}'.format(user),
'AMI-Build': '{0}'.format(amibuild),
'AMI-Key': '{0}'.format(amikey),
'Script': '{0}'.format(script)}
conn.create_tags(current.id, tags)
def _check_name_tag(conn, user_vpc, instance):
"""Check for existing name tag"""
instances = get_vpc_instances(conn, user_vpc)
for i in instances:
if i.tags['Name'] == TRAINER + '-' + instance['NAME']:
print "An instance with the name tag '{0}' already exists ...".format(instance['NAME'])
instance['NAME'] = raw_input("Enter a different name: ")
_check_name_tag(conn, user_vpc, instance)
return instance
def _create_elastic_ips(conn, instance, current, user):
"""Create AWS elastic IP"""
if instance['ELASTIC_IP']:
print "Allocating elastic ip for: {0} ...".format(instance['NAME'])
elastic_ip = conn.allocate_address(domain='vpc')
conn.associate_address(instance_id=current.id,
allocation_id=elastic_ip.allocation_id)
else:
print "Elastic ip not specified for: {0}-{1}. Skipping ...".format(user, instance['NAME'])
# TODO: leaving just in case we want to implement later
# dns
#def _create_dns(instance):
#r_conn = boto.route53.connect_to_region(config['AWS_REGION'])
#zone = r_conn.get_zone(config['HOSTED-ZONE'])
## create zone if it doesn't exist
#if zone:
#print "Hosted zone {0} exists ...".format(config['HOSTED-ZONE'])
#else:
#print "Hosted zone '{0}' not found. Creating it ...".format(config['HOSTED-ZONE'])
#zone = r_conn.create_zone(config['HOSTED-ZONE'])
#if zone.find_records(instance['NAME'], 'CNAME'):
#print "DNS entrry {0} exists ... deleting it".format(instance['NAME'])
#zone.delete_cname(instance['NAME'])
#print "Creating DNS entry for: {0} ...".format(instance['NAME'])
#zone.add_cname(instance['NAME'], current.public_dns_name, ttl=config['DEFAULT-TTL'])
def configure_devices(instance):
    """Configure block device map and device info text for USER_DATA.

    Returns a tuple ``(bdm, dinfo)``: ``bdm`` is the boto BlockDeviceMapping
    for every device in the instance config, and ``dinfo`` is a shell
    snippet (substituted into the user-data script) that creates the
    filesystem, mount point and fstab entry for each non-root device.
    """
    dinfo = ""
    bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping()
    for device in instance['device']:
        block_device = boto.ec2.blockdevicemapping.EBSBlockDeviceType()
        block_device.size = device['SIZE']
        block_device.delete_on_termination = device['DELETE_ON_TERMINATE']
        bdm[device['DEVICE']] = block_device
        # device info (the root device needs no extra mount commands)
        if device['MOUNT'] != "/":
            f = device['FILESYSTEM']
            m = device['MOUNT']
            # AWS exposes sdX devices as xvdX inside the instance
            d = device['DEVICE'].replace('s', 'xv')
            # accumulate across devices: previously each non-root device
            # reset dinfo, so only the last device's commands survived
            if not dinfo:
                dinfo = "# device info"
            dinfo += '\nmkfs -t {0} {1}'.format(f, d)
            dinfo += '\nmkdir {0}'.format(m)
            dinfo += '\nmount {0} {1}'.format(d, m)
            dinfo += '\necho "{0} {1} {2} defaults 0 2" >> /etc/fstab'.format(d, m, f)
    return bdm, dinfo
def output_user_files(conn, user_vpc, lab_tag):
    """Write user lab/instance info file"""
    with open(USER_FILE) as users:
        for line in users:
            user = line.split(',')[0].strip()
            # directory to store instance info on host
            user_dir = '/host/{0}/users/{1}'.format(VPC, user)
            if not os.path.exists(user_dir):
                os.makedirs(user_dir)
            info = labs.get_user_instance_info(conn, user_vpc, lab_tag, user)
            with open('{0}/{1}.txt'.format(user_dir, lab_tag), 'w') as f:
                f.write('AWS Instances:\n')
                for entry in info:
                    f.write(entry)
def get_vpc_instances(conn, vpc):
    """Get all vpc instances.

    A reservation can hold more than one instance (e.g. when run_instances
    was called with a count > 1), so every instance of every reservation is
    collected — previously only the first instance per reservation was kept.
    """
    instances = []
    reservations = conn.get_all_reservations(filters={'vpc-id': vpc.id})
    for reservation in reservations:
        instances.extend(reservation.instances)
    return instances
def terminate_all_instances(conn, user_vpc):
    """Terminate every instance in the VPC and remove local info files.

    Returns the list of instance objects a termination request was sent
    for, or None when the VPC has no instances.
    """
    instance_ids = []
    instances = get_vpc_instances(conn, user_vpc)
    # get all ids
    for instance in instances:
        instance_ids.append(instance.id)
    if instance_ids:
        conn.terminate_instances(instance_ids=instance_ids)
        print "\nTerminate request sent for all instances ...\n"
        # remove all local user lab/instance details text files
        # NOTE(review): key-pairs.txt lines appear to start with
        # '<user>-...' — the split('-')[0] below relies on that; confirm.
        with open('/host/{0}/key-pairs.txt'.format(VPC)) as users:
            for user in users:
                for f in glob.glob('/host/{0}/users/{1}/*.txt'.format(VPC, user.split('-')[0])):
                    os.remove(f)
        return instances
    else:
        print "\nVPC has no instances ..."
def confirm_terminated(instances):
"""Check instance.state of instances to confirm termination"""
for instance in instances:
while instance.state != 'terminated':
print " Waiting for all instances to terminate ..."
time.sleep(5)
instance.update()
def launch_instances(conn, user_vpc, script, lab,
                     labmod, cfg, security_groups, subnets):
    """Launch lab instances for each user.

    For every user in USER_FILE and every instance entry in the lab config,
    launches COUNT instances (rotating availability zones), reusing a
    pre-built custom AMI when one matching the lab/script/ami-key exists.
    Waits until all instances are running, then tags them and writes the
    per-user info files. Returns nothing; progress is printed to stdout.
    """
    instances = []
    lab_tag = labs.calculate_lab_tag(conn, user_vpc, lab)
    print "Launching '{0}' lab with tag: {1}".format(lab, lab_tag)
    # debug log directory
    if not os.path.exists('train/logs'):
        os.makedirs('train/logs')
    zone_count = vpc.get_starting_zone(subnets)
    zone_max = zone_count + len(subnets)
    with open(USER_FILE) as users:
        for user in users:
            user = user.split(',')[0].strip()
            amikey = 0
            for instance in cfg['instance']:
                amibuild = True
                for count in range(instance['COUNT']):
                    # work on a copy so the template entry stays untouched
                    current = instance.copy()
                    if 'NAME' in instance:
                        if current['COUNT'] > 1:
                            current['NAME'] = instance['NAME'] + '-' + str(count)
                        else:
                            current['NAME'] = instance['NAME']
                    if 'NAMES' in instance:
                        current['NAME'] = instance['NAMES'][count]
                    # autorotate zones
                    if instance['COUNT'] > 1:
                        current['ZONE'] = zone_count
                        zone_count += 1
                        if zone_count == zone_max:
                            zone_count = vpc.get_starting_zone(subnets)
                    # check for unique 'Name' tag
                    # Removed this check for speed
                    # TODO: Handle somehow - prompt or autoset a name
                    #_check_name_tag(conn, user_vpc, current)
                    # security group ids
                    sids = vpc.get_sg_ids(cfg, current, security_groups, VPC_TAG)
                    # device info
                    bdm, dinfo = configure_devices(current)
                    # network interface
                    interface = vpc.create_interface(vpc.get_subnet_id(current, subnets), sids)
                    # ami id
                    ami_id = AMIS[getattr(labmod, current['AMI_KEY'])]
                    # custom ami available?
                    images = conn.get_all_images(owners = ['self'])
                    name_tag = TRAINER + '-{0}-'.format(VPC) + \
                               '{0}-'.format(lab) + \
                               '{0}-'.format(script) + \
                               '{0}'.format(amikey)
                    # a matching custom AMI replaces both the base image and
                    # the provisioning script (AMIBUILD skips re-provisioning)
                    for image in images:
                        if 'Lab' in image.tags:
                            if image.tags['Name'] == name_tag:
                                current['SCRIPT'] = 'AMIBUILD'
                                ami_id = image.id
                    # user data script
                    udata = getattr(labmod, current['SCRIPT'])
                    # save the 'user data' script for reference
                    # useful for lab creation/debug
                    with open('train/logs/{0}.sh'.format(current['NAME']), 'w') as file:
                        file.write(udata.format(fqdn=current['NAME'], dinfo=dinfo))
                    # launch instance
                    print "Launching instance: {0}-{1} ...".format(user, current['NAME'])
                    reservation = conn.run_instances(image_id=ami_id,
                                                     key_name=user + '-{0}'.format(VPC),
                                                     user_data=udata.format(fqdn=current['NAME'],
                                                                            dinfo=dinfo),
                                                     instance_type=current['INSTANCE_TYPE'],
                                                     network_interfaces=interface,
                                                     block_device_map = bdm,
                                                     instance_profile_name=VPC_TAG)
                    # get instance object
                    current_res = reservation.instances[0]
                    # save instance/current
                    instances.append([current, current_res, user, amibuild, amikey, script])
                    amibuild = False
                amikey += 1
    # wait for all instances to finish booting
    print "Waiting for instances to initialize ..."
    time.sleep(20)
    for instance in instances:
        while instance[1].state != 'running':
            print "Waiting for instance '{0}-{1}' to initialize ...".format(instance[2],
                                                                            instance[0]['NAME'])
            time.sleep(0.5)
            instance[1].update()
    # set elastic ips and tag instances
    for instance in instances:
        # disable elastic_ips (for now)
        #_create_elastic_ips(conn, instance[0], instance[1], instance[2])
        _create_tags(conn, instance[0], instance[1], lab_tag,
                     instance[2], instance[3], instance[4], instance[5])
    final = labs.get_lab_instance_info(conn, user_vpc, lab_tag)
    output_user_files(conn, user_vpc, lab_tag)
    print "\nLab '{0}' launched with tag '{1}':".format(lab, lab_tag)
    print "\n  Instances:"
    for instance in final:
        print instance
    print ''
| |
"""Compatibility layer for different database engines
This modules stores logic specific to different database engines. Things
like time-related functions that are similar but not identical, or
information as to expose certain features or not and how to expose them.
For instance, Hive/Presto supports partitions and have a specific API to
list partitions. Other databases like Vertica also support partitions but
have different API to get to them. Other databases don't support partitions
at all. The classes here will use a common interface to specify all this.
The general idea is to use static classes and an inheritance scheme.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import inspect
import textwrap
import time
from flask_babel import lazy_gettext as _
Grain = namedtuple('Grain', 'name label function')
class LimitMethod(object):
    """Enum the ways that limits can be applied"""
    # NOTE(review): the values are opaque markers compared elsewhere;
    # FETCH_MANY presumably limits via the DB-API cursor, WRAP_SQL by
    # wrapping the statement in an outer limiting SELECT — confirm against
    # the callers.
    FETCH_MANY = 'fetch_many'
    WRAP_SQL = 'wrap_sql'
class BaseEngineSpec(object):
    """Base class with the default per-database-engine behavior."""

    engine = 'base'  # str as defined in sqlalchemy.engine.engine
    time_grains = tuple()
    limit_method = LimitMethod.FETCH_MANY

    @classmethod
    def epoch_to_dttm(cls):
        """Return a SQL template converting epoch seconds ({col}) to a datetime."""
        raise NotImplementedError()

    @classmethod
    def epoch_ms_to_dttm(cls):
        """Millisecond variant: reuse the seconds template on {col}/1000."""
        return cls.epoch_to_dttm().replace('{col}', '({col}/1000.0)')

    @classmethod
    def extra_table_metadata(cls, database, table_name, schema_name):
        """Returns engine-specific table metadata"""
        return {}

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        """Render *dttm* as a quoted 'YYYY-MM-DD HH:MM:SS' SQL literal."""
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    def handle_cursor(cls, cursor, query, session):
        """Handle a live cursor between the execute and fetchall calls

        The flow works without this method doing anything, but it allows
        for handling the cursor and updating progress information in the
        query object"""
        pass
class PostgresEngineSpec(BaseEngineSpec):
    """Engine spec for PostgreSQL: DATE_TRUNC-based time grains."""
    engine = 'postgresql'
    time_grains = (
        Grain("Time Column", _('Time Column'), "{col}"),
        Grain("second", _('second'), "DATE_TRUNC('second', {col})"),
        Grain("minute", _('minute'), "DATE_TRUNC('minute', {col})"),
        Grain("hour", _('hour'), "DATE_TRUNC('hour', {col})"),
        Grain("day", _('day'), "DATE_TRUNC('day', {col})"),
        Grain("week", _('week'), "DATE_TRUNC('week', {col})"),
        Grain("month", _('month'), "DATE_TRUNC('month', {col})"),
        Grain("quarter", _('quarter'), "DATE_TRUNC('quarter', {col})"),
        Grain("year", _('year'), "DATE_TRUNC('year', {col})"),
    )

    @classmethod
    def epoch_to_dttm(cls):
        # SQL template; {col} is substituted with the epoch-seconds column
        return "(timestamp 'epoch' + {col} * interval '1 second')"

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        # same literal form as the base implementation, kept explicit here
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
class SqliteEngineSpec(BaseEngineSpec):
    """Engine spec for SQLite: grains built from DATE()/strftime modifiers."""
    engine = 'sqlite'
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}'),
        Grain('day', _('day'), 'DATE({col})'),
        # subtract the day-of-week / day-of-month so the date lands on the
        # start of the period
        Grain("week", _('week'),
              "DATE({col}, -strftime('%w', {col}) || ' days')"),
        Grain("month", _('month'),
              "DATE({col}, -strftime('%d', {col}) || ' days')"),
    )

    @classmethod
    def epoch_to_dttm(cls):
        # SQL template; {col} is substituted with the epoch-seconds column
        return "datetime({col}, 'unixepoch')"

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        iso = dttm.isoformat().replace('T', ' ')
        # pad to microsecond precision so the literal shape is consistent
        if '.' not in iso:
            iso += '.000000'
        return "'{}'".format(iso)
class MySQLEngineSpec(BaseEngineSpec):
    """Engine spec for MySQL: grains built from DATE_ADD/DATE_SUB arithmetic."""
    engine = 'mysql'
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}'),
        Grain("second", _('second'), "DATE_ADD(DATE({col}), "
              "INTERVAL (HOUR({col})*60*60 + MINUTE({col})*60"
              " + SECOND({col})) SECOND)"),
        Grain("minute", _('minute'), "DATE_ADD(DATE({col}), "
              "INTERVAL (HOUR({col})*60 + MINUTE({col})) MINUTE)"),
        Grain("hour", _('hour'), "DATE_ADD(DATE({col}), "
              "INTERVAL HOUR({col}) HOUR)"),
        Grain('day', _('day'), 'DATE({col})'),
        Grain("week", _('week'), "DATE(DATE_SUB({col}, "
              "INTERVAL DAYOFWEEK({col}) - 1 DAY))"),
        Grain("month", _('month'), "DATE(DATE_SUB({col}, "
              "INTERVAL DAYOFMONTH({col}) - 1 DAY))"),
        Grain("quarter", _('quarter'), "MAKEDATE(YEAR({col}), 1) "
              "+ INTERVAL QUARTER({col}) QUARTER - INTERVAL 1 QUARTER"),
        Grain("year", _('year'), "DATE(DATE_SUB({col}, "
              "INTERVAL DAYOFYEAR({col}) - 1 DAY))"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        # DATETIME/DATE targets go through STR_TO_DATE with an explicit
        # format; other types get a plain quoted literal
        if target_type.upper() in ('DATETIME', 'DATE'):
            return "STR_TO_DATE('{}', '%Y-%m-%d %H:%i:%s')".format(
                dttm.strftime('%Y-%m-%d %H:%M:%S'))
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    def epoch_to_dttm(cls):
        # SQL template; {col} is substituted with the epoch-seconds column
        return "from_unixtime({col})"
class PrestoEngineSpec(BaseEngineSpec):
    """Engine spec for Presto: partition metadata and live query progress."""
    engine = 'presto'
    time_grains = (
        Grain('Time Column', _('Time Column'), '{col}'),
        Grain('second', _('second'),
              "date_trunc('second', CAST({col} AS TIMESTAMP))"),
        Grain('minute', _('minute'),
              "date_trunc('minute', CAST({col} AS TIMESTAMP))"),
        Grain('hour', _('hour'),
              "date_trunc('hour', CAST({col} AS TIMESTAMP))"),
        Grain('day', _('day'),
              "date_trunc('day', CAST({col} AS TIMESTAMP))"),
        Grain('week', _('week'),
              "date_trunc('week', CAST({col} AS TIMESTAMP))"),
        Grain('month', _('month'),
              "date_trunc('month', CAST({col} AS TIMESTAMP))"),
        Grain('quarter', _('quarter'),
              "date_trunc('quarter', CAST({col} AS TIMESTAMP))"),
        Grain("week_ending_saturday", _('week_ending_saturday'),
              "date_add('day', 5, date_trunc('week', date_add('day', 1, "
              "CAST({col} AS TIMESTAMP))))"),
        Grain("week_start_sunday", _('week_start_sunday'),
              "date_add('day', -1, date_trunc('week', "
              "date_add('day', 1, CAST({col} AS TIMESTAMP))))"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        # DATE/DATETIME targets are parsed with from_iso8601_date; other
        # types get a plain quoted literal
        if target_type.upper() in ('DATE', 'DATETIME'):
            return "from_iso8601_date('{}')".format(dttm.isoformat())
        return "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S'))

    @classmethod
    def epoch_to_dttm(cls):
        # SQL template; {col} is substituted with the epoch-seconds column
        return "from_unixtime({col})"

    @staticmethod
    def show_partition_pql(
            table_name, schema_name=None, order_by=None, limit=100):
        """Build a SHOW PARTITIONS statement for the given table.

        ``order_by`` is a list of partition columns; when given, the
        partitions are listed newest-first (DESC).
        """
        if schema_name:
            table_name = schema_name + '.' + table_name
        order_by = order_by or []
        order_by_clause = ''
        if order_by:
            order_by_clause = "ORDER BY " + ', '.join(order_by) + " DESC"
        limit_clause = ''
        if limit:
            limit_clause = "LIMIT {}".format(limit)
        return textwrap.dedent("""\
        SHOW PARTITIONS
        FROM {table_name}
        {order_by_clause}
        {limit_clause}
        """).format(**locals())

    @classmethod
    def extra_table_metadata(cls, database, table_name, schema_name):
        """Expose the table's partition columns, the latest partition and
        the statement used to list partitions."""
        indexes = database.get_indexes(table_name, schema_name)
        if not indexes:
            return {}
        cols = indexes[0].get('column_names', [])
        # build the statement once and reuse it: it was previously computed
        # twice with identical arguments
        pql = cls.show_partition_pql(table_name, schema_name, cols)
        df = database.get_df(pql, schema_name)
        latest_part = df.to_dict(orient='records')[0] if not df.empty else None
        return {
            'partitions': {
                'cols': cols,
                'latest': latest_part,
                'partitionQuery': pql,
            }
        }

    @classmethod
    def handle_cursor(cls, cursor, query, session):
        """Updates progress information"""
        polled = cursor.poll()
        # poll returns dict -- JSON status information or ``None``
        # if the query is done
        # https://github.com/dropbox/PyHive/blob/
        # b34bdbf51378b3979eaf5eca9e956f06ddc36ca0/pyhive/presto.py#L178
        while polled:
            # Update the object and wait for the kill signal.
            stats = polled.get('stats', {})
            if stats:
                # the split counters may be absent in early polls; default
                # to 0 so float() never receives None
                completed_splits = float(stats.get('completedSplits') or 0)
                total_splits = float(stats.get('totalSplits') or 0)
                if total_splits and completed_splits:
                    progress = 100 * (completed_splits / total_splits)
                    # progress only ever moves forward
                    if progress > query.progress:
                        query.progress = progress
                        session.commit()
            time.sleep(1)
            polled = cursor.poll()
class MssqlEngineSpec(BaseEngineSpec):
    """Engine spec for Microsoft SQL Server (DATEADD/DATEDIFF grains)."""
    engine = 'mssql'

    @classmethod
    def epoch_to_dttm(cls):
        # was a plain string attribute that shadowed the base classmethod:
        # any caller invoking cls.epoch_to_dttm() (as every other spec
        # expects) got "'str' object is not callable"
        return "dateadd(S, {col}, '1970-01-01')"

    time_grains = (
        Grain("Time Column", _('Time Column'), "{col}"),
        Grain("second", _('second'), "DATEADD(second, "
              "DATEDIFF(second, '2000-01-01', {col}), '2000-01-01')"),
        Grain("minute", _('minute'), "DATEADD(minute, "
              "DATEDIFF(minute, 0, {col}), 0)"),
        Grain("5 minute", _('5 minute'), "DATEADD(minute, "
              "DATEDIFF(minute, 0, {col}) / 5 * 5, 0)"),
        Grain("half hour", _('half hour'), "DATEADD(minute, "
              "DATEDIFF(minute, 0, {col}) / 30 * 30, 0)"),
        Grain("hour", _('hour'), "DATEADD(hour, "
              "DATEDIFF(hour, 0, {col}), 0)"),
        Grain("day", _('day'), "DATEADD(day, "
              "DATEDIFF(day, 0, {col}), 0)"),
        Grain("week", _('week'), "DATEADD(week, "
              "DATEDIFF(week, 0, {col}), 0)"),
        Grain("month", _('month'), "DATEADD(month, "
              "DATEDIFF(month, 0, {col}), 0)"),
        Grain("quarter", _('quarter'), "DATEADD(quarter, "
              "DATEDIFF(quarter, 0, {col}), 0)"),
        Grain("year", _('year'), "DATEADD(year, "
              "DATEDIFF(year, 0, {col}), 0)"),
    )

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        # CONVERT style 126 parses ISO-8601 literals
        return "CONVERT(DATETIME, '{}', 126)".format(dttm.isoformat())
class RedshiftEngineSpec(PostgresEngineSpec):
    """Redshift reuses the Postgres spec unchanged; only the name differs."""
    engine = 'redshift'
class OracleEngineSpec(PostgresEngineSpec):
    """Oracle spec: inherits the Postgres time grains, custom datetime literal."""
    engine = 'oracle'

    @classmethod
    def convert_dttm(cls, target_type, dttm):
        # ISO-8601 literal parsed with an explicit TO_TIMESTAMP format mask
        return (
            """TO_TIMESTAMP('{}', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')"""
        ).format(dttm.isoformat())
class VerticaEngineSpec(PostgresEngineSpec):
    """Vertica reuses the Postgres spec unchanged; only the name differs."""
    engine = 'vertica'
# Registry mapping engine name -> spec class; collects every BaseEngineSpec
# subclass defined in this module (including BaseEngineSpec itself).
engines = {
    o.engine: o for o in globals().values()
    if inspect.isclass(o) and issubclass(o, BaseEngineSpec)}
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
# from glob import glob
from setuptools import setup
# import DistUtilsExtra.command.build_extra
# import DistUtilsExtra.command.build_i18n
# import DistUtilsExtra.command.clean_i18n
# to update i18n .mo files (and merge .pot file into .po files) run on Linux:
# ,,python setup.py build_i18n -m''
# silence pyflakes, __VERSION__ is properly assigned below...
__VERSION__ = '3.0'
# for line in file('networkmgr').readlines():
# if (line.startswith('__VERSION__')):
# exec(line.strip())
PROGRAM_VERSION = __VERSION__
def datafilelist(installbase, sourcebase):
    """Walk *sourcebase* and build a setuptools data_files list.

    Each directory under *sourcebase* yields one (install_dir, [files])
    tuple, with the source prefix swapped for *installbase*.
    """
    result = []
    for dirpath, _subdirs, filenames in os.walk(sourcebase):
        paths = [os.path.join(dirpath, name) for name in filenames]
        result.append((dirpath.replace(sourcebase, installbase), paths))
    return result
# '{prefix}/share/man/man1'.format(prefix=sys.prefix), glob('data/*.1')),
data_files = [
('{prefix}/share/applications'.format(prefix=sys.prefix), ['src/gbi.desktop']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/install.png']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/logo.png']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/create_cfg.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/end.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/error.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/gbiWindow.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/ghostbsd-style.css']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/desktopbsd-style.css']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/install.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/installType.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/keyboard.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/language.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/partition.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/partition_handler.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/root.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/slides.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/timezone.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/use_disk.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/use_zfs.py']),
('{prefix}/lib/gbi'.format(prefix=sys.prefix), ['src/addUser.py']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/detect-laptop.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/detect-nics.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/detect-sheme.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/detect-vmware.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/detect-wifi.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/disk-info.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/disk-label.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/disk-list.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/disk-part.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/enable-net.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/list-components.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/list-rsync-backups.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/list-tzones.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/query-langs.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/send-logs.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/setup-ssh-keys.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/sys-mem.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/test-live.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/test-netup.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/update-part-list.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/xkeyboard-layouts.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/xkeyboard-models.sh']),
('{prefix}/lib/gbi/backend-query'.format(prefix=sys.prefix), ['src/backend-query/xkeyboard-variants.sh']),
('{prefix}/lib/gbi/keyboard'.format(prefix=sys.prefix), ['src/keyboard/layout']),
('{prefix}/lib/gbi/keyboard'.format(prefix=sys.prefix), ['src/keyboard/model']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/af']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/am']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ara']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/at']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/az']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ba']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/bd']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/be']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/bg']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/br']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/brai']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/by']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ca']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ch']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/cn']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/cz']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/de']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/dk']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ee']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/epo']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/es']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/fi']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/fo']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/fr']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/gb']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ge']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/gh']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/gr']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/hr']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/hu']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ie']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/il']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/in']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/iq']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ir']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/is']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/it']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/jp']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ke']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/kg']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/kz']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/latam']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/lk']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/lt']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/lv']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ma']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/me']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/mk']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ml']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/mt']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ng']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/nl']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/no']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ph']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/pk']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/pl']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/pt']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ro']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/rs']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ru']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/se']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/si']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/sk']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/sy']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/th']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/tj']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/tm']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/tr']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/ua']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/us']),
('{prefix}/lib/gbi/keyboard/variant'.format(prefix=sys.prefix), ['src/keyboard/variant/uz']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/D-logo.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/browser.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/customize.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/email.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/help.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/music.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/office.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/photo.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/social.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/software.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/videos.png']),
('{prefix}/lib/gbi/slide-images/desktopbsd'.format(prefix=sys.prefix), ['src/slide-images/desktopbsd/welcome.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/browser.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/customize.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/email.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/help.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/G-logo.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/music.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/office.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/photo.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/social.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/software.png']),
('{prefix}/lib/gbi/slide-images/ghostbsd'.format(prefix=sys.prefix), ['src/slide-images/ghostbsd/welcome.png']),
('{prefix}/lib/gbi/timezone'.format(prefix=sys.prefix), ['src/timezone/continent']),
('{prefix}/lib/gbi/timezone/city'.format(prefix=sys.prefix), ['src/timezone/city/Africa']),
('{prefix}/lib/gbi/timezone/city'.format(prefix=sys.prefix), ['src/timezone/city/America']),
('{prefix}/lib/gbi/timezone/city'.format(prefix=sys.prefix), ['src/timezone/city/Antarctica']),
('{prefix}/lib/gbi/timezone/city'.format(prefix=sys.prefix), ['src/timezone/city/Arctic']),
('{prefix}/lib/gbi/timezone/city'.format(prefix=sys.prefix), ['src/timezone/city/Asia']),
('{prefix}/lib/gbi/timezone/city'.format(prefix=sys.prefix), ['src/timezone/city/Atlantic']),
('{prefix}/lib/gbi/timezone/city'.format(prefix=sys.prefix), ['src/timezone/city/Australia']),
('{prefix}/lib/gbi/timezone/city'.format(prefix=sys.prefix), ['src/timezone/city/Europe']),
('{prefix}/lib/gbi/timezone/city'.format(prefix=sys.prefix), ['src/timezone/city/Indian']),
('{prefix}/lib/gbi/timezone/city'.format(prefix=sys.prefix), ['src/timezone/city/Pacific']),
]
# Add the compiled gettext catalogs to the packaged data files.
data_files.extend(datafilelist('{prefix}/share/locale'.format(prefix=sys.prefix), 'build/mo'))
# cmdclass ={
#    "build" : DistUtilsExtra.command.build_extra.build_extra,
#    "build_i18n" : DistUtilsExtra.command.build_i18n.build_i18n,
#    "clean": DistUtilsExtra.command.clean_i18n.clean_i18n,
#    }
setup(name="gbi",
      version=PROGRAM_VERSION,
      description="GBI is the GhostBSD/DesktopBSD front end user interface for pc-sysinstall",
      license='BSD',
      author='Eric Turgeon',
      # BUG FIX: the URL was missing the ".com" host suffix
      # ('https://github/...' is not a resolvable host).
      url='https://github.com/GhostBSD/gbi/',
      package_dir={'': '.'},
      data_files=data_files,
      # install_requires = [ 'setuptools', ],
      scripts=['gbi'],)
# cmdclass = cmdclass,
| |
#!/usr/bin/env python
import threading
import Queue
import uspp.uspp as serial
import message_dispatcher as dispatcher
# ------------------------------------------------------------------------------
class CanException(Exception):
    """Raised for CAN interface errors (e.g. serial-port connect/close failures)."""
    pass
# ------------------------------------------------------------------------------
class Message:
    """Class representation of a CAN message.

    Attributes:
        id: CAN identifier.
        data: list of payload byte values; for RTR frames the list holds
            None placeholders (only the length/DLC is meaningful).
        extended: True for an extended-identifier frame.
        rtr: True for a remote transmission request frame.
        timestamp: optional receive timestamp (displayed divided by 10,
            so presumably tenths of a second — confirm with the source
            device), or None when not available.
    """

    def __init__(self, id, data=None, extended=True, rtr=False, timestamp=None):
        self.id = id
        self.extended = extended
        self.rtr = rtr
        # BUG FIX: the default used to be the mutable literal `data=[]`,
        # which is shared across all Message instances created without an
        # explicit payload; create a fresh list per instance instead.
        self.data = data if data is not None else []
        self.timestamp = timestamp

    def __str__(self):
        """Create a string representation of the message"""
        buf = []
        if self.timestamp is not None:
            buf.append("[%6.1f] " % (self.timestamp / 10.0))
        if self.extended:
            buf.append("id: %08x dlc: %d >" % (self.id, len(self.data)))
        else:
            buf.append("id: %8x dlc: %d >" % (self.id, len(self.data)))
        if self.rtr:
            # Remote frames carry no payload; just mark them.
            buf.append("rtr")
        else:
            for data in self.data:
                buf.append("%02x" % data)
        return " ".join(buf)
# ------------------------------------------------------------------------------
class SerialInterface:
    """Abstract base class for a CAN interface.

    Uses two threads, one to receive from and the other to transmit messages
    to the serial port.  Subclasses implement _decode()/_encode() for their
    particular wire format.
    """

    # Frame-kind constants (not referenced within this class itself).
    MESSAGE = 0
    RAW = 1

    def __init__(self, port = None, baud = 9600, debug = False):
        self.port = port
        self.baudrate = baud
        self.debugFlag = debug
        self.isConnected = False
        # Set to tell the background receiver thread to stop.
        self.__receiverStopEvent = threading.Event()
        # Decoded messages intended for get(); see NOTE in get() below.
        self.__receiveQueue = Queue.Queue()
        # Accumulates raw characters until a complete frame is decoded.
        self._buf = []

    def __del__(self):
        # Best effort: make sure the receiver thread and port are released.
        self.disconnect()

    def send(self, message):
        """Send a message"""
        self._interface.write(self._encode(message))

    def get(self, block = True, timeout = None):
        """Get the last received message

        Wait for a new message if there is no one in the queue and "block"
        is set True.
        """
        # NOTE(review): __receive() currently hands messages to
        # _processMessage() and the queue put() is commented out there,
        # so this may block forever — confirm intended behavior.
        return self.__receiveQueue.get(block, timeout)

    def connect(self, port = None, baud = None, debug = None):
        """Connect to a serial port and start the receiver thread.

        Raises CanException if the port cannot be opened.
        """
        # close an existing connection (if there is any)
        if self.isConnected:
            self.disconnect()
        # Arguments override the values given at construction time.
        self.port = port if port else self.port
        self.baudrate = baud if baud else self.baudrate
        self.debugFlag = debug if debug else self.debugFlag
        # open serial port
        try:
            self._interface = serial.SerialPort(self.port, timeout = 200, speed = self.baudrate)
            self._interface.flush()
        except serial.SerialPortException:
            raise CanException("could not connect to %s" % self.port)
        self.__receiverStopEvent.clear()
        # start the receiver thread
        self.__receiverThread = threading.Thread(target = self.__receive)
        self.__receiverThread.start()
        self.isConnected = True

    def disconnect(self):
        """Disconnect from the serial port and stop the receiver thread."""
        if not self.isConnected:
            return
        # send a stop event
        self.__receiverStopEvent.set()
        # wait for the two threads to stop their work
        self.__receiverThread.join()
        # close serial port (the port object closes itself on deletion)
        try:
            del self._interface
        except serial.SerialPortException, e:
            raise CanException(e)
        self.isConnected = False

    def _debug(self, text):
        # Emit diagnostics only when debugging was enabled.
        if self.debugFlag:
            print text

    def _sendRaw(self, data):
        # Write raw bytes to the port, bypassing message encoding.
        self._interface.write(data)

    def _decode(self, chr):
        """Collects and decodes messages.

        Subclass hook: feed one received character; return a Message when a
        complete frame has been assembled, otherwise None.
        """
        pass

    def _encode(self, message):
        """Transform a CAN message to a byte stream.

        Subclass hook: return the serialized form of *message*.
        """
        pass

    def __receive(self):
        """Receiver Thread

        Try to read and decode messages from the serial port.
        """
        while not self.__receiverStopEvent.isSet():
            try:
                msg = self._decode(self._interface.read())
                if msg:
                    #self.__receiveQueue.put(msg)
                    self._processMessage(msg)
            except serial.SerialPortException:
                # Presumably a read timeout: back off briefly, then retry
                # (also re-checks the stop event) — confirm.
                self.__receiverStopEvent.wait(0.001)
# ------------------------------------------------------------------------------
class Usb2Can(SerialInterface, dispatcher.MessageDispatcher):
    """Interface for all devices compatible with the CAN232 from Lawicel

    see http://www.can232.com/can232.pdf for further information
    """

    def __init__(self, port = None, baud = 9600, debug = False):
        SerialInterface.__init__(self, port, baud, debug)
        dispatcher.MessageDispatcher.__init__(self)

    def connect(self, port = None, baud = None, debug = None):
        """Open the serial port, reset the adapter and open the CAN channel."""
        SerialInterface.connect(self, port, baud, debug)
        # initialize serial interface by sending some returns and
        # trying to close the channel
        self._sendRaw("\r\r\rC\r")
        # set bitrate (rate code S4; see the CAN232 datasheet) and open the channel
        self._sendRaw("S4\r")
        #self._sendRaw("S6\r")
        self._sendRaw("O\r")

    def _decode(self, chr):
        """Accumulate characters until CR, then parse one Lawicel ASCII frame.

        Returns a Message for 'T'/'t'/'R'/'r' frames, otherwise None.
        """
        if chr != '\r':
            self._buf.append(chr)
            return None
        else:
            data = ''.join(self._buf)
            self._buf = []
            if not data:
                return None
            # First character selects the frame type.
            type = data[0]
            if type == 'T':
                # extended frame: 8 hex id digits + 1 DLC digit, then payload
                data2 = data[10:]
                message_data = []
                # Two hex characters per payload byte.
                for x in range(0, len(data2), 2):
                    message_data.append(int(data2[x:x + 2], 16))
                message = Message( int(data[1:9], 16), message_data, extended = True, rtr = False )
            elif type == 't':
                # standard frame: 3 hex id digits + 1 DLC digit, then payload
                data2 = data[5:]
                message_data = []
                for x in range(0, len(data2), 2):
                    message_data.append(int(data2[x:x + 2], 16))
                message = Message( int(data[1:4], 16), message_data, extended = False, rtr = False )
            elif type == 'R':
                # extended remote frame: only the DLC is known, no payload
                message = Message( int(data[1:9], 16), [None] * int(data[9]), extended = True, rtr = True )
            elif type == 'r':
                # standard remote frame
                message = Message( int(data[1:4], 16), [None] * int(data[4]), extended = False, rtr = True )
            else:
                # all other frame-types are not supported
                return None
            #only dump frames which pass the acceptance filter. This is done in Bootloader._get_message()
            #self._debug("> " + str(message))
            return message

    def _encode(self, message):
        """Serialize a Message into a Lawicel ASCII frame terminated by CR."""
        buf = []
        self._debug("< " + str(message))
        if message.rtr:
            if message.extended:
                buf.append("R%08x%01x" % (message.id, len(message.data)))
            else:
                buf.append("r%03x%01x" % (message.id, len(message.data)))
        else:
            if message.extended:
                buf.append("T%08x%01x" % (message.id, len(message.data)))
            else:
                buf.append("t%03x%01x" % (message.id, len(message.data)))
            # Payload bytes as two hex characters each.
            for data in message.data:
                buf.append("%02x" % data)
        buf.append("\r")
        return ''.join(buf)
# ------------------------------------------------------------------------------
class CanDebugger(SerialInterface, dispatcher.MessageDispatcher):
    """Interface to the Command Shell from the CAN Debugger"""

    def __init__(self, port, baud = 9600, debug = False):
        SerialInterface.__init__(self, port, baud, debug)
        dispatcher.MessageDispatcher.__init__(self)
        import re
        # Matches one shell output line: "<timestamp>: <id> <len>" followed
        # either by " rtr" or by an optional " >" and " xx" hex byte pairs.
        self.regularExpression = re.compile("^[$ ]*(?P<timestamp>\d+):[ ]+(?P<id>\w+)[ ](?P<len>\d)(( rtr)|(( >)?(?P<data>( \w\w)*)))", re.IGNORECASE)

    def connect(self, port = None, baud = None, debug = None):
        """Connect and configure the debugger to pass every message through."""
        SerialInterface.connect(self, port, baud, debug)
        # set filter to receive all messages
        self._sendRaw("set filter 0 0 0\r")
        self._sendRaw("set filter 1 0 0\r")
        self._sendRaw("set filter 2 0 0\r")
        self._sendRaw("set filter 3 0 0\r")

    def _decode(self, chr):
        """Accumulate characters until newline, then parse one shell line.

        Returns a Message when the line matches the expected format,
        otherwise None.
        """
        if chr != '\n':
            self._buf.append(chr)
        else:
            result = self.regularExpression.match(''.join(self._buf))
            self._buf = []
            if result:
                dict = result.groupdict()
                data = dict['data']
                if data:
                    msg_data = []
                    # 'data' looks like " 01 02 ...": consume 3 characters
                    # (separator + 2 hex digits) per payload byte.
                    while data:
                        msg_data.append(int(data[1:3], 16))
                        data = data[3:]
                    rtr = False
                else:
                    # Remote frame: only the DLC is known.
                    msg_data = [None] * int(dict['len'])
                    rtr = True
                id = int(dict['id'], 16)
                # More than 3 hex digits cannot be a standard identifier.
                extended = True if len(dict['id']) > 3 else False
                timestamp = int(dict['timestamp'], 10)
                # create message
                message = Message(id, msg_data, extended = extended, rtr = rtr, timestamp = timestamp)
                self._debug("> " + str(message))
                return message
            return None

    def _encode(self, message):
        """Serialize a Message into a debugger shell transmit line."""
        buf = ["> "]
        self._debug("< " + str(message))
        if message.extended:
            buf.append("%04x %d" % (message.id, len(message.data)))
        else:
            buf.append("%x %d" % (message.id, len(message.data)))
        if message.rtr:
            buf.append(" rtr\r")
        else:
            buf.append(" ")
            for byte in message.data:
                buf.append("%02x" % byte)
            buf.append("\r")
        return ''.join(buf)
# ------------------------------------------------------------------------------
class DebugInterface(SerialInterface, dispatcher.MessageDispatcher):
    """Prints every message without sending it to a serial interface"""

    def __init__(self, port = None, baud = 9600):
        # NOTE(review): SerialInterface.__init__ is not called, so the
        # port/baud arguments are accepted only for signature compatibility
        # and the serial-related attributes are never set up — confirm.
        dispatcher.MessageDispatcher.__init__(self, None)

    def connect(self):
        # Nothing to connect to.
        pass

    def disconnect(self):
        pass

    def send(self, message):
        # Dump the message to stdout instead of transmitting it.
        print message

    def sendRaw(self, data):
        # Raw data is silently discarded.
        pass

    def get(self, block, timeout):
        # NOTE(review): busy-waits forever — nothing is ever received here.
        while 1:
            pass
| |
from datetime import datetime, timedelta
import json
import requests
from StringIO import StringIO
class DictObject(dict):
    """A dict whose entries are also readable/writable as attributes.

    Reading a missing attribute yields None (dict.get semantics).
    With ``nest=True`` every nested dict value is wrapped recursively,
    so chained attribute access works at any depth.
    """

    __getattr__ = dict.get
    __setattr__ = dict.__setitem__

    def __init__(self, obj, nest=False):
        super(DictObject, self).__init__(obj)
        if not nest:
            return
        # Replace nested plain dicts with recursively wrapped copies.
        for key, value in obj.items():
            if isinstance(value, dict):
                self[key] = DictObject(value, nest=True)
class DynamicConfigSource(object):
    """Abstract base class for configuration storage backends.

    Subclasses implement get/set/get_all (and optionally delete).
    ``full_fetch_required`` is True for backends that can only fetch the
    whole configuration at once; DynamicConfig then refreshes its entire
    cache on a miss instead of fetching a single key.
    """

    full_fetch_required = False

    def __init__(self):
        pass

    # NotImplementedError is the idiomatic signal for abstract methods;
    # it subclasses Exception, so existing broad handlers keep working.
    def get(self, key):
        """Return the stored value for *key* (or None when absent)."""
        raise NotImplementedError("You should be using a subclass of DynamicConfigSource.")

    def set(self, key, value):
        """Store *value* under *key*."""
        raise NotImplementedError("You should be using a subclass of DynamicConfigSource.")

    def get_all(self):
        """Return a dict of every stored key/value pair."""
        raise NotImplementedError("You should be using a subclass of DynamicConfigSource.")
class RedisConfigSource(DynamicConfigSource):
    """Configuration backend backed by a redis client.

    Every value is JSON-encoded and stored under ``prefix + key``.
    """

    def __init__(self, redis, prefix=None):
        super(RedisConfigSource, self).__init__()
        self.redis = redis
        # Empty/None prefixes fall back to the default namespace.
        self.prefix = prefix if prefix else "CONFIGI:"

    def _redis_key(self, key):
        """Map a configuration key to its namespaced redis key."""
        return "%s%s" % (self.prefix, key)

    def get(self, key):
        """Fetch and JSON-decode *key*; falsy raw values pass through as-is."""
        raw = self.redis.get(self._redis_key(key))
        return json.loads(raw) if raw else raw

    def set(self, key, value):
        """JSON-encode *value* and store it under the namespaced key."""
        self.redis.set(self._redis_key(key), json.dumps(value))

    def delete(self, key):
        """Remove the namespaced key from redis."""
        self.redis.delete(self._redis_key(key))

    def get_all(self):
        """Scan the namespace and return every key/value pair, decoded."""
        return dict(
            (full_key.replace(self.prefix, ""), json.loads(self.redis.get(full_key)))
            for full_key in self.redis.scan_iter(self.prefix + "*")
        )
class JSONConfigSource(DynamicConfigSource):
    """Read-only configuration backend that fetches a JSON document over HTTP."""

    full_fetch_required = True

    def __init__(self, url):
        super(JSONConfigSource, self).__init__()
        self.url = url

    def get(self, key):
        """Fetch the document and return the value for *key* (None if absent)."""
        return self.get_all().get(key)

    def set(self, key, value):
        raise Exception("This source is read-only.")

    def delete(self, key):
        raise Exception("This source is read-only.")

    def get_all(self):
        """Fetch and parse the whole JSON document."""
        # BUG FIX: json.loads() needs the response body; previously the
        # requests.Response object itself was passed, which raises TypeError.
        return json.loads(requests.get(self.url).text)
class S3ConfigSource(DynamicConfigSource):
    """Configuration backend stored as a single JSON object in S3.

    Works with both boto3-style objects (class name contains 's3.Object',
    using get()/put()) and boto2-style keys (get_contents_as_string /
    set_contents_from_string).
    """

    full_fetch_required = True

    def __init__(self, s3_key_instance):
        super(S3ConfigSource, self).__init__()
        self.k = s3_key_instance

    def _is_boto3_object(self):
        # boto3 resource classes are generated with names like "s3.Object".
        return 's3.Object' in self.k.__class__.__name__

    def _save(self, contents):
        """Serialize *contents* to JSON and upload via whichever API we hold."""
        if self._is_boto3_object():
            output = StringIO()
            json.dump(contents, output)
            output.seek(0)
            self.k.put(Body=output)
        else:
            self.k.set_contents_from_string(json.dumps(contents))

    def get(self, key):
        """Return the value for *key* from the stored document (None if absent)."""
        return self.get_all().get(key)

    def set(self, key, value):
        """Read-modify-write: update *key* and upload the whole document."""
        contents = self.get_all()
        contents[key] = value
        self._save(contents)

    def delete(self, key):
        """Remove *key* and upload the document; raises KeyError if absent."""
        contents = self.get_all()
        del contents[key]
        self._save(contents)

    def get_all(self):
        """Download and parse the whole JSON document."""
        if self._is_boto3_object():
            return json.loads(self.k.get()['Body'].read().decode('utf-8'))
        return json.loads(self.k.get_contents_as_string())
class DynamicConfig(object):
    """Attribute-style facade over a DynamicConfigSource with caching.

    Reading ``config.some_key`` consults a local cache (entries expire
    after `expiry` seconds), falls back to the source, and finally to
    `defaults`.  Assigning ``config.some_key = value`` writes through to
    the source.  The attribute names listed in `intrinsic_keys` are real
    attributes of this object, not configuration keys.
    """

    # Real attributes; everything else goes through __getattr__/__setattr__
    # as a configuration key.
    intrinsic_keys = ['source', 'expiry', 'cache', 'quiet_mode', 'namespace_dicts', 'defaults']

    def __init__(self, source, expiry=300, quiet_mode=True, namespace_dicts=True, defaults=None):
        self.source = source
        self.expiry = expiry
        # Maps key -> (value, fetch time as naive UTC datetime).
        self.cache = {}
        self.quiet_mode = quiet_mode
        self.namespace_dicts = namespace_dicts
        self.defaults = defaults or {}

    def _is_expired(self, last_update):
        """True when a cache entry fetched at *last_update* has passed its TTL."""
        return (last_update + timedelta(seconds=self.expiry)) < datetime.utcnow()

    def _refresh(self, key):
        """Re-fetch *key* (or the whole config, for full-fetch sources).

        Returns the freshly cached value, or None when the source has no
        value for the key.
        """
        if self.source.full_fetch_required:
            # Source can only deliver everything at once: rebuild the cache.
            self.cache = {k: (v, datetime.utcnow()) for k, v in self.source.get_all().items()}
        else:
            value = self.source.get(key)
            if value is not None:
                self.cache[key] = (value, datetime.utcnow())
        value = self.cache.get(key)
        if value is not None:
            return value[0]
        return value

    def all_values(self):
        """Return every key/value pair straight from the source (bypasses cache)."""
        return self.source.get_all()

    def delete(self, key):
        # NOTE(review): deletes from the source but leaves any cached entry
        # in place until it expires — confirm this is intended.
        self.source.delete(key)

    def __getattr__(self, key):
        """Resolve *key* via cache -> source -> defaults."""
        if key in DynamicConfig.intrinsic_keys:
            # NOTE(review): this blank print looks like a debug leftover —
            # confirm before removing.
            print ""
            return super(DynamicConfig, self).__getattribute__(key)
        value = None
        try:
            data = self.cache.get(key)
            if data and not self._is_expired(data[1]):
                value = data[0]
            else:
                value = self._refresh(key)
        except Exception as e:
            import traceback
            traceback.print_exc()
            if self.quiet_mode:
                print "DynamicConfigError: Could not get key {}".format(key)
            else:
                raise DynamicConfigError("Could not get key {}".format(key))
        if value is None:
            # Fall back to defaults (which may itself be another DynamicConfig).
            if isinstance(self.defaults, DynamicConfig):
                value = getattr(self.defaults, key)
            else:
                value = self.defaults.get(key)
        if self.namespace_dicts and isinstance(value, dict):
            # Wrap dict values so nested keys read as attributes too.
            value = DictObject(value, nest=True)
        return value

    def __setattr__(self, key, value):
        """Write *key* to the cache and through to the source."""
        if key in DynamicConfig.intrinsic_keys:
            super(DynamicConfig, self).__setattr__(key, value)
            return
        try:
            # Cache first so an immediate read sees the new value.
            self.cache[key] = (value, datetime.utcnow())
            self.source.set(key, value)
        except Exception as e:
            import traceback
            traceback.print_exc()
            if self.quiet_mode:
                print "DynamicConfigError: Could not set key {}".format(key)
                return
            else:
                raise DynamicConfigError("Could not set key {}".format(key))
class DynamicConfigError(Exception):
    """Raised when a config key cannot be read or written (quiet_mode off)."""
    pass
# End
| |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import re
import sys
import tempfile
import time
import traceback
import zipfile
from django.conf import settings
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.servers.basehttp import FileWrapper
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods
import django.views.debug
import desktop.conf
import desktop.log.log_buffer
from desktop.api import massaged_tags_for_json, massaged_documents_for_json,\
_get_docs
from desktop.lib import django_mako
from desktop.lib.conf import GLOBAL_CONFIG
from desktop.lib.django_util import login_notrequired, render_json, render
from desktop.lib.i18n import smart_str
from desktop.lib.paths import get_desktop_root
from desktop.log.access import access_log_level, access_warn
from desktop.models import UserPreferences, Settings
from desktop import appmanager
# Module-level logger for the desktop views.
LOG = logging.getLogger(__name__)


@require_http_methods(['HEAD'])
def is_alive(request):
    """Lightweight health check: always answers HEAD requests with 200."""
    return HttpResponse('')
def home(request):
    """Render the home page with the user's apps, documents and tags."""
    docs = _get_docs(request.user)
    apps = appmanager.get_apps_dict(request.user)
    # Documents and tags are serialized to JSON for the client-side UI.
    return render('home.mako', request, {
        'apps': apps,
        'json_documents': json.dumps(massaged_documents_for_json(docs, request.user)),
        'json_tags': json.dumps(massaged_tags_for_json(docs, request.user)),
        'tours_and_tutorials': Settings.get_settings().tours_and_tutorials
    })
@access_log_level(logging.WARN)
def log_view(request):
    """
    We have a log handler that retains the last X characters of log messages.
    If it is attached to the root logger, this view will display that history,
    otherwise it will report that it can't be found.

    Superuser only; the optional "q" GET parameter is passed to the template
    as a search query.
    """
    if not request.user.is_superuser:
        return HttpResponse(_("You must be a superuser."))
    l = logging.getLogger()
    # Look for the in-memory ring-buffer handler on the root logger.
    for h in l.handlers:
        if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
            return render('logs.mako', request, dict(log=[l for l in h.buf], query=request.GET.get("q", "")))
    return render('logs.mako', request, dict(log=[_("No logs found!")]))
@access_log_level(logging.WARN)
def download_log_view(request):
    """
    Zip up the log buffer and then return as a file attachment.

    Superuser only.  Falls back to the plain log view when zipping fails,
    and to a "no logs" page when no buffer handler is installed.
    """
    if not request.user.is_superuser:
        return HttpResponse(_("You must be a superuser."))
    l = logging.getLogger()
    for h in l.handlers:
        if isinstance(h, desktop.log.log_buffer.FixedBufferHandler):
            try:
                # We want to avoid doing a '\n'.join of the entire log in memory
                # in case it is rather big. So we write it to a file line by line
                # and pass that file to zipfile, which might follow a more efficient path.
                tmp = tempfile.NamedTemporaryFile()
                log_tmp = tempfile.NamedTemporaryFile("w+t")
                for l in h.buf:
                    log_tmp.write(smart_str(l) + '\n')
                # This is not just for show - w/out flush, we often get truncated logs
                log_tmp.flush()
                t = time.time()
                zip = zipfile.ZipFile(tmp, "w", zipfile.ZIP_DEFLATED)
                zip.write(log_tmp.name, "hue-logs/hue-%s.log" % t)
                zip.close()
                length = tmp.tell()
                # if we don't seek to start of file, no bytes will be written
                tmp.seek(0)
                wrapper = FileWrapper(tmp)
                response = HttpResponse(wrapper, content_type="application/zip")
                response['Content-Disposition'] = 'attachment; filename=hue-logs-%s.zip' % t
                response['Content-Length'] = length
                return response
            except Exception as e:
                # BUG FIX: the '%' operator was previously applied OUTSIDE the
                # logging.exception(...) call, i.e. to its None return value,
                # raising a secondary TypeError inside the error handler.
                # Pass the exception as a lazy formatting argument instead.
                logging.exception("Couldn't construct zip file to write logs to: %s", e)
                return log_view(request)
    return render_to_response("logs.mako", dict(log=[_("No logs found.")]))
@access_log_level(logging.DEBUG)
def prefs(request, key=None):
    """Get or set preferences.

    Without *key*: returns all of the user's preferences as a JSON dict.
    With *key*: the "set" parameter upserts the value, "delete" removes it,
    otherwise the current value (or JSON null) is returned.
    """
    if key is None:
        d = dict( (x.key, x.value) for x in UserPreferences.objects.filter(user=request.user))
        return render_json(d)
    else:
        if "set" in request.REQUEST:
            # Upsert: update the existing row or create a new one.
            try:
                x = UserPreferences.objects.get(user=request.user, key=key)
            except UserPreferences.DoesNotExist:
                x = UserPreferences(user=request.user, key=key)
            x.value = request.REQUEST["set"]
            x.save()
            return render_json(True)
        if "delete" in request.REQUEST:
            # True/False indicates whether anything was actually deleted.
            try:
                x = UserPreferences.objects.get(user=request.user, key=key)
                x.delete()
                return render_json(True)
            except UserPreferences.DoesNotExist:
                return render_json(False)
        else:
            # Plain read: the value, or JSON null when unset.
            try:
                x = UserPreferences.objects.get(user=request.user, key=key)
                return render_json(x.value)
            except UserPreferences.DoesNotExist:
                return render_json(None)
def bootstrap(request):
    """Concatenates bootstrap.js files from all installed Hue apps."""
    # Has some None's for apps that don't have bootstraps.
    # Only apps the user may access are included.
    all_bootstraps = [ (app, app.get_bootstrap_file()) for app in appmanager.DESKTOP_APPS if request.user.has_hue_permission(action="access", app=app.name) ]
    # Iterator over the streams; apps without a bootstrap file are skipped.
    concatenated = [ "\n/* %s */\n%s" % (app.name, b.read()) for app, b in all_bootstraps if b is not None ]
    # HttpResponse can take an iterable as the first argument, which
    # is what happens here.
    return HttpResponse(concatenated, content_type='text/javascript')
# Views registered to contribute a fragment to the status bar.
_status_bar_views = []


def register_status_bar_view(view):
    """Register *view* so its output is included in the status bar."""
    # NOTE: 'global' is redundant here (append does not rebind the name),
    # but kept for clarity of intent.
    global _status_bar_views
    _status_bar_views.append(view)
@access_log_level(logging.DEBUG)
def status_bar(request):
    """
    Concatenates multiple views together to build up a "status bar"/"status_bar".
    These views are registered using register_status_bar_view above.

    A failing view is logged and skipped; the remaining fragments are
    still returned.
    """
    resp = ""
    for view in _status_bar_views:
        try:
            r = view(request)
            if r.status_code == 200:
                resp += r.content
            else:
                LOG.warning("Failed to execute status_bar view %s" % (view,))
        except Exception:
            # BUG FIX: was a bare 'except:', which would also swallow
            # SystemExit/KeyboardInterrupt; catch Exception so a broken view
            # is logged without masking interpreter-level signals.
            LOG.exception("Failed to execute status_bar view %s" % (view,))
    return HttpResponse(resp)
def dump_config(request):
    """Superuser-only view showing the resolved configuration of all apps."""
    # Note that this requires login (as do most apps).
    show_private = False
    conf_dir = os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf")))
    if not request.user.is_superuser:
        return HttpResponse(_("You must be a superuser."))
    if request.GET.get("private"):
        # ?private=1 also reveals config values marked private.
        show_private = True
    apps = sorted(appmanager.DESKTOP_MODULES, key=lambda app: app.name)
    apps_names = [app.name for app in apps]
    # Order the top-level config sections to match the sorted app list.
    top_level = sorted(GLOBAL_CONFIG.get().values(), key=lambda obj: apps_names.index(obj.config.key))
    return render("dump_config.mako", request, dict(
        show_private=show_private,
        top_level=top_level,
        conf_dir=conf_dir,
        apps=apps))
@access_log_level(logging.WARN)
def threads(request):
    """Dumps out server threads. Useful for debugging.

    Superuser only; plain-text stack trace of every live thread.
    """
    if not request.user.is_superuser:
        return HttpResponse(_("You must be a superuser."))
    out = []
    # sys._current_frames() maps each thread id to its current stack frame.
    for thread_id, stack in sys._current_frames().iteritems():
        out.append("Thread id: %s" % thread_id)
        for filename, lineno, name, line in traceback.extract_stack(stack):
            out.append("  %-20s %s(%d)" % (name, filename, lineno))
            out.append("  %-80s" % (line))
        out.append("")
    return HttpResponse("\n".join(out), content_type="text/plain")
@access_log_level(logging.WARN)
def memory(request):
    """Dumps the heap as seen by the configured memory profiler.

    Superuser only.  GET parameters of the form <command>.<position>
    (where command is "type", "from", "to" or "index") build an ordered
    chain of operations that are applied to the profiler's heap object.
    """
    if not request.user.is_superuser:
        return HttpResponse(_("You must be a superuser."))
    if not hasattr(settings, 'MEMORY_PROFILER'):
        return HttpResponse(_("You must enable the memory profiler via the memory_profiler config in the hue.ini."))
    # type, from, to, index
    # Each parsed command is a 4-slot list in this fixed order.
    command_order = {
        'type': 0,
        'from': 1,
        'to': 2,
        'index': 3
    }
    default_command = [None, None, None, None]
    commands = []
    for item in request.GET:
        res = re.match(r'(?P<command>\w+)\.(?P<count>\d+)', item)
        if res:
            d = res.groupdict()
            count = int(d['count'])
            command = str(d['command'])
            # Grow the list so commands[count] exists (copies of the default).
            while len(commands) <= count:
                commands.append(default_command[:])
            commands[count][command_order.get(command)] = request.GET.get(item)
    heap = settings.MEMORY_PROFILER.heap()
    for command in commands:
        if command[0] is not None:
            # 'type': attribute access on the heap object.
            heap = getattr(heap, command[0])
        if command[1] is not None and command[2] is not None:
            # 'from'/'to': slice of the heap rows.
            heap = heap[int(command[1]):int(command[2])]
        if command[3] is not None:
            # 'index': single row.
            heap = heap[int(command[3])]
    return HttpResponse(str(heap), content_type="text/plain")
def jasmine(request):
    """Render the Jasmine JS test-runner page."""
    return render('jasmine.mako', request, None)


@login_notrequired
def unsupported(request):
    """Render the 'unsupported' page; no login required."""
    return render('unsupported.mako', request, None)


def index(request):
    """Site root: delegates to the home view."""
    return home(request)
def serve_404_error(request, *args, **kwargs):
    """Registered handler for 404. We just return a simple error"""
    # Record the miss in the access log before rendering.
    access_warn(request, "404 not found")
    return render("404.mako", request, dict(uri=request.build_absolute_uri()), status=404)
def serve_500_error(request, *args, **kwargs):
    """Registered handler for 500. We use the debug view to make debugging easier."""
    try:
        exc_info = sys.exc_info()
        if exc_info:
            if desktop.conf.HTTP_500_DEBUG_MODE.get() and exc_info[0] and exc_info[1]:
                # If (None, None, None), default server error describing why this failed.
                return django.views.debug.technical_500_response(request, *exc_info)
            else:
                # Could have an empty traceback
                return render("500.mako", request, {'traceback': traceback.extract_tb(exc_info[2])})
        else:
            # exc_info could be empty
            return render("500.mako", request, {})
    finally:
        # Fallback to default 500 response if ours fails
        # Will end up here:
        #  - Middleware or authentication backends problems
        #  - Certain missing imports
        #  - Packaging and install issues
        pass
# Maps the "level" parameter of log_frontend_event to logging levels.
_LOG_LEVELS = {
    "critical": logging.CRITICAL,
    "error": logging.ERROR,
    "warning": logging.WARNING,
    "info": logging.INFO,
    "debug": logging.DEBUG
}

# Frontend-supplied messages are truncated to this many characters.
_MAX_LOG_FRONTEND_EVENT_LENGTH = 1024

# Dedicated logger for events reported by the browser.
_LOG_FRONTEND_LOGGER = logging.getLogger("desktop.views.log_frontend_event")
@login_notrequired
def log_frontend_event(request):
    """
    Logs arguments to server's log. Returns an
    empty string.

    Parameters (specified via either GET or POST) are
    "logname", "level" (one of "debug", "info", "warning",
    "error", or "critical"), and "message".
    """
    def get(param, default=None):
        # Works for both GET and POST submissions.
        return request.REQUEST.get(param, default)

    # Unknown or missing levels fall back to INFO.
    level = _LOG_LEVELS.get(get("level"), logging.INFO)
    # Cap the message size so the frontend cannot flood the log.
    msg = "Untrusted log event from user %s: %s" % (
        request.user,
        get("message", "")[:_MAX_LOG_FRONTEND_EVENT_LENGTH])
    _LOG_FRONTEND_LOGGER.log(level, msg)
    return HttpResponse("")
def commonheader(title, section, user, padding="90px"):
    """
    Returns the rendered common header.

    Determines the current app (whose display_name matches *section*) and
    the list of "other" apps shown to the authenticated user.
    """
    current_app = None
    other_apps = []
    if user.is_authenticated():
        apps = appmanager.get_apps(user)
        apps_list = appmanager.get_apps_dict(user)
        for app in apps:
            # This hard-coded list of app names is excluded from "other apps".
            if app.display_name not in [
                'beeswax', 'impala', 'pig', 'jobsub', 'jobbrowser', 'metastore', 'hbase', 'sqoop', 'oozie', 'filebrowser',
                'useradmin', 'search', 'help', 'about', 'zookeeper', 'proxy', 'rdbms', 'spark', 'indexer', 'security']:
                other_apps.append(app)
            if section == app.display_name:
                current_app = app
    else:
        # Anonymous users see no app list.
        apps_list = []
    return django_mako.render_to_string("common_header.mako", {
        'current_app': current_app,
        'apps': apps_list,
        'other_apps': other_apps,
        'title': title,
        'section': section,
        'padding': padding,
        'user': user,
        'is_demo': desktop.conf.DEMO_ENABLED.get()
    })
def commonshare():
    """Returns the rendered common share template (takes no context)."""
    return django_mako.render_to_string("common_share.mako", {})
def commonfooter(messages=None):
    """
    Returns the rendered common footer.

    *messages* is an optional dict passed through to the template.
    """
    if messages is None:
        messages = {}
    hue_settings = Settings.get_settings()
    return django_mako.render_to_string("common_footer.mako", {
        'messages': messages,
        'version': settings.HUE_DESKTOP_VERSION,
        'collect_usage': collect_usage(),
        'tours_and_tutorials': hue_settings.tours_and_tutorials
    })
def collect_usage():
    """True when usage collection is enabled in both the config and the saved settings."""
    return desktop.conf.COLLECT_USAGE.get() and Settings.get_settings().collect_usage
# If the app's conf.py has a config_validator() method, call it.
CONFIG_VALIDATOR = 'config_validator'

#
# Cache config errors because (1) they mostly don't go away until restart,
# and (2) they can be costly to compute. So don't stress the system just because
# the dock bar wants to refresh every n seconds.
#
# The actual viewing of all errors may choose to disregard the cache.
#
# None until first computed; then a list of (confvar, err_msg) tuples.
_CONFIG_ERROR_LIST = None
def _get_config_errors(request, cache=True):
    """Returns a list of (confvar, err_msg) tuples.

    With cache=True (default) a previously computed result is reused;
    cache=False forces re-validation of every app.
    """
    global _CONFIG_ERROR_LIST
    if not cache or _CONFIG_ERROR_LIST is None:
        error_list = [ ]
        for module in appmanager.DESKTOP_MODULES:
            # Get the config_validator() function
            try:
                validator = getattr(module.conf, CONFIG_VALIDATOR)
            except AttributeError:
                # App exposes no validator; nothing to check.
                continue
            if not callable(validator):
                LOG.warn("Auto config validation: %s.%s is not a function" %
                         (module.conf.__name__, CONFIG_VALIDATOR))
                continue
            try:
                error_list.extend(validator(request.user))
            except Exception, ex:
                # A broken validator must not abort the whole check.
                LOG.exception("Error in config validation by %s: %s" % (module.nice_name, ex))
        _CONFIG_ERROR_LIST = error_list
    return _CONFIG_ERROR_LIST
def check_config(request):
    """Check config and view for the list of errors"""
    if not request.user.is_superuser:
        return HttpResponse(_("You must be a superuser."))
    conf_dir = os.path.realpath(os.getenv("HUE_CONF_DIR", get_desktop_root("conf")))
    # cache=False: this page always shows freshly computed errors.
    return render('check_config.mako', request, {
        'error_list': _get_config_errors(request, cache=False),
        'conf_dir': conf_dir
    },
        force_template=True)
def check_config_ajax(request):
    """Alert administrators about configuration problems."""
    if not request.user.is_superuser:
        return HttpResponse('')
    error_list = _get_config_errors(request)
    if not error_list:
        # Skip the mako template entirely when there is nothing to show;
        # this endpoint is polled frequently by the dock bar.
        return HttpResponse('')
    return render('config_alert_dock.mako', request,
                  {'error_list': error_list},
                  force_template=True)
| |
"""Media accessors
All media accessor __init__s take a `root` argument, which should be a path
to the root of the media directory.
Alternatively, `root` can be a custom MediaFile subclass.
Most __init__s take an ORM object as a second argument.
Their various methods take a number of arguments specifying exactly which
file you want (such as the female sprite, backsprite, etc.).
ValueError is raised when the specified file cannot be found.
The accessors use fallbacks: for example Bulbasaur's males and females look the
same, so if you request Bulbasaur's female sprite, it will give you the common
image. Or for a Pokemon without individual form sprites, you will get the
common base sprite. Or for versions without shiny Pokemon, you will always
get the non-shiny version (that's how shiny Pokemon looked there!).
However arguments such as `animated` don't use fallbacks.
You can set `strict` to True to disable these fallbacks and cause ValueError
to be raised when the exact specific file you asked for is not found. This is
useful for listing non-duplicate sprites, for example.
Use keyword arguments when calling the media-getting methods, unless noted
otherwise.
The returned "file" objects have useful attributes like relative_path,
path, and open().
All images are in the PNG format, except animations (GIF). All sounds are OGGs.
"""
import os
from functools import partial
import six
class MediaFile(object):
    """Represents a file: picture, sound, etc.

    Attributes:
        path_elements: List of directory/file names that make up relative_path
        relative_path: Filesystem path relative to the root
        path: Absolute path to the file
        exists: True if the file exists
        media_available: false if no media is available at the given root.
        open(): Open the file
    """
    def __init__(self, root, *path_elements):
        self.path_elements = path_elements
        self.root = root

    @property
    def relative_path(self):
        """Path of this file relative to the media root."""
        return os.path.join(*self.path_elements)

    @property
    def path(self):
        """Absolute filesystem path to this file."""
        return os.path.join(self.root, *self.path_elements)

    def open(self):
        """Open this file for reading, in the appropriate mode (i.e. binary)
        """
        return open(self.path, 'rb')

    @property
    def exists(self):
        """True if this file exists on disk."""
        return os.path.exists(self.path)

    @property
    def media_available(self):
        """True if the media root directory itself is present."""
        return os.path.isdir(self.root)

    def __eq__(self, other):
        # Compare by absolute path. Return NotImplemented for foreign types
        # instead of raising AttributeError on `other.path`.
        if not isinstance(other, MediaFile):
            return NotImplemented
        return self.path == other.path

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Defining __eq__ alone would make instances unhashable on Python 3;
        # hash on the same key __eq__ compares.
        return hash(self.path)

    def __str__(self):
        return '<Pokedex file %s>' % self.relative_path
class BaseMedia(object):
    """Common plumbing shared by every media accessor.

    `root` is either a filesystem path string (a MediaFile bound to that
    path is used) or a custom MediaFile subclass/factory.
    """
    def __init__(self, root):
        if isinstance(root, six.string_types):
            self.file_class = partial(MediaFile, root)
        else:
            self.file_class = root

    @property
    def available(self):
        """True when any media is present at the configured root."""
        return self.file_class().media_available

    def from_path_elements(self, path_elements, basename, extension,
                           surely_exists=False):
        """Build a file under this accessor's toplevel directory.

        Raises ValueError if the file is missing, unless `surely_exists`
        is set (which skips the stat entirely).
        """
        elements = [self.toplevel_dir] + path_elements + [basename + extension]
        found = self.file_class(*elements)
        if not (surely_exists or found.exists):
            raise ValueError('File %s not found' % found.path)
        return found
class _BasePokemonMedia(BaseMedia):
    """Shared implementation for all Pokemon-flavored media accessors.

    Subclasses override the class attributes below to describe the thing
    they represent.
    """
    toplevel_dir = 'pokemon'
    # True for species/forms that have distinct female sprites.
    has_gender_differences = False
    # True when this accessor represents a whole species.
    is_species = False
    # True for real Pokemon (not eggs, substitutes, or the unknown "?").
    is_proper = False
    # Generation the Pokemon first appeared in (0 = always available).
    introduced_in = 0

    # Info about what's inside the pokemon main sprite directories, so we
    # don't have to check directory existence all the time.
    # Maps version directory name -> (generation, set of available extras).
    _pokemon_sprite_info = {
        'red-blue': (1, set('back gray'.split())),
        'red-green': (1, set('back gray'.split())),
        'yellow': (1, set('back gray gbc'.split())),
        'gold': (2, set('back shiny'.split())),
        'silver': (2, set('back shiny'.split())),
        'crystal': (2, set('animated back shiny'.split())),
        'ruby-sapphire': (3, set('back shiny'.split())),
        'emerald': (3, set('animated back shiny frame2'.split())),
        'firered-leafgreen': (3, set('back shiny'.split())),
        'diamond-pearl': (4, set('back shiny female frame2'.split())),
        'platinum': (4, set('back shiny female frame2'.split())),
        'heartgold-soulsilver': (4, set('back shiny female frame2'.split())),
        'black-white': (5, set('back shiny female'.split())),
    }

    def __init__(self, root, species_id, form_postfix=None):
        BaseMedia.__init__(self, root)
        self.species_id = str(species_id)
        self.form_postfix = form_postfix

    def _get_file(self, path_elements, extension, strict, surely_exists=False):
        """Fetch a file, preferring the form-specific name.

        Falls back to the bare species filename when the form-specific one
        is missing, unless `strict` is set.
        """
        basename = str(self.species_id)
        if self.form_postfix:
            fullname = basename + self.form_postfix
            try:
                return self.from_path_elements(
                    path_elements, fullname, extension,
                    surely_exists=surely_exists)
            except ValueError:
                if strict:
                    raise
        return self.from_path_elements(path_elements, basename, extension,
                                       surely_exists=surely_exists)

    def sprite(self,
               version='black-white',
               # The media directories are in this order:
               animated=False,
               back=False,
               color=None,
               shiny=False,
               female=False,
               frame=None,
               strict=False,
               ):
        """Get a main sprite sprite for a pokemon.

        Everything except version should be given as a keyword argument.

        Either specify version as an ORM object, or give the version path as
        a string (which is the only way to get 'red-green'). Leave the default
        for the latest version.

        animated: get a GIF animation (currently Crystal & Emerald only)
        back: get a backsprite instead of a front one
        color: can be 'gray' (RGBY only) or 'gbc' (Yellow only)
        shiny: get a shiny sprite. In old versions, gives a normal sprite unless
            `strict` is set
        female: get a female sprite instead of male. For pokemon with no sexual
            dimorphism, gets the common sprite unless `strict` is set.
        frame: set to 2 to get the second frame of the animation
            (Emerald, DPP, and HG/SS only)

        If the sprite is not found, raise a ValueError.
        """
        if isinstance(version, six.string_types):
            version_dir = version
            try:
                generation, info = self._pokemon_sprite_info[version_dir]
            except KeyError:
                # BUG FIX: the directory name was previously passed as a
                # second ValueError argument instead of %-formatted into
                # the message.
                raise ValueError(
                    'Version directory %s not found' % version_dir)
        else:
            version_dir = version.identifier
            try:
                generation, info = self._pokemon_sprite_info[version_dir]
            except KeyError:
                # Fall back to the version group's combined directory name,
                # e.g. "ruby-sapphire" for the Ruby version.
                version_group = version.version_group
                version_dir = '-'.join(
                    v.identifier for v in version_group.versions)
                try:
                    generation, info = self._pokemon_sprite_info[version_dir]
                except KeyError:
                    # BUG FIX: same comma-instead-of-% problem as above.
                    raise ValueError(
                        'Version directory %s not found' % version_dir)
        if generation < self.introduced_in:
            raise ValueError("Pokemon %s didn't exist in %s" % (
                self.species_id, version_dir))
        path_elements = ['main-sprites', version_dir]
        if animated:
            if 'animated' not in info:
                raise ValueError("No animated sprites for %s" % version_dir)
            path_elements.append('animated')
            extension = '.gif'
        else:
            extension = '.png'
        if back:
            if version_dir == 'emerald':
                # Emerald backsprites are the same as ruby/sapphire
                if strict:
                    raise ValueError("Emerald uses R/S backsprites")
                if animated:
                    raise ValueError("No animated backsprites for Emerald")
                path_elements[1] = version_dir = 'ruby-sapphire'
            if version_dir == 'crystal' and animated:
                raise ValueError("No animated backsprites for Crystal")
            path_elements.append('back')
        if color == 'gray':
            if 'gray' not in info:
                raise ValueError("No grayscale sprites for %s" % version_dir)
            path_elements.append('gray')
        elif color == 'gbc':
            if 'gbc' not in info:
                raise ValueError("No GBC sprites for %s" % version_dir)
            path_elements.append('gbc')
        elif color:
            raise ValueError("Unknown color scheme: %s" % color)
        if shiny:
            if 'shiny' in info:
                path_elements.append('shiny')
            elif strict:
                raise ValueError("No shiny sprites for %s" % version_dir)
        if female:
            female_sprite = self.has_gender_differences
            # Chimecho's female back frame 2 sprite has one hand in
            # a slightly different pose, in Platinum and HGSS
            # (we have duplicate sprites frame 1, for convenience)
            if self.species_id == '358' and back and version_dir in (
                    'platinum', 'heartgold-soulsilver'):
                female_sprite = True
            female_sprite = female_sprite and 'female' in info
            if female_sprite:
                path_elements.append('female')
            elif strict:
                raise ValueError(
                    'Pokemon %s has no gender differences' % self.species_id)
        if not frame or frame == 1:
            pass
        elif frame == 2:
            if 'frame2' in info:
                path_elements.append('frame%s' % frame)
            else:
                raise ValueError("No frame 2 for %s" % version_dir)
        else:
            raise ValueError("Bad frame %s" % frame)
        return self._get_file(path_elements, extension, strict=strict,
                              # Avoid a stat in the common case
                              surely_exists=(self.is_species
                                             and version_dir == 'black-white'
                                             and not back and not female))

    def _maybe_female(self, path_elements, female, strict):
        """Resolve a file with the optional 'female' subdirectory fallback."""
        if female:
            if self.has_gender_differences:
                elements = path_elements + ['female']
                try:
                    return self._get_file(elements, '.png', strict=strict)
                except ValueError:
                    if strict:
                        raise
            elif strict:
                raise ValueError(
                    'Pokemon %s has no gender differences' % self.species_id)
        return self._get_file(path_elements, '.png', strict=strict)

    def icon(self, female=False, strict=False):
        """Get the Pokemon's menu icon"""
        return self._maybe_female(['icons'], female, strict)

    def sugimori(self, female=False, strict=False):
        """Get the Pokemon's official art, drawn by Ken Sugimori"""
        return self._maybe_female(['sugimori'], female, strict)

    def overworld(self,
                  direction='down',
                  shiny=False,
                  female=False,
                  frame=1,
                  strict=False,
                  ):
        """Get an overworld sprite

        direction: 'up', 'down', 'left', or 'right'
        shiny: true for a shiny sprite
        female: true for female sprite (or the common one for both M & F)
        frame: 2 for the second animation frame
        strict: disable fallback for `female`
        """
        path_elements = ['overworld']
        if shiny:
            path_elements.append('shiny')
        if female:
            if self.has_gender_differences:
                path_elements.append('female')
            elif strict:
                raise ValueError('No female overworld sprite')
            else:
                female = False
        path_elements.append(direction)
        if frame and frame > 1:
            path_elements.append('frame%s' % frame)
        try:
            return self._get_file(path_elements, '.png', strict=strict)
        except ValueError:
            # No female variant on disk: retry with the common sprite.
            if female and not strict:
                path_elements.remove('female')
                return self._get_file(path_elements, '.png', strict=strict)
            else:
                raise

    def footprint(self, strict=False):
        """Get the Pokemon's footprint"""
        return self._get_file(['footprints'], '.png', strict=strict)

    def trozei(self, strict=False):
        """Get the Pokemon's animated Trozei sprite"""
        return self._get_file(['trozei'], '.gif', strict=strict)

    def cry(self, strict=False):
        """Get the Pokemon's cry"""
        return self._get_file(['cries'], '.ogg', strict=strict)

    def cropped_sprite(self, strict=False):
        """Get the Pokemon's cropped sprite"""
        return self._get_file(['cropped'], '.png', strict=strict)
class PokemonFormMedia(_BasePokemonMedia):
    """Media related to a PokemonForm
    """
    is_proper = True

    def __init__(self, root, pokemon_form):
        # Forms with their own identifier get a "-identifier" filename
        # postfix; the base form shares the plain species filename.
        form_id = pokemon_form.form_identifier
        postfix = '-' + form_id if form_id else None
        species = pokemon_form.species
        _BasePokemonMedia.__init__(self, root, species.id, postfix)
        self.form = pokemon_form
        self.has_gender_differences = species.has_gender_differences
        self.introduced_in = pokemon_form.version_group.generation_id
class PokemonSpeciesMedia(_BasePokemonMedia):
    """Media related to a PokemonSpecies
    """
    is_species = True
    is_proper = True

    def __init__(self, root, species):
        # Species-level media is keyed on the species id alone: no form
        # postfix is used.
        _BasePokemonMedia.__init__(self, root, species.id)
        self.has_gender_differences = species.has_gender_differences
        self.introduced_in = species.generation_id
class UnknownPokemonMedia(_BasePokemonMedia):
    """Media related to the unknown Pokemon ("?")

    Note that not a lot of files are available for it.
    """
    def __init__(self, root):
        # Files for "?" are stored under the pseudo species id '0'.
        _BasePokemonMedia.__init__(self, root, '0')
class EggMedia(_BasePokemonMedia):
    """Media related to a pokemon egg

    Note that not a lot of files are available for these.

    Give a Manaphy as `species` to get the Manaphy egg.
    """
    def __init__(self, root, species=None):
        # Only the Manaphy egg has its own distinct artwork.
        is_manaphy = bool(species) and species.identifier == 'manaphy'
        postfix = '-manaphy' if is_manaphy else None
        _BasePokemonMedia.__init__(self, root, 'egg', postfix)
class SubstituteMedia(_BasePokemonMedia):
    """Media related to the Substitute sprite

    Note that not a lot of files are available for Substitute.
    """
    def __init__(self, root):
        # Files live under the pseudo species id 'substitute'.
        _BasePokemonMedia.__init__(self, root, 'substitute')
class _BaseItemMedia(BaseMedia):
    """Shared implementation for item media accessors."""
    toplevel_dir = 'items'

    def underground(self, rotation=0):
        """Get the item's sprite as it appears in the Sinnoh underground

        Rotation can be 0, 90, 180, or 270.
        """
        # Rotated variants carry a "-<degrees>" filename suffix.
        basename = self.identifier
        if rotation:
            basename += '-%s' % rotation
        return self.from_path_elements(['underground'], basename, '.png')
class ItemMedia(_BaseItemMedia):
    """Media related to an item
    """
    def __init__(self, root, item):
        _BaseItemMedia.__init__(self, root)
        self.item = item
        self.identifier = item.identifier

    def sprite(self, version=None):
        """Get the item's sprite

        If version is not given, use the latest version.
        """
        identifier = self.identifier
        # Handle machines
        # We check the identifier, so that we don't query the machine
        # information for any item.
        if identifier.startswith(('tm', 'hm')):
            try:
                int(identifier[2:])
            except ValueError:
                # Not really a TM/HM
                pass
            else:
                # TM/HM sprites are named after the type of the move they
                # teach (e.g. "tm-fire"), which varies per version group.
                machines = self.item.machines
                if version:
                    try:
                        machine = [
                            m for m in machines
                            if m.version_group == version.version_group
                        ][0]
                    except IndexError:
                        raise ValueError("%s doesn't exist in %s" % (
                            identifier, version.identifier))
                else:
                    # They're ordered, so get the last one
                    machine = machines[-1]
                type_identifier = machine.move.type.identifier
                identifier = identifier[:2] + '-' + type_identifier
        elif identifier.startswith('data-card-'):
            try:
                int(identifier[10:])
            except ValueError:
                # Not a real data card???
                pass
            else:
                # All numbered data cards share one sprite.
                identifier = 'data-card'
        if version is not None:
            generation_id = version.generation.id
            # The Dowsing Machine was called the Itemfinder before gen 4.
            if generation_id <= 3 and identifier == 'dowsing-mchn':
                identifier = 'itemfinder'
            try:
                gen = 'gen%s' % generation_id
                return self.from_path_elements([gen], identifier, '.png')
            except ValueError:
                # No generation-specific art; fall through to the generic
                # sprite below.
                pass
        return self.from_path_elements([], identifier, '.png',
                                       surely_exists=True)

    def underground(self, rotation=0):
        """Get the item's sprite as it appears in the Sinnoh underground

        Rotation can be 0, 90, 180, or 270.
        """
        if not self.item.appears_underground:
            raise ValueError("%s doesn't appear underground" % self.identifier)
        return super(ItemMedia, self).underground(rotation=rotation)

    def berry_image(self):
        """Get a berry's big sprite
        """
        if not self.item.berry:
            raise ValueError("%s is not a berry" % self.identifier)
        return self.from_path_elements(['berries'], self.identifier, '.png')
class UndergroundRockMedia(_BaseItemMedia):
    """Media related to a rock in the Sinnoh underground

    rock_type can be one of: i, ii, o, o-big, s, t, z
    """
    def __init__(self, root, rock_type):
        _BaseItemMedia.__init__(self, root)
        # Rock sprites are stored as items/underground/rock-<type>.png.
        self.identifier = 'rock-%s' % rock_type
class UndergroundSphereMedia(_BaseItemMedia):
    """Media related to a sphere in the Sinnoh underground

    color can be one of: red, blue, green, pale, prism
    """
    def __init__(self, root, color, big=False):
        _BaseItemMedia.__init__(self, root)
        # Big spheres get a "-big" suffix on the base "<color>-sphere" name.
        size_suffix = '-big' if big else ''
        self.identifier = '%s-sphere%s' % (color, size_suffix)
class _SimpleIconMedia(BaseMedia):
    # Base for accessors exposing a single icon named after an ORM object's
    # identifier, stored directly under the subclass's toplevel_dir.
    def __init__(self, root, thing):
        BaseMedia.__init__(self, root)
        self.identifier = thing.identifier

    def icon(self):
        """Return this object's icon."""
        return self.from_path_elements([], self.identifier, '.png')
class DamageClassMedia(_SimpleIconMedia):
    # Icons live under damage-classes/.
    toplevel_dir = 'damage-classes'
class HabitatMedia(_SimpleIconMedia):
    # Icons live under habitats/.
    toplevel_dir = 'habitats'
class ShapeMedia(_SimpleIconMedia):
    # Icons live under shapes/.
    toplevel_dir = 'shapes'
class ItemPocketMedia(_SimpleIconMedia):
    toplevel_dir = 'item-pockets'

    def icon(self, selected=False):
        """Return the pocket icon; `selected` picks the highlighted variant
        stored in the selected/ subdirectory."""
        subdir = ['selected'] if selected else []
        return self.from_path_elements(subdir, self.identifier, '.png')
class _LanguageIconMedia(_SimpleIconMedia):
    # Like _SimpleIconMedia, but icons are localized in per-language
    # subdirectories.
    def icon(self, lang='en'):
        """Return the icon localized for `lang` (a language subdirectory)."""
        return self.from_path_elements([lang], self.identifier, '.png')
class ContestTypeMedia(_LanguageIconMedia):
    # Localized icons live under contest-types/<lang>/.
    toplevel_dir = 'contest-types'
class TypeMedia(_LanguageIconMedia):
    # Localized icons live under types/<lang>/.
    toplevel_dir = 'types'
''' XXX: No accessors for:
chrome
fonts
ribbons
'''
| |
#
# Ceilometer documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 27 11:38:59 2011.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# Make the project importable so autodoc can find it: ROOT is the
# repository root two levels above this conf file.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))

sys.path.insert(0, ROOT)
sys.path.insert(0, BASE_DIR)

# This is required for ReadTheDocs.org, but isn't a bad idea anyway.
# NOTE(review): this points at openstack_dashboard settings even though this
# is the Ceilometer doc build -- looks copied from Horizon; confirm intended.
os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'
def write_autodoc_index():
    """Generate autodoc .rst stubs plus an autoindex.rst for the source tree.

    Side effects: creates/updates files under <BASE_DIR>/sourcecode, removes
    stale generated .rst files, and (via find_autodoc_modules) changes the
    process working directory without restoring it.
    """

    def find_autodoc_modules(module_name, sourcedir):
        """Return a list of modules in the SOURCE directory."""
        modlist = []
        # NOTE: chdir so os.walk(".") yields paths relative to the package.
        os.chdir(os.path.join(sourcedir, module_name))
        print("SEARCHING %s" % sourcedir)
        for root, dirs, files in os.walk("."):
            for filename in files:
                if not filename.endswith(".py"):
                    continue
                # remove the pieces of the root
                elements = root.split(os.path.sep)
                # replace the leading "." with the module name
                elements[0] = module_name
                # and get the base module name
                base, extension = os.path.splitext(filename)
                # Packages are addressed by their directory, not __init__.
                if base != "__init__":
                    elements.append(base)
                modlist.append(".".join(elements))
        return modlist

    RSTDIR = os.path.abspath(os.path.join(BASE_DIR, "sourcecode"))
    SRCS = {'ceilometer': ROOT}

    EXCLUDED_MODULES = ('ceilometer.tests',)
    CURRENT_SOURCES = {}

    if not os.path.exists(RSTDIR):
        os.mkdir(RSTDIR)
    CURRENT_SOURCES[RSTDIR] = ['autoindex.rst']

    # Use a context manager so the index is closed (and flushed) even when
    # generation fails part-way through.
    with open(os.path.join(RSTDIR, "autoindex.rst"), "w") as INDEXOUT:
        INDEXOUT.write("=================\n")
        INDEXOUT.write("Source Code Index\n")
        INDEXOUT.write("=================\n")

        for modulename, path in SRCS.items():
            sys.stdout.write("Generating source documentation for %s\n" %
                             modulename)
            INDEXOUT.write("\n%s\n" % modulename.capitalize())
            INDEXOUT.write("%s\n" % ("=" * len(modulename),))
            INDEXOUT.write(".. toctree::\n")
            INDEXOUT.write(" :maxdepth: 1\n")
            INDEXOUT.write("\n")

            MOD_DIR = os.path.join(RSTDIR, modulename)
            CURRENT_SOURCES[MOD_DIR] = []
            if not os.path.exists(MOD_DIR):
                os.mkdir(MOD_DIR)
            for module in find_autodoc_modules(modulename, path):
                if any(module.startswith(exclude)
                       for exclude in EXCLUDED_MODULES):
                    print("Excluded module %s." % module)
                    continue
                mod_path = os.path.join(path, *module.split("."))
                generated_file = os.path.join(MOD_DIR, "%s.rst" % module)

                INDEXOUT.write(" %s/%s\n" % (modulename, module))

                # Find the __init__.py module if this is a directory
                if os.path.isdir(mod_path):
                    source_file = ".".join((os.path.join(mod_path, "__init__"),
                                            "py",))
                else:
                    source_file = ".".join((os.path.join(mod_path), "py"))

                CURRENT_SOURCES[MOD_DIR].append("%s.rst" % module)
                # Only generate a new file if the source has changed or we
                # don't have a doc file to begin with.
                if not os.access(generated_file, os.F_OK) or \
                        os.stat(generated_file).st_mtime < \
                        os.stat(source_file).st_mtime:
                    print("Module %s updated, generating new documentation."
                          % module)
                    # Context manager: the stub file was previously leaked
                    # if a write raised.
                    with open(generated_file, "w") as FILEOUT:
                        header = "The :mod:`%s` Module" % module
                        FILEOUT.write("%s\n" % ("=" * len(header),))
                        FILEOUT.write("%s\n" % header)
                        FILEOUT.write("%s\n" % ("=" * len(header),))
                        FILEOUT.write(".. automodule:: %s\n" % module)
                        FILEOUT.write(" :members:\n")
                        FILEOUT.write(" :undoc-members:\n")
                        FILEOUT.write(" :show-inheritance:\n")
                        FILEOUT.write(" :noindex:\n")

    # Delete auto-generated .rst files for sources which no longer exist
    for directory, subdirs, files in list(os.walk(RSTDIR)):
        for old_file in files:
            if old_file not in CURRENT_SOURCES.get(directory, []):
                print("Removing outdated file for %s" % old_file)
                os.remove(os.path.join(directory, old_file))


write_autodoc_index()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named 'sphinx.ext.*')
# or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.autohttp.flask',
'wsmeext.sphinxext',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
'sphinxcontrib.pecanwsme.rest',
'oslosphinx',
'sphinxcontrib.docbookrestapi.setup'
]
wsme_protocols = ['restjson', 'restxml']
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
if os.getenv('HUDSON_PUBLISH_DOCS'):
templates_path = ['_ga', '_templates']
else:
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ceilometer'
copyright = u'2012-2015, OpenStack Foundation'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['**/#*', '**~', '**/#*#']
# The reST default role (used for this markup: `text`)
# to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
primary_domain = 'py'
nitpicky = False
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme_path = ['.']
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"nosidebar": "false"
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Ceilometerdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Ceilometer.tex', u'Ceilometer Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ceilometer', u'Ceilometer Documentation',
[u'OpenStack'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Ceilometer', u'Ceilometer Documentation', u'OpenStack',
'Ceilometer', 'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output --------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Ceilometer'
epub_author = u'OpenStack'
epub_publisher = u'OpenStack'
epub_copyright = u'2012-2015, OpenStack'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
| |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Policy Engine For Ironic."""
import sys
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log
from oslo_policy import policy
from iotronic.common import exception
from iotronic.common.i18n import _LW
_ENFORCER = None
CONF = cfg.CONF
LOG = log.getLogger(__name__)
default_policies = [
# Legacy setting, don't remove. Likely to be overridden by operators who
# forget to update their policy.json configuration file.
# This gets rolled into the new "is_admin" rule below.
policy.RuleDefault('admin_api',
'role:admin or role:administrator',
description='Legacy rule for cloud admin access'),
# is_public_api is set in the environment from AuthTokenMiddleware
policy.RuleDefault('public_api',
'is_public_api:True',
description='Internal flag for public API routes'),
policy.RuleDefault('is_admin',
'rule:admin_api',
description='Full read/write API access'),
policy.RuleDefault('is_admin_iot_project',
'role:admin_iot_project',
description='Full read/write API access'),
policy.RuleDefault('is_manager_iot_project',
'role:manager_iot_project',
description='Full read/write API access'),
policy.RuleDefault('is_user_iot',
'role:user_iot',
description='Full read/write API access'),
policy.RuleDefault('is_owner',
'user:%(owner)s',
description='full access to the owner'),
policy.RuleDefault('admin_or_owner',
'rule:is_admin or rule:is_owner',
description='full access to the owner or the admin'),
policy.RuleDefault('is_iot_member',
'rule:is_admin_iot_project '
'or rule:is_manager_iot_project or rule:is_user_iot',
description='define a member on iot context'),
]
# NOTE(deva): to follow policy-in-code spec, we define defaults for
# the granular policies in code, rather than in policy.json.
# All of these may be overridden by configuration, but we can
# depend on their existence throughout the code.
board_policies = [
policy.RuleDefault('iot:board:get',
'rule:is_admin or rule:is_iot_member',
description='Retrieve Board records'),
policy.RuleDefault('iot:board:create',
'rule:is_admin_iot_project',
description='Create Board records'),
policy.RuleDefault('iot:board:delete',
'rule:is_admin or rule:is_admin_iot_project '
'or rule:is_manager_iot_project',
description='Delete Board records'),
policy.RuleDefault('iot:board:update',
'rule:is_admin or rule:is_admin_iot_project '
'or rule:is_manager_iot_project',
description='Update Board records'),
]
plugin_policies = [
policy.RuleDefault('iot:plugin:get',
'rule:is_admin or rule:is_iot_member',
description='Retrieve Plugin records'),
policy.RuleDefault('iot:plugin:create',
'rule:is_iot_member',
description='Create Plugin records'),
policy.RuleDefault('iot:plugin:get_one', 'rule:admin_or_owner',
description='Retrieve a Plugin record'),
policy.RuleDefault('iot:plugin:delete', 'rule:admin_or_owner',
description='Delete Plugin records'),
policy.RuleDefault('iot:plugin:update', 'rule:admin_or_owner',
description='Update Plugin records'),
]
injection_plugin_policies = [
policy.RuleDefault('iot:plugin_on_board:get',
'rule:admin_or_owner',
description='Retrieve Plugin records'),
policy.RuleDefault('iot:plugin_remove:delete', 'rule:admin_or_owner',
description='Delete Plugin records'),
policy.RuleDefault('iot:plugin_action:post',
'rule:admin_or_owner',
description='Create Plugin records'),
policy.RuleDefault('iot:plugin_inject:put', 'rule:admin_or_owner',
description='Retrieve a Plugin record'),
]
def list_policies():
    """Return every policy default registered by this module."""
    return (default_policies
            + board_policies
            + plugin_policies
            + injection_plugin_policies)
@lockutils.synchronized('policy_enforcer')
def init_enforcer(policy_file=None, rules=None,
                  default_rule=None, use_conf=True):
    """Synchronously initializes the policy enforcer

    :param policy_file: Custom policy file to use, if none is specified,
                        `CONF.oslo_policy.policy_file` will be used.
    :param rules: Default dictionary / Rules to use. It will be
                  considered just in the first instantiation.
    :param default_rule: Default rule to use,
                         CONF.oslo_policy.policy_default_rule will
                         be used if none is specified.
    :param use_conf: Whether to load rules from config file.
    """
    global _ENFORCER
    # Idempotent: the enforcer is a module-level singleton created at most
    # once, under the 'policy_enforcer' lock taken by the decorator.
    if _ENFORCER:
        return

    # NOTE(deva): Register defaults for policy-in-code here so that they are
    # loaded exactly once - when this module-global is initialized.
    # Defining these in the relevant API modules won't work
    # because API classes lack singletons and don't use globals.
    _ENFORCER = policy.Enforcer(CONF, policy_file=policy_file,
                                rules=rules,
                                default_rule=default_rule,
                                use_conf=use_conf)
    _ENFORCER.register_defaults(list_policies())
def get_enforcer():
    """Provides access to the single instance of Policy enforcer."""
    if _ENFORCER:
        return _ENFORCER
    # Lazily build the singleton on first use.
    init_enforcer()
    return _ENFORCER
def get_oslo_policy_enforcer():
    """Return the enforcer for use by the oslopolicy CLI scripts.

    Those scripts need the 'output-file' and 'namespace' options, but having
    those in sys.argv means loading the Ironic config options will fail as
    they are not expected to be present, so the option (and its value) is
    stripped from the argument list before CONF is initialized.
    """
    # cfg.CONF expects the equivalent of sys.argv[1:].
    args = iter(sys.argv[1:])
    conf_args = []
    for arg in args:
        if arg.strip('-') in ('namespace', 'output-file'):
            # Skip the option's value as well.
            next(args, None)
            continue
        conf_args.append(arg)

    cfg.CONF(conf_args, project='ironic')
    return get_enforcer()
# NOTE(deva): We can't call these methods from within decorators because the
# 'target' and 'creds' parameter must be fetched from the call time
# context-local pecan.request magic variable, but decorators are compiled
# at module-load time.
def authorize(rule, target, creds, *args, **kwargs):
    """A shortcut for policy.Enforcer.authorize()

    Checks authorization of a rule against the target and credentials and
    raises an exception if the rule is not defined.
    Always returns true if CONF.auth_strategy == noauth.
    Beginning with the Newton cycle, this should be used in place of
    'enforce'.
    """
    if CONF.auth_strategy == 'noauth':
        return True
    try:
        return get_enforcer().authorize(rule, target, creds, do_raise=True,
                                        *args, **kwargs)
    except policy.PolicyNotAuthorized:
        # Map the oslo.policy failure onto the API-layer exception type.
        raise exception.HTTPForbidden(resource=rule)
def check(rule, target, creds, *args, **kwargs):
    """A shortcut for policy.Enforcer.enforce()

    Checks authorization of a rule against the target and credentials
    and returns True or False.
    """
    return get_enforcer().enforce(rule, target, creds, *args, **kwargs)
def enforce(rule, target, creds, do_raise=False, exc=None, *args, **kwargs):
    """A shortcut for policy.Enforcer.enforce()

    Checks authorization of a rule against the target and credentials.
    Always returns true if CONF.auth_strategy == noauth.
    """
    # NOTE(deva): this method is obsoleted by authorize(), but retained for
    #             backwards compatibility in case it has been used
    #             downstream. It may be removed in the Pike cycle.
    LOG.warning(_LW(
        "Deprecation warning: calls to ironic.common.policy.enforce() "
        "should be replaced with authorize(). This method may be removed "
        "in a future release."))

    if CONF.auth_strategy == 'noauth':
        return True

    return get_enforcer().enforce(rule, target, creds, do_raise=do_raise,
                                  exc=exc, *args, **kwargs)
| |
'''
Copyright (C) 2012-2017 Diego Torres Milano
Created on Dec 1, 2012
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Diego Torres Milano
'''
import threading
__version__ = '12.5.4'
import sys
import warnings
if sys.executable:
if 'monkeyrunner' in sys.executable:
warnings.warn(
'''
You should use a 'python' interpreter, not 'monkeyrunner' for this module
''', RuntimeWarning)
import string
import datetime
import struct
import cStringIO as StringIO
import socket
import time
import re
import os
import platform
from com.dtmilano.android.window import Window
from com.dtmilano.android.common import _nd, _nh, _ns, obtainPxPy, obtainVxVy, \
obtainVwVh, profileStart, profileEnd
from com.dtmilano.android.adb.androidkeymap import KEY_MAP
# Debug switches; every sub-flag stays off unless DEBUG itself is True.
DEBUG = False
DEBUG_SHELL = DEBUG and False
DEBUG_TOUCH = DEBUG and False
DEBUG_LOG = DEBUG and False
DEBUG_WINDOWS = DEBUG and False
DEBUG_COORDS = DEBUG and False
DEBUG_IMAGE_ROTATION = DEBUG and False

# Flipped lazily when PIL is successfully imported (see takeSnapshot()).
PIL_AVAILABLE = False

PROFILE = False

# adb server endpoint, overridable through the environment.
# FIX: the HOSTNAME fallback used a bare `except:` (which would also swallow
# e.g. KeyboardInterrupt/SystemExit); narrowed to KeyError, matching the
# PORT handling below.
try:
    HOSTNAME = os.environ['ANDROID_ADB_SERVER_HOST']
except KeyError:
    HOSTNAME = 'localhost'

try:
    PORT = int(os.environ['ANDROID_ADB_SERVER_PORT'])
except KeyError:
    PORT = 5037

# adb wire-protocol status tokens.
OKAY = 'OKAY'
FAIL = 'FAIL'

# Key event types.
UP = 0
DOWN = 1
DOWN_AND_UP = 2

# Default socket timeout, in seconds.
TIMEOUT = 15

WIFI_SERVICE = 'wifi'

# some device properties
VERSION_SDK_PROPERTY = 'ro.build.version.sdk'
VERSION_RELEASE_PROPERTY = 'ro.build.version.release'
class Device:
    '''One `adb devices -l` entry: serial number, state and qualifiers.'''

    @staticmethod
    def factory(_str):
        # Builds a Device from one line of `adb devices -l` output:
        # "<serialno> <status> <qualifiers ...>".
        if DEBUG:
            print >> sys.stderr, "Device.factory(", _str, ")"
            print >> sys.stderr, "    _str=", repr(_str)
            print >> sys.stderr, "    _str=", _str.replace(' ', '_')
        # Split on whitespace at most twice so the qualifiers stay together.
        values = _str.split(None, 2)
        if DEBUG:
            print >> sys.stderr, "values=", values
        return Device(*values)

    def __init__(self, serialno, status, qualifiers=None):
        self.serialno = serialno
        self.status = status
        self.qualifiers = qualifiers

    def __str__(self):
        return "<<<" + self.serialno + ", " + self.status + ", %s>>>" % self.qualifiers
class WifiManager:
    '''
    Simulates Android WifiManager.

    @see: http://developer.android.com/reference/android/net/wifi/WifiManager.html
    '''

    # Mirror of the android.net.wifi.WifiManager WIFI_STATE_* constants.
    WIFI_STATE_DISABLING = 0
    WIFI_STATE_DISABLED = 1
    WIFI_STATE_ENABLING = 2
    WIFI_STATE_ENABLED = 3
    WIFI_STATE_UNKNOWN = 4

    # The first line of `dumpsys wifi` output states the current state.
    WIFI_IS_ENABLED_RE = re.compile('Wi-Fi is enabled')
    WIFI_IS_DISABLED_RE = re.compile('Wi-Fi is disabled')

    def __init__(self, device):
        # device: the AdbClient used to run shell commands on the device.
        self.device = device

    def getWifiState(self):
        '''
        Gets the Wi-Fi enabled state.

        @return: One of WIFI_STATE_DISABLED, WIFI_STATE_DISABLING, WIFI_STATE_ENABLED, WIFI_STATE_ENABLING, WIFI_STATE_UNKNOWN
        '''
        result = self.device.shell('dumpsys wifi')
        if result:
            # Only the first line is inspected; the regexes anchor there.
            state = result.splitlines()[0]
            if self.WIFI_IS_ENABLED_RE.match(state):
                return self.WIFI_STATE_ENABLED
            elif self.WIFI_IS_DISABLED_RE.match(state):
                return self.WIFI_STATE_DISABLED
            print >> sys.stderr, "UNKNOWN WIFI STATE:", state
        return self.WIFI_STATE_UNKNOWN
class Timer():
    '''Thin wrapper around C{threading.Timer} used as a watchdog.'''

    class TimeoutException(Exception):
        '''Raised by timer handlers once the watchdog has expired.

        FIX: module code raises and catches C{Timer.TimeoutException}
        (see AdbClient.timeoutHandler and AdbClient.__setTransport), but no
        such attribute existed on this class, so doing that raised
        C{AttributeError} instead. Defining the exception here fixes that
        while keeping the module-level C{TimeoutException} untouched.
        '''
        pass

    def __init__(self, timeout, handler, args):
        # handler(*args) runs on the worker thread after `timeout` seconds,
        # unless cancel() is called first.
        self.timer = threading.Timer(timeout, handler, args)

    def start(self):
        self.timer.start()

    def cancel(self):
        self.timer.cancel()
class TimeoutException(Exception):
    '''Module-level timeout error, kept for backward compatibility.'''
    pass
class AdbClient:
# Re-export the module-level key-event types as class attributes for
# caller convenience (AdbClient.UP, etc.).
UP = UP
DOWN = DOWN
DOWN_AND_UP = DOWN_AND_UP
def __init__(self, serialno=None, hostname=HOSTNAME, port=PORT, settransport=True, reconnect=True,
             ignoreversioncheck=False, timeout=TIMEOUT):
    # Per-instance android.util.Log-like facade (see the __Log inner class).
    self.Log = AdbClient.__Log(self)

    self.serialno = serialno
    self.hostname = hostname
    self.port = port
    self.timeout = timeout

    # Watchdog timers: id -> Timer object, replaced by "EXPIRED" once fired.
    self.timerId = -1
    self.timers = {}

    self.reconnect = reconnect
    self.socket = AdbClient.connect(self.hostname, self.port, self.timeout)

    # Verifies the adb server speaks a known protocol version (and reopens
    # the connection, which host:version closes).
    self.checkVersion(ignoreversioncheck)

    self.build = {}
    ''' Build properties '''

    self.__displayInfo = None
    ''' Cached display info. Reset it to C{None} to force refetching display info '''

    self.display = {}
    ''' The map containing the device's physical display properties: width, height and density '''

    self.isTransportSet = False
    if settransport and serialno != None:
        # Bind this connection to the device and cache its SDK level and
        # display properties.
        self.__setTransport(timeout=timeout)
        self.build[VERSION_SDK_PROPERTY] = int(self.__getProp(VERSION_SDK_PROPERTY))
        self.initDisplayProperties()
def timeoutHandler(self, timerId):
    # Runs on the Timer's worker thread when a watchdog expires; the entry
    # is marked so cancelTimer() won't try to cancel a dead timer.
    print >> sys.stderr, "TIMEOUT HANDLER", timerId
    self.timers[timerId] = "EXPIRED"
    # NOTE(review): this requires C{TimeoutException} to exist as an
    # attribute of the Timer class -- confirm it is defined there.
    raise Timer.TimeoutException("Timer %d has expired" % timerId)
def setTimer(self, timeout):
    '''Arms a watchdog timer and returns its identifier.'''
    self.timerId += 1
    tid = self.timerId
    watchdog = Timer(timeout, self.timeoutHandler, [tid])
    watchdog.start()
    self.timers[tid] = watchdog
    return tid
def cancelTimer(self, timerId):
    '''Discards timer C{timerId}, cancelling it unless it already expired.'''
    timer = self.timers.pop(timerId)
    if timer != "EXPIRED":
        timer.cancel()
def setSerialno(self, serialno):
    '''Selects the target device; only allowed before a transport is set.'''
    if self.isTransportSet:
        raise ValueError("Transport is already set, serialno cannot be set once this is done.")
    self.serialno = serialno
    self.__setTransport()
    # Cache the device's SDK level, mirroring what __init__ does.
    self.build[VERSION_SDK_PROPERTY] = int(self.__getProp(VERSION_SDK_PROPERTY))
def setReconnect(self, val):
    # Controls whether shell() re-opens the connection after each command.
    self.reconnect = val
@staticmethod
def connect(hostname, port, timeout=TIMEOUT):
    '''Opens a TCP socket to the adb server with SO_LINGER and a timeout.'''
    if DEBUG:
        print >> sys.stderr, "AdbClient.connect(%s, %s, %s)" % (hostname, port, timeout)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # SO_LINGER: Idea proposed by kysersozelee (#173)
    l_onoff = 1
    l_linger = 0
    s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                 struct.pack('ii', l_onoff, l_linger))
    s.settimeout(timeout)
    try:
        s.connect((hostname, port))
    except socket.error, ex:
        # FIX: the message interpolated the socket object `s` where the
        # hostname was intended; report the actual endpoint instead.
        raise RuntimeError("ERROR: Connecting to %s:%d: %s.\nIs adb running on your computer?" % (hostname, port, ex))
    return s
def close(self):
    '''Closes the adb server socket, if one is open.'''
    if DEBUG:
        print >> sys.stderr, "Closing socket...", self.socket
    if self.socket:
        self.socket.close()
def __del__(self):
    # Best-effort cleanup: destructor errors must never propagate, hence
    # the deliberate blanket except.
    try:
        self.close()
    except:
        pass
def __send(self, msg, checkok=True, reconnect=False):
    '''Sends one adb protocol message: 4 hex digits of payload length,
    then the payload itself; optionally checks the OKAY reply and/or
    reconnects afterwards.'''
    if DEBUG:
        print >> sys.stderr, "__send(%s, checkok=%s, reconnect=%s)" % (msg, checkok, reconnect)
    # Non-"host:" services act on a device, so a transport must be selected
    # first; "host:" services only need a live connection.
    if not re.search('^host:', msg):
        if not self.isTransportSet:
            self.__setTransport()
    else:
        self.checkConnected()
    b = bytearray(msg, 'utf-8')
    self.socket.send('%04X%s' % (len(b), b))
    if checkok:
        self.__checkOk()
    if reconnect:
        if DEBUG:
            print >> sys.stderr, "    __send: reconnecting"
        self.socket = AdbClient.connect(self.hostname, self.port, self.timeout)
        self.__setTransport()
def __receive(self, nob=None, sock=None):
    '''Receives exactly C{nob} bytes; when C{nob} is None the reply is
    length-prefixed (4 hex digits) and that length is read first.'''
    if DEBUG:
        print >> sys.stderr, "__receive(nob=%s)" % (nob)
    if not sock:
        sock = self.socket
    self.checkConnected(sock)
    if nob is None:
        # The adb server prefixes replies with their size as 4 hex digits.
        nob = int(sock.recv(4), 16)
    if DEBUG:
        print >> sys.stderr, "    __receive: receiving", nob, "bytes"
    recv = bytearray(nob)
    view = memoryview(recv)
    nr = 0
    # recv_into may return short reads; keep narrowing the view until the
    # whole buffer has been filled.
    while nr < nob:
        l = sock.recv_into(view, len(view))
        if DEBUG:
            print >> sys.stderr, "l=", l, "nr=", nr
        view = view[l:]
        nr += l
    if DEBUG:
        print >> sys.stderr, "    __receive: returning len=", len(recv)
    return str(recv)
def __checkOk(self, sock=None):
    '''Reads the 4-byte status reply and raises unless it is OKAY.'''
    if DEBUG:
        print >> sys.stderr, "__checkOk()"
    if not sock:
        sock = self.socket
    self.checkConnected(sock=sock)
    # Guard the blocking recv with a watchdog timer.
    timerId = self.setTimer(timeout=self.timeout)
    recv = sock.recv(4)
    if DEBUG:
        print >> sys.stderr, "    __checkOk: recv=", repr(recv)
    try:
        if recv != OKAY:
            error = sock.recv(1024)
            # '0049' is the length prefix of the adb "device unauthorized"
            # failure payload.
            if error.startswith('0049'):
                raise RuntimeError(
                    "ERROR: This computer is unauthorized. Please check the confirmation dialog on your device.")
            else:
                raise RuntimeError("ERROR: %s %s" % (repr(recv), error))
    finally:
        # Always disarm the watchdog, even on failure.
        self.cancelTimer(timerId)
    if DEBUG:
        print >> sys.stderr, "    __checkOk: returning True"
    return True
def checkConnected(self, sock=None):
    '''Raises RuntimeError unless a socket (C{sock} or the instance one)
    is available; returns True otherwise.'''
    if DEBUG:
        print >> sys.stderr, "checkConnected()"
    if not sock:
        sock = self.socket
    if not sock:
        raise RuntimeError("ERROR: Not connected")
    if DEBUG:
        print >> sys.stderr, "    checkConnected: returning True"
    return True
def checkVersion(self, ignoreversioncheck=False, reconnect=True):
    '''Queries the adb server version, raising unless it is a known one.'''
    if DEBUG:
        print >> sys.stderr, "checkVersion(reconnect=%s) ignoreversioncheck=%s" % (reconnect, ignoreversioncheck)
    self.__send('host:version', reconnect=False)
    # HACK: MSG_WAITALL not available on windows
    # version = self.socket.recv(8, socket.MSG_WAITALL)
    version = self.__readExactly(self.socket, 8)

    VALID_ADB_VERSIONS = ["00040024", "00040023", "00040020", "0004001f"]

    if not (version in VALID_ADB_VERSIONS) and not ignoreversioncheck:
        raise RuntimeError(
            "ERROR: Incorrect ADB server version %s (expecting one of %s)" % (version, VALID_ADB_VERSIONS))
    if reconnect:
        # host:version closes the connection server-side; reopen it so the
        # client stays usable.
        self.socket = AdbClient.connect(self.hostname, self.port, self.timeout)
def __setTransport(self, timeout=60):
    '''
    Binds this connection to the device whose serial number matches the
    C{self.serialno} regexp, waiting up to C{timeout} seconds (via
    host:track-devices) for a device to appear if the list is empty.
    '''
    if DEBUG:
        print >> sys.stderr, "__setTransport()"
    if not self.serialno:
        raise ValueError("serialno not set, empty or None")
    self.checkConnected()
    serialnoRE = re.compile(self.serialno)
    found = False
    devices = self.getDevices()
    if len(devices) == 0 and timeout > 0:
        print >> sys.stderr, "Empty device list, will wait %s secs for devices to appear" % self.timeout
        # Sets the timeout to 5 to be able to loop while trying to receive new devices being added
        _s = AdbClient.connect(self.hostname, self.port, timeout=5)
        msg = 'host:track-devices'
        b = bytearray(msg, 'utf-8')
        try:
            timerId = self.setTimer(timeout=timeout)
            _s.send('%04X%s' % (len(b), b))
            self.__checkOk(sock=_s)
            # eat '0000'
            _s.recv(4)
            found = False
            while not found:
                sys.stderr.write(".")
                sys.stderr.flush()
                try:
                    for line in _s.recv(1024).splitlines():
                        # skip first 4 bytes containing the response size
                        device = Device.factory(line[4:])
                        if device.status == 'device':
                            devices.append(device)
                            found = True
                            break
                    if found:
                        break
                except socket.timeout as ex:
                    # we continue trying until timer times out
                    pass
                finally:
                    # Poll every 3s; give up once the watchdog has expired.
                    time.sleep(3)
                    if self.timers[timerId] == "EXPIRED":
                        break
            self.cancelTimer(timerId)
        except Timer.TimeoutException as ex:
            print >> sys.stderr, "EXCEPTION", ex
            pass
        finally:
            _s.close()
            sys.stderr.write("\n")
            sys.stderr.flush()
    if len(devices) == 0:
        raise RuntimeError("ERROR: There are no connected devices")
    # NOTE(review): `found` may already be True from the wait loop above
    # even before any serial has been matched -- confirm that is intended.
    for device in devices:
        if serialnoRE.match(device.serialno):
            found = True
            break
    if not found:
        raise RuntimeError("ERROR: couldn't find device that matches '%s' in %s" % (self.serialno, devices))
    # Pin the possibly-regexp serialno to the concrete device that matched.
    self.serialno = device.serialno
    msg = 'host:transport:%s' % self.serialno
    if DEBUG:
        print >> sys.stderr, "    __setTransport: msg=", msg
    self.__send(msg, reconnect=False)
    self.isTransportSet = True
def __checkTransport(self):
    '''Ensures a device transport has been selected, raising otherwise.'''
    if self.isTransportSet:
        return
    raise RuntimeError("ERROR: Transport is not set")
def __readExactly(self, sock, size):
    '''Reads exactly C{size} bytes from C{sock}, looping over short reads.'''
    if DEBUG:
        print >> sys.stderr, "__readExactly(socket=%s, size=%d)" % (sock, size)
    _buffer = bytearray(size)
    view = memoryview(_buffer)
    nb = 0
    # NOTE(review): if the peer closes early, recv_into returns 0 and this
    # loop would spin forever -- confirm whether that can happen here.
    while nb < size:
        l = sock.recv_into(view, len(view))
        view = view[l:]
        nb += l
    return str(_buffer)
def getDevices(self):
    '''Returns the list of connected L{Device}s, or None on protocol error.'''
    if DEBUG:
        print >> sys.stderr, "getDevices()"
    self.__send('host:devices-l', checkok=False)
    try:
        self.__checkOk()
    except RuntimeError, ex:
        print >> sys.stderr, "**ERROR:", ex
        return None
    devices = []
    for line in self.__receive().splitlines():
        devices.append(Device.factory(line))
    # host:devices-l closes the connection; reopen for subsequent requests.
    self.socket = AdbClient.connect(self.hostname, self.port, self.timeout)
    return devices
def shell(self, cmd=None):
    '''
    Runs C{cmd} through `adb shell` and returns its complete output as a
    string; when C{cmd} is None, opens an interactive shell and returns a
    file-like object reading from it.
    '''
    if DEBUG_SHELL:
        print >> sys.stderr, "shell(cmd=%s)" % cmd
    self.__checkTransport()
    if cmd:
        self.__send('shell:%s' % cmd, checkok=True, reconnect=False)
        chunks = []
        # Drain the socket until the remote side closes the stream.
        while True:
            chunk = None
            try:
                chunk = self.socket.recv(4096)
            except Exception, ex:
                print >> sys.stderr, "ERROR:", ex
            if not chunk:
                break
            chunks.append(chunk)
        if self.reconnect:
            if DEBUG:
                print >> sys.stderr, "Reconnecting..."
            # shell: consumes the transport; re-establish it for the next
            # command when reconnect mode is on.
            self.close()
            self.socket = AdbClient.connect(self.hostname, self.port, self.timeout)
            self.__setTransport()
        return ''.join(chunks)
    else:
        self.__send('shell:')
        # sin = self.socket.makefile("rw")
        # sout = self.socket.makefile("r")
        # return (sin, sin)
        # FIX: this used `adbClient.socket.makefile("r")` -- `adbClient` is
        # an undefined name in this scope and raised NameError; the instance
        # socket was intended.
        sout = self.socket.makefile("r")
        return sout
def getRestrictedScreen(self):
    '''Obtains display dimensions from the C{mRestrictedScreen} line of
    `dumpsys window`, returned as the (x, y, w, h) match groups.'''
    pattern = re.compile('\s*mRestrictedScreen=\((?P<x>\d+),(?P<y>\d+)\) (?P<w>\d+)x(?P<h>\d+)')
    for line in self.shell('dumpsys window').splitlines():
        match = pattern.match(line)
        if match:
            return match.groups()
    raise RuntimeError("Couldn't find mRestrictedScreen in 'dumpsys window'")
def getDisplayInfo(self):
    '''Returns display info, preferring the logical source and falling
    back to the physical one; raises when neither yields anything.'''
    self.__checkTransport()
    for probe in (self.getLogicalDisplayInfo, self.getPhysicalDisplayInfo):
        info = probe()
        if info:
            return info
    raise RuntimeError("Couldn't find display info in 'wm size', 'dumpsys display' or 'dumpsys window'")
def getLogicalDisplayInfo(self):
    '''
    Gets C{mDefaultViewport} and then C{deviceWidth} and C{deviceHeight} values from dumpsys.
    This is a method to obtain display logical dimensions and density
    '''
    self.__checkTransport()
    logicalDisplayRE = re.compile(
        '.*DisplayViewport\{valid=true, .*orientation=(?P<orientation>\d+), .*deviceWidth=(?P<width>\d+), deviceHeight=(?P<height>\d+).*')
    for line in self.shell('dumpsys display').splitlines():
        m = logicalDisplayRE.search(line, 0)
        if m:
            # Cache the result; the __getDisplay* helpers read this cache.
            self.__displayInfo = {}
            for prop in ['width', 'height', 'orientation']:
                self.__displayInfo[prop] = int(m.group(prop))
            for prop in ['density']:
                d = self.__getDisplayDensity(None, strip=True, invokeGetPhysicalDisplayIfNotFound=True)
                if d:
                    self.__displayInfo[prop] = d
                else:
                    # No available density information
                    self.__displayInfo[prop] = -1.0
            return self.__displayInfo
    # No viewport line matched.
    return None
def getPhysicalDisplayInfo(self):
    ''' Gets C{mPhysicalDisplayInfo} values from dumpsys. This is a method to obtain display dimensions and density'''
    self.__checkTransport()
    # 1) Prefer `wm size` / `wm density` output.
    phyDispRE = re.compile('Physical size: (?P<width>\d+)x(?P<height>\d+).*Physical density: (?P<density>\d+)', re.DOTALL)
    m = phyDispRE.search(self.shell('wm size; wm density'))
    if m:
        displayInfo = {}
        for prop in ['width', 'height']:
            displayInfo[prop] = int(m.group(prop))
        for prop in ['density']:
            displayInfo[prop] = float(m.group(prop))
        return displayInfo
    # 2) Fall back to PhysicalDisplayInfo in `dumpsys display`.
    phyDispRE = re.compile(
        '.*PhysicalDisplayInfo{(?P<width>\d+) x (?P<height>\d+), .*, density (?P<density>[\d.]+).*')
    for line in self.shell('dumpsys display').splitlines():
        m = phyDispRE.search(line, 0)
        if m:
            displayInfo = {}
            for prop in ['width', 'height']:
                displayInfo[prop] = int(m.group(prop))
            for prop in ['density']:
                # In mPhysicalDisplayInfo density is already a factor, no need to calculate
                displayInfo[prop] = float(m.group(prop))
            return displayInfo
    # 3) Last resort: screen rectangles from `dumpsys window`.
    # This could also be mSystem or mOverscanScreen
    phyDispRE = re.compile('\s*mUnrestrictedScreen=\((?P<x>\d+),(?P<y>\d+)\) (?P<width>\d+)x(?P<height>\d+)')
    # This is known to work on older versions (i.e. API 10) where mrestrictedScreen is not available
    dispWHRE = re.compile('\s*DisplayWidth=(?P<width>\d+) *DisplayHeight=(?P<height>\d+)')
    for line in self.shell('dumpsys window').splitlines():
        m = phyDispRE.search(line, 0)
        if not m:
            m = dispWHRE.search(line, 0)
        if m:
            displayInfo = {}
            for prop in ['width', 'height']:
                displayInfo[prop] = int(m.group(prop))
            for prop in ['density']:
                d = self.__getDisplayDensity(None, strip=True, invokeGetPhysicalDisplayIfNotFound=False)
                if d:
                    displayInfo[prop] = d
                else:
                    # No available density information
                    displayInfo[prop] = -1.0
            return displayInfo
    # NOTE(review): falls through returning None implicitly when no source
    # matched -- callers (getDisplayInfo) treat a falsy result as not found.
def __getProp(self, key, strip=True):
    '''Reads an Android system property via `getprop`, optionally stripping
    the trailing newline from the shell output.'''
    if DEBUG:
        print >> sys.stderr, "__getProp(%s, %s)" % (key, strip)
    prop = self.shell('getprop %s' % key)
    if strip:
        prop = prop.rstrip('\r\n')
    if DEBUG:
        print >> sys.stderr, "    __getProp: returning '%s'" % prop
    return prop
def __getDisplayWidth(self, key, strip=True):
    '''Returns the display width, using the cache when populated.'''
    cached = self.__displayInfo
    if cached and 'width' in cached:
        return cached['width']
    return self.getDisplayInfo()['width']
def __getDisplayHeight(self, key, strip=True):
    '''Returns the display height, using the cache when populated.'''
    cached = self.__displayInfo
    if cached and 'height' in cached:
        return cached['height']
    return self.getDisplayInfo()['height']
def __getDisplayOrientation(self, key, strip=True):
    '''Returns the display orientation, trying the cache, display info and
    finally `dumpsys input`; -1 when it cannot be determined.'''
    if self.__displayInfo and 'orientation' in self.__displayInfo:
        return self.__displayInfo['orientation']
    displayInfo = self.getDisplayInfo()
    if 'orientation' in displayInfo:
        return displayInfo['orientation']
    # Fallback method to obtain the orientation
    # See https://github.com/dtmilano/AndroidViewClient/issues/128
    surfaceOrientationRE = re.compile('SurfaceOrientation:\s+(\d+)')
    output = self.shell('dumpsys input')
    m = surfaceOrientationRE.search(output)
    if m:
        return int(m.group(1))
    # We couldn't obtain the orientation
    return -1
def __getDisplayDensity(self, key, strip=True, invokeGetPhysicalDisplayIfNotFound=True):
    '''Returns the display density as a scale factor (dpi / 160), or -1.0
    when it cannot be determined.'''
    if self.__displayInfo and 'density' in self.__displayInfo:  # and self.__displayInfo['density'] != -1: # FIXME: need more testing
        return self.__displayInfo['density']
    BASE_DPI = 160.0
    # Try the real device property first, then the emulator one.
    d = self.getProperty('ro.sf.lcd_density', strip)
    if d:
        return float(d) / BASE_DPI
    d = self.getProperty('qemu.sf.lcd_density', strip)
    if d:
        return float(d) / BASE_DPI
    # The flag avoids mutual recursion when called from
    # getPhysicalDisplayInfo itself.
    if invokeGetPhysicalDisplayIfNotFound:
        return self.getPhysicalDisplayInfo()['density']
    return -1.0
def getSystemProperty(self, key, strip=True):
    '''Alias of L{getProperty}.'''
    self.__checkTransport()
    return self.getProperty(key, strip)
def getProperty(self, key, strip=True):
    ''' Gets the property value for key '''
    self.__checkTransport()
    import collections
    # Maps property-key regexps to the bound method resolving them, ordered
    # so the catch-all '.*' (plain `adb getprop`) is tried last.
    MAP_PROPS = collections.OrderedDict([
        (re.compile('display.width'), self.__getDisplayWidth),
        (re.compile('display.height'), self.__getDisplayHeight),
        (re.compile('display.density'), self.__getDisplayDensity),
        (re.compile('display.orientation'), self.__getDisplayOrientation),
        (re.compile('.*'), self.__getProp),
    ])
    for kre in MAP_PROPS.keys():
        if kre.match(key):
            return MAP_PROPS[kre](key=key, strip=strip)
    # Unreachable while the '.*' entry is present.
    # FIX: the message previously lacked the `% key` argument, so the
    # placeholder was never filled in.
    raise ValueError("key='%s' does not match any map entry" % key)
def getSdkVersion(self):
    '''
    Gets the SDK version.
    '''
    self.__checkTransport()
    # Cached by __init__/setSerialno from the ro.build.version.sdk property.
    return self.build[VERSION_SDK_PROPERTY]
def press(self, name, eventType=DOWN_AND_UP):
    '''Sends a key event (by name or keycode) via `input keyevent`.'''
    self.__checkTransport()
    if isinstance(name, unicode):
        # FIX: this used name.decode('ascii', errors='replace'); on a
        # unicode object decode() first re-encodes with the default codec
        # and can raise UnicodeEncodeError -- the exact failure the
        # 'replace' handler was meant to avoid. encode() is what converts
        # unicode -> str here.
        name = name.encode('ascii', 'replace')
    cmd = 'input keyevent %s' % name
    if DEBUG:
        print >> sys.stderr, "press(%s)" % cmd
    self.shell(cmd)
def longPress(self, name, duration=0.5, dev='/dev/input/event0', scancode=0, repeat=1):
    '''Sends a long key press, via raw sendevent for keys in KEY_MAP or
    `input keyevent --longpress` (API >= 19) otherwise.'''
    self.__checkTransport()
    # WORKAROUND:
    # Using 'input keyevent --longpress POWER' does not work correctly in
    # KitKat (API 19), it sends a short instead of a long press.
    # This uses the events instead, but it may vary from device to device.
    # The events sent are device dependent and may not work on other devices.
    # If this does not work on your device please do:
    #     $ adb shell getevent -l
    # and post the output to https://github.com/dtmilano/AndroidViewClient/issues
    # specifying the device and API level.
    if name[0:4] == 'KEY_':
        name = name[4:]
    # FIXME:
    # Most of the keycodes are in KEY_MAP so it's very unlikely that the longpress event
    # is sent via `input keyevent ...` (look next if)
    if name in KEY_MAP:
        # Key down, optional repeated scancodes, hold, then key up.
        self.shell('sendevent %s 1 %d 1' % (dev, KEY_MAP[name]))
        self.shell('sendevent %s 0 0 0' % dev)
        for _ in range(repeat):
            self.shell('sendevent %s 4 4 %d' % (dev, scancode))
            self.shell('sendevent %s 0 0 0' % dev)
        time.sleep(duration)
        self.shell('sendevent %s 1 %d 0' % (dev, KEY_MAP[name]))
        self.shell('sendevent %s 0 0 0' % dev)
        return

    version = self.getSdkVersion()
    if version >= 19:
        cmd = 'input keyevent --longpress %s' % name
        if DEBUG:
            print >> sys.stderr, "longPress(%s)" % cmd
        self.shell(cmd)
    else:
        raise RuntimeError("longpress: not supported for API < 19 (version=%d)" % version)
def startActivity(self, component=None, flags=None, uri=None):
    '''Starts an Activity through `am start`, raising on am errors.'''
    self.__checkTransport()
    parts = ['am start']
    if component:
        parts.append('-n %s' % component)
    if flags:
        parts.append('-f %s' % flags)
    if uri:
        parts.append('%s' % uri)
    cmd = ' '.join(parts)
    if DEBUG:
        print >> sys.stderr, "Starting activity: %s" % cmd
    out = self.shell(cmd)
    # `am` reports failures on stdout rather than via exit status.
    if re.search(r"(Error type)|(Error: )|(Cannot find 'App')", out, re.IGNORECASE | re.MULTILINE):
        raise RuntimeError(out)
def takeSnapshot(self, reconnect=False):
    '''
    Takes a snapshot of the device and return it as a PIL Image.
    '''
    if PROFILE:
        profileStart()
    # PIL is imported lazily so the module works without it until a
    # snapshot is actually requested.
    global PIL_AVAILABLE
    if not PIL_AVAILABLE:
        try:
            global Image
            from PIL import Image
            PIL_AVAILABLE = True
        except:
            raise Exception("You have to install PIL to use takeSnapshot()")

    # framebuffer: is used on old (<14) and new (>=23) APIs; screencap
    # covers the range in between.
    USE_ADB_FRAMEBUFFER_METHOD = (self.getSdkVersion() < 14 or self.getSdkVersion() >= 23)
    if USE_ADB_FRAMEBUFFER_METHOD:
        self.__checkTransport()
        self.__send('framebuffer:', checkok=True, reconnect=False)
        # case 1: // version
        #     return 12; // bpp, size, width, height, 4*(length, offset)
        received = self.__receive(1 * 4 + 12 * 4)
        (version, bpp, size, width, height, roffset, rlen, boffset, blen, goffset, glen, aoffset,
         alen) = struct.unpack(
            '<' + 'L' * 13, received)
        if DEBUG:
            print >> sys.stderr, "    takeSnapshot:", (
                version, bpp, size, width, height, roffset, rlen, boffset, blen, goffset, glen, aoffset, alen)
        # Derive the channel order from the per-channel offsets.
        offsets = {roffset: 'R', goffset: 'G', boffset: 'B'}
        if bpp == 32:
            if alen != 0:
                offsets[aoffset] = 'A'
            else:
                warnings.warn('''framebuffer is specified as 32bpp but alpha length is 0''')
        argMode = ''.join([offsets[o] for o in sorted(offsets)])
        if DEBUG:
            print >> sys.stderr, "    takeSnapshot:", (
                version, bpp, size, width, height, roffset, rlen, boffset, blen, goffset, blen, aoffset, alen,
                argMode)
        if argMode == 'BGRA':
            argMode = 'RGBA'
        if bpp == 16:
            mode = 'RGB'
            argMode += ';16'
        else:
            mode = argMode
        # A single nul byte acknowledges the header and requests the pixels.
        self.__send('\0', checkok=False, reconnect=False)
        if DEBUG:
            print >> sys.stderr, "    takeSnapshot: reading %d bytes" % (size)
        received = self.__receive(size)
        if reconnect:
            self.socket = AdbClient.connect(self.hostname, self.port, self.timeout)
            self.__setTransport()
        if DEBUG:
            print >> sys.stderr, "    takeSnapshot: Image.frombuffer(%s, %s, %s, %s, %s, %s, %s)" % (
                mode, (width, height), 'data', 'raw', argMode, 0, 1)
        image = Image.frombuffer(mode, (width, height), received, 'raw', argMode, 0, 1)
    else:
        # ALTERNATIVE_METHOD: screencap
        received = self.shell('/system/bin/screencap -p').replace("\r\n", "\n")
        if not received:
            raise RuntimeError('"/system/bin/screencap -p" result was empty')
        stream = StringIO.StringIO(received)
        try:
            image = Image.open(stream)
        except IOError, ex:
            print >> sys.stderr, ex
            print >> sys.stderr, repr(stream)
            print >> sys.stderr, repr(received)
            raise RuntimeError('Cannot convert stream to image: ' + ex)

    # Just in case let's get the real image size
    (w, h) = image.size
    if w == self.display['height'] and h == self.display['width']:
        # The snapshot is rotated relative to the display; rotate it back.
        # FIXME: We are not catching the 180 degrees rotation here
        if 'orientation' in self.display:
            r = (0, 90, 180, -90)[self.display['orientation']]
        else:
            r = 90
        image = image.rotate(r, expand=1).resize((h, w))
    if PROFILE:
        profileEnd()
    return image
def __transformPointByOrientation(self, (x, y), orientationOrig, orientationDest):
    '''Rotates (x, y) from C{orientationOrig} into C{orientationDest}
    coordinates; only destinations 1 and 3 (the landscapes) transform.'''
    if orientationOrig != orientationDest:
        if orientationDest == 1:
            _x = x
            x = self.display['width'] - y
            y = _x
        elif orientationDest == 3:
            _x = x
            x = y
            y = self.display['height'] - _x
    return (x, y)
def touch(self, x, y, orientation=-1, eventType=DOWN_AND_UP):
    '''Taps at (x, y) in PX via `input tap`, transforming the point when
    the given orientation differs from the display's (-1: use current).'''
    if DEBUG_TOUCH:
        print >> sys.stderr, "touch(x=", x, ", y=", y, ", orientation=", orientation, ", eventType=", eventType, ")"
    self.__checkTransport()
    if orientation == -1:
        orientation = self.display['orientation']
    self.shell(
        'input tap %d %d' % self.__transformPointByOrientation((x, y), orientation, self.display['orientation']))
def touchDip(self, x, y, orientation=-1, eventType=DOWN_AND_UP):
    '''Taps at (x, y) given in density-independent pixels.'''
    if DEBUG_TOUCH:
        print >> sys.stderr, "touchDip(x=", x, ", y=", y, ", orientation=", orientation, ", eventType=", eventType, ")"
    self.__checkTransport()
    if orientation == -1:
        orientation = self.display['orientation']
    # Convert DIP -> PX using the cached density factor.
    x = x * self.display['density']
    y = y * self.display['density']
    self.touch(x, y, orientation, eventType)
def longTouch(self, x, y, duration=2000, orientation=-1):
    '''
    Long touches at (x, y)

    @param duration: duration in ms
    @param orientation: the orientation (-1: undefined)

    This workaround was suggested by U{HaMi<http://stackoverflow.com/users/2571957/hami>}
    '''
    self.__checkTransport()
    # A long touch is a zero-length swipe held for `duration` ms.
    # FIX: `orientation` was passed positionally and landed in drag()'s
    # `steps` parameter (which drag ignores), so the requested orientation
    # was silently dropped; pass it by keyword.
    self.drag((x, y), (x, y), duration, orientation=orientation)
def drag(self, (x0, y0), (x1, y1), duration, steps=1, orientation=-1):
    '''
    Sends drag event n PX (actually it's using C{input swipe} command.

    @param (x0, y0): starting point in PX
    @param (x1, y1): ending point in PX
    @param duration: duration of the event in ms
    @param steps: number of steps (currently ignored by @{input swipe})
    @param orientation: the orientation (-1: undefined)
    '''
    self.__checkTransport()
    if orientation == -1:
        orientation = self.display['orientation']
    (x0, y0) = self.__transformPointByOrientation((x0, y0), orientation, self.display['orientation'])
    (x1, y1) = self.__transformPointByOrientation((x1, y1), orientation, self.display['orientation'])

    version = self.getSdkVersion()
    if version <= 15:
        raise RuntimeError('drag: API <= 15 not supported (version=%d)' % version)
    elif version <= 17:
        # `input swipe` gained its duration argument only at API 18.
        self.shell('input swipe %d %d %d %d' % (x0, y0, x1, y1))
    else:
        self.shell('input touchscreen swipe %d %d %d %d %d' % (x0, y0, x1, y1, duration))
def dragDip(self, (x0, y0), (x1, y1), duration, steps=1, orientation=-1):
    '''
    Sends drag event in DIP (actually it's using C{input swipe} command.

    @param (x0, y0): starting point in DIP
    @param (x1, y1): ending point in DIP
    @param duration: duration of the event in ms
    @param steps: number of steps (currently ignored by @{input swipe}
    '''
    self.__checkTransport()
    if orientation == -1:
        orientation = self.display['orientation']
    # A non-positive cached density means "unknown"; treat it as 1:1.
    density = self.display['density'] if self.display['density'] > 0 else 1
    x0 = x0 * density
    y0 = y0 * density
    x1 = x1 * density
    y1 = y1 * density
    self.drag((x0, y0), (x1, y1), duration, steps, orientation)
def type(self, text):
    '''Types C{text} on the device via `input text`.'''
    self.__checkTransport()
    if type(text) is str:
        # `input text` treats %s as the escape for a literal space, so
        # literal '%s' sequences are escaped first, then spaces encoded.
        escaped = text.replace('%s', '\\%s')
        encoded = escaped.replace(' ', '%s')
    else:
        encoded = str(text);
    # FIXME find out which characters can be dangerous,
    # for example it would not be a bad idea to escape "
    self.shell(u'input text "%s"' % encoded)
def wake(self):
    '''Wakes the device by sending POWER, unless the screen is already on.'''
    self.__checkTransport()
    if self.isScreenOn():
        return
    self.shell('input keyevent POWER')
def isLocked(self):
    '''
    Checks if the device screen is locked.

    @return True if the device screen is locked
    '''
    self.__checkTransport()
    # NOTE(review): relies on the mShowingLockscreen line of
    # `dumpsys window policy`; availability may vary by API level.
    lockScreenRE = re.compile('mShowingLockscreen=(true|false)')
    m = lockScreenRE.search(self.shell('dumpsys window policy'))
    if m:
        return (m.group(1) == 'true')
    raise RuntimeError("Couldn't determine screen lock state")
def isScreenOn(self):
    '''
    Checks if the screen is ON.

    @return True if the device screen is ON
    '''
    self.__checkTransport()
    # NOTE(review): relies on the mScreenOnFully line of
    # `dumpsys window policy`; availability may vary by API level.
    screenOnRE = re.compile('mScreenOnFully=(true|false)')
    m = screenOnRE.search(self.shell('dumpsys window policy'))
    if m:
        return (m.group(1) == 'true')
    raise RuntimeError("Couldn't determine screen ON state")
def unlock(self):
    '''
    Unlocks the screen of the device.
    '''
    self.__checkTransport()
    # MENU dismisses a slide-to-unlock screen; BACK leaves any menu opened.
    self.shell('input keyevent MENU')
    self.shell('input keyevent BACK')
@staticmethod
def percentSame(image1, image2):
    '''
    Returns the fraction (0..1) of pixels that are identical between the
    two images; images of different sizes are 0% the same.

    @author: catshoes
    '''
    width1, height1 = image1.size
    width2, height2 = image2.size
    if (width1, height1) != (width2, height2):
        return 0

    pixels1 = image1.load()
    pixels2 = image2.load()
    matching = sum(
        1
        for col in range(width1)
        for row in range(height1)
        if pixels1[col, row] == pixels2[col, row]
    )
    return matching / float(width1 * height1)
@staticmethod
def sameAs(image1, image2, percent=1.0):
    '''
    Tells whether the two images match to at least C{percent} similarity.

    @author: catshoes
    '''
    return AdbClient.percentSame(image1, image2) >= percent
@staticmethod
def imageInScreen(screen, image):
    '''
    Checks if image appears somewhere within the screen image.

    :param screen: the screen image
    :param image: the partial image to look for
    :return: True or False

    @author: Perry Tsai <ripple0129@gmail.com>
    '''
    # The partial image must be strictly smaller than the screen.
    screenW, screenH = screen.size
    imageW, imageH = image.size
    if screenW <= imageW or screenH <= imageH:
        return False
    screenPixels = screen.load()
    imagePixels = image.load()
    # Use the image's top-left pixel as a cheap anchor before comparing the
    # full candidate region.
    anchor = imagePixels[0, 0]
    for x in range(screenW - imageW):
        for y in range(screenH - imageH):
            if screenPixels[x, y] != anchor:
                continue
            croppedPixels = screen.crop((x, y, x + imageW, y + imageH)).load()
            # FIX: the original returned True as soon as ANY pixel of the
            # crop matched (and its inner loops shadowed the outer x/y);
            # require ALL pixels to match instead, and return an explicit
            # False when nothing matched.
            if all(imagePixels[i, j] == croppedPixels[i, j]
                   for i in range(imageW)
                   for j in range(imageH)):
                return True
    return False
def isKeyboardShown(self):
    '''
    Whether the software keyboard is currently displayed.
    '''
    self.__checkTransport()
    dump = self.shell('dumpsys input_method')
    if not dump:
        return False
    # FIXME: API >= 15 ?
    return "mInputShown=true" in dump
def initDisplayProperties(self):
    '''
    Refreshes the cached display properties (width, height, density and
    orientation) from the device and invalidates the display-info cache.
    '''
    self.__checkTransport()
    self.__displayInfo = None
    for prop in ('width', 'height', 'density', 'orientation'):
        self.display[prop] = self.getProperty('display.' + prop)
def log(self, tag, message, priority='D', verbose=False):
    '''
    Writes a message to the device log (via C{adb shell log}), mimicking
    C{android.util.Log}.

    @param tag: the log tag
    @param message: the message; device template placeholders
                    ($serialno, $focusedwindowname, $timestamp) are expanded
    @param priority: Android log priority letter (V, D, I, W, E)
    @param verbose: if True the message is also echoed to stderr
    '''
    if DEBUG_LOG:
        print >> sys.stderr, "log(tag=%s, message=%s, priority=%s, verbose=%s)" % (tag, message, priority, verbose)
    self.__checkTransport()
    # Expand device placeholders before sending the message to the device.
    message = self.substituteDeviceTemplate(message)
    # 'V' priority messages are always echoed locally as well.
    if verbose or priority == 'V':
        print >> sys.stderr, tag + ':', message
    self.shell('log -p %c -t "%s" %s' % (priority, tag, message))
class __Log():
    '''
    Log class to simulate C{android.util.Log}
    '''

    def __init__(self, adbClient):
        self.adbClient = adbClient

    def __getattr__(self, attr):
        '''
        Returns the corresponding log method or @C{AttributeError}.
        '''
        if attr not in ('v', 'd', 'i', 'w', 'e'):
            raise AttributeError(self.__class__.__name__ + ' has no attribute "%s"' % attr)

        # Delegate to AdbClient.log, deriving the priority letter from the
        # requested attribute name ('d' -> 'D', ...).  Note: 'verbose' is a
        # required positional argument, as in the original lambda.
        def _logger(tag, message, verbose):
            return self.adbClient.log(tag, message, priority=attr.upper(),
                                      verbose=verbose)
        return _logger
def getSystemService(self, name):
    '''
    Returns a handler for the requested system service, or None when the
    service is not supported (only WIFI_SERVICE is implemented).
    '''
    if name != WIFI_SERVICE:
        return None
    return WifiManager(self)
def getWindows(self):
    '''
    Parses the output of C{dumpsys window windows} and returns a dict
    mapping window id -> L{Window}, with the currently focused window
    flagged via its C{focused} attribute.

    The frame/content line formats are version dependent, so separate
    regexes are used for API 10/15 and API >= 16.
    '''
    self.__checkTransport()
    windows = {}
    dww = self.shell('dumpsys window windows')
    if DEBUG_WINDOWS: print >> sys.stderr, dww
    lines = dww.splitlines()
    # Header of each window section, built from the _nd/_nh/_ns helper
    # patterns (named decimal / hex / string groups).
    widRE = re.compile('^ *Window #%s Window\{%s (u\d+ )?%s?.*\}:' %
                       (_nd('num'), _nh('winId'), _ns('activity', greedy=True)))
    currentFocusRE = re.compile('^ mCurrentFocus=Window\{%s .*' % _nh('winId'))
    viewVisibilityRE = re.compile(' mViewVisibility=0x%s ' % _nh('visibility'))
    # This is for 4.0.4 API-15
    containingFrameRE = re.compile('^ *mContainingFrame=\[%s,%s\]\[%s,%s\] mParentFrame=\[%s,%s\]\[%s,%s\]' %
                                   (_nd('cx'), _nd('cy'), _nd('cw'), _nd('ch'), _nd('px'), _nd('py'), _nd('pw'),
                                    _nd('ph')))
    contentFrameRE = re.compile('^ *mContentFrame=\[%s,%s\]\[%s,%s\] mVisibleFrame=\[%s,%s\]\[%s,%s\]' %
                                (_nd('x'), _nd('y'), _nd('w'), _nd('h'), _nd('vx'), _nd('vy'), _nd('vx1'),
                                 _nd('vy1')))
    # This is for 4.1 API-16
    framesRE = re.compile('^ *Frames: containing=\[%s,%s\]\[%s,%s\] parent=\[%s,%s\]\[%s,%s\]' %
                          (_nd('cx'), _nd('cy'), _nd('cw'), _nd('ch'), _nd('px'), _nd('py'), _nd('pw'), _nd('ph')))
    contentRE = re.compile('^ *content=\[%s,%s\]\[%s,%s\] visible=\[%s,%s\]\[%s,%s\]' %
                           (_nd('x'), _nd('y'), _nd('w'), _nd('h'), _nd('vx'), _nd('vy'), _nd('vx1'), _nd('vy1')))
    policyVisibilityRE = re.compile('mPolicyVisibility=%s ' % _ns('policyVisibility', greedy=True))
    currentFocus = None

    for l in range(len(lines)):
        m = widRE.search(lines[l])
        if m:
            # Start of a new window section: collect its attributes from
            # the following lines until the next window header.
            num = int(m.group('num'))
            winId = m.group('winId')
            activity = m.group('activity')
            wvx = 0
            wvy = 0
            wvw = 0
            wvh = 0
            px = 0
            py = 0
            visibility = -1
            policyVisibility = 0x0

            for l2 in range(l + 1, len(lines)):
                m = widRE.search(lines[l2])
                if m:
                    # NOTE(review): rebinding 'l' has no effect on the
                    # outer 'for l in range(...)' iteration, so the lines
                    # of this window are re-scanned by the outer loop --
                    # verify whether that is intentional.
                    l += (l2 - 1)
                    break
                m = viewVisibilityRE.search(lines[l2])
                if m:
                    visibility = int(m.group('visibility'))
                if DEBUG_COORDS: print >> sys.stderr, "getWindows: visibility=", visibility
                if self.build[VERSION_SDK_PROPERTY] >= 17:
                    # API >= 17: coordinates are not parsed here; left 0.
                    wvx, wvy = (0, 0)
                    wvw, wvh = (0, 0)
                if self.build[VERSION_SDK_PROPERTY] >= 16:
                    m = framesRE.search(lines[l2])
                    if m:
                        px, py = obtainPxPy(m)
                        m = contentRE.search(lines[l2 + 1])
                        if m:
                            # FIXME: the information provided by 'dumpsys window windows' in 4.2.1 (API 16)
                            # when there's a system dialog may not be correct and causes the View coordinates
                            # be offset by this amount, see
                            # https://github.com/dtmilano/AndroidViewClient/issues/29
                            wvx, wvy = obtainVxVy(m)
                            wvw, wvh = obtainVwVh(m)
                elif self.build[VERSION_SDK_PROPERTY] == 15:
                    m = containingFrameRE.search(lines[l2])
                    if m:
                        px, py = obtainPxPy(m)
                        m = contentFrameRE.search(lines[l2 + 1])
                        if m:
                            wvx, wvy = obtainVxVy(m)
                            wvw, wvh = obtainVwVh(m)
                elif self.build[VERSION_SDK_PROPERTY] == 10:
                    m = containingFrameRE.search(lines[l2])
                    if m:
                        px, py = obtainPxPy(m)
                        m = contentFrameRE.search(lines[l2 + 1])
                        if m:
                            wvx, wvy = obtainVxVy(m)
                            wvw, wvh = obtainVwVh(m)
                else:
                    warnings.warn("Unsupported Android version %d" % self.build[VERSION_SDK_PROPERTY])

                # print >> sys.stderr, "Searching policyVisibility in", lines[l2]
                m = policyVisibilityRE.search(lines[l2])
                if m:
                    # Policy-hidden windows contribute 0x8 to the combined
                    # visibility value stored in the Window.
                    policyVisibility = 0x0 if m.group('policyVisibility') == 'true' else 0x8

            windows[winId] = Window(num, winId, activity, wvx, wvy, wvw, wvh, px, py, visibility + policyVisibility)
        else:
            # Non-header line at top level: may carry the focus marker.
            m = currentFocusRE.search(lines[l])
            if m:
                currentFocus = m.group('winId')

    # Mark the focused window only if it is known and fully visible.
    if currentFocus in windows and windows[currentFocus].visibility == 0:
        if DEBUG_COORDS:
            print >> sys.stderr, "getWindows: focus=", currentFocus
            print >> sys.stderr, "getWindows:", windows[currentFocus]
        windows[currentFocus].focused = True

    return windows
def getFocusedWindow(self):
    '''
    Gets the focused window.

    @return: The focused L{Window}, or C{None} when no window has focus.
    '''
    return next(
        (candidate for candidate in self.getWindows().values() if candidate.focused),
        None)
def getFocusedWindowName(self):
    '''
    Gets the focused window name.

    This is much like monkeyRunner's C{HierarchyView.getWindowName()}

    @return: The focused window name, or C{None} if no window is focused.
    '''
    focused = self.getFocusedWindow()
    return focused.activity if focused else None
def getTopActivityNameAndPid(self):
    '''
    Obtains the package, activity class and pid of the top Activity from
    C{dumpsys activity top}.

    @return: a C{(package, activity, pid)} tuple of strings, or C{None}
             when the dumpsys output could not be parsed
    '''
    dat = self.shell('dumpsys activity top')
    lines = dat.splitlines()
    activityRE = re.compile('\s*ACTIVITY ([A-Za-z0-9_.]+)/([A-Za-z0-9_.]+) \w+ pid=(\d+)')
    m = activityRE.search(lines[1])
    if not m:
        warnings.warn("NO MATCH:" + lines[1])
        return None
    return (m.group(1), m.group(2), m.group(3))
def getTopActivityName(self):
    '''
    Gets the top activity name as C{package/activity}.

    @return: a "package/activity" string, or C{None} when unavailable.
    '''
    tanp = self.getTopActivityNameAndPid()
    return '%s/%s' % (tanp[0], tanp[1]) if tanp else None
def substituteDeviceTemplate(self, template):
    '''
    Expands the device placeholders C{$serialno}, C{$focusedwindowname}
    and C{$timestamp} in *template*.

    The values are sanitized so they are safe to use in filenames.

    @param template: a C{string.Template}-style template string
    @return: the template with all placeholders substituted
    @raise KeyError: if the template references an unknown placeholder
    '''
    serialno = self.serialno.replace('.', '_').replace(':', '-')
    focusedWindowName = self.getFocusedWindowName().replace('/', '-').replace('.', '_')
    timestamp = datetime.datetime.now().isoformat()
    osName = platform.system()
    if osName.startswith('Windows'):  # ':' not supported in filenames
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, leaving ':' characters in the timestamp on Windows.
        timestamp = timestamp.replace(':', '_')
    _map = {
        'serialno': serialno,
        'focusedwindowname': focusedWindowName,
        'timestamp': timestamp
    }
    return string.Template(template).substitute(_map)
if __name__ == '__main__':
    # Ad-hoc smoke test: requires the ANDROID_SERIAL environment variable
    # to point at a connected device/emulator.
    adbClient = AdbClient(os.environ['ANDROID_SERIAL'])
    INTERACTIVE = False
    if INTERACTIVE:
        # Minimal interactive adb shell REPL: forward each typed command
        # and echo the output until the shell prompt reappears.
        sout = adbClient.shell()
        prompt = re.compile(".+@android:(.*) [$#] \r\r\n")
        while True:
            try:
                cmd = raw_input('adb $ ')
            except EOFError:
                break
            if cmd == 'exit':
                break
            # NOTE(review): '__send' looks like a name-mangled private
            # method of the socket wrapper; this direct access is fragile
            # -- verify it still resolves at runtime.
            adbClient.socket.__send(cmd + "\r\n")
            sout.readline(4096)  # eat first line, which is the command
            while True:
                line = sout.readline(4096)
                if prompt.match(line):
                    break
                print line,
                if not line:
                    break

        print "\nBye"
    else:
        # Non-interactive mode: just run a single command as a sanity check.
        print 'date:', adbClient.shell('date')
| |
#!/usr/bin/env python
import os
import pkg_resources
import sys
from setuptools import setup
import chainerx_build_helper
# Chainer is known to be broken on CPython 3.5.0 exactly; refuse to install
# unless the user explicitly opts in via CHAINER_PYTHON_350_FORCE.
if sys.version_info[:3] == (3, 5, 0):
    if not int(os.getenv('CHAINER_PYTHON_350_FORCE', '0')):
        msg = """
Chainer does not work with Python 3.5.0.
We strongly recommend to use another version of Python.
If you want to use Chainer with Python 3.5.0 at your own risk,
set CHAINER_PYTHON_350_FORCE environment variable to 1."""
        print(msg)
        sys.exit(1)
# Requirement groups.  'install' is mandatory; every other key becomes a
# setuptools "extra".  An entry of the form '-r <key>' pulls in another
# group and is flattened later by reduce_requirements().
requirements = {
    'install': [
        'setuptools',
        'typing_extensions',
        'filelock',
        'numpy>=1.9.0',
        'protobuf>=3.0.0',
        'six>=1.9.0',
    ],
    'stylecheck': [
        'autopep8>=1.4.1,<1.5',
        'flake8>=3.7,<3.8',
        'pycodestyle>=2.5,<2.6',
    ],
    'test': [
        'pytest<4.2.0',  # 4.2.0 is slow collecting tests and times out on CI.
        'attrs<19.2.0',  # pytest 4.1.1 does not run with attrs==19.2.0
        'mock',
    ],
    'doctest': [
        'sphinx==1.8.2',
        'matplotlib',
        'theano',
    ],
    'docs': [
        'sphinx==1.8.2',
        'sphinx_rtd_theme',
        'onnx<1.7.0',
        'packaging',
    ],
    'appveyor': [
        '-r test',
        # pytest-timeout>=1.3.0 requires pytest>=3.6.
        # TODO(niboshi): Consider upgrading pytest to >=3.6
        'pytest-timeout<1.3.0',
    ],
    'jenkins': [
        '-r test',
        # pytest-timeout>=1.3.0 requires pytest>=3.6.
        # TODO(niboshi): Consider upgrading pytest to >=3.6
        'pytest-timeout<1.3.0',
        'pytest-cov<2.10',  # pytest-cov 2.10 requires pytest>=4.6
        'nose',
        'coveralls',
        'codecov',
        'coverage<5',  # Otherwise, Python must be built with sqlite
    ],
}
def reduce_requirements(key):
    """Flatten '-r <other-key>' references in requirements[key] in place.

    Referenced groups are resolved recursively first, then their entries
    are inlined where the '-r' marker appeared.
    """
    resolved = []
    for entry in requirements[key]:
        if entry.startswith('-r'):
            ref = entry[2:].lstrip()
            reduce_requirements(ref)
            resolved.extend(requirements[ref])
        else:
            resolved.append(entry)
    requirements[key] = resolved
# Flatten every group so each list is self-contained, then derive the
# argument lists handed to setup().
for section in list(requirements.keys()):
    reduce_requirements(section)

extras_require = {
    name: reqs for name, reqs in requirements.items() if name != 'install'}
setup_requires = []
install_requires = requirements['install']
tests_require = requirements['test']
def find_any_distribution(pkgs):
    """Return the first installed distribution among *pkgs*, or None."""
    for name in pkgs:
        try:
            return pkg_resources.get_distribution(name)
        except pkg_resources.DistributionNotFound:
            continue
    return None
# Abort if a standalone ChainerMN / ONNX-Chainer installation is present:
# both projects were merged into Chainer itself, and a leftover copy would
# shadow the integrated modules.
for pkg_name in ('ChainerMN', 'ONNX-Chainer'):
    # Distribution names use lowercase with underscores (e.g. 'onnx_chainer').
    distribution_name = pkg_name.lower().replace('-', '_')
    found_error = find_any_distribution([distribution_name])
    if found_error is not None:
        msg = """
We detected that {name} is installed in your environment.
{name} has been integrated to Chainer and no separate installation
is necessary. Please uninstall the old {name} in advance.
"""
        print(msg.format(name=pkg_name))
        exit(1)
here = os.path.abspath(os.path.dirname(__file__))

# Get __version__ variable
# (executes chainer/_version.py in this namespace; the __version__ it
# defines is consumed by setup_kwargs below)
exec(open(os.path.join(here, 'chainer', '_version.py')).read())
# Metadata and package list handed to setuptools.setup() (after
# chainerx_build_helper optionally augments it with ChainerX build settings).
setup_kwargs = dict(
    name='chainer',
    version=__version__,  # NOQA
    description='A flexible framework of neural networks',
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    author='Seiya Tokui',
    author_email='tokui@preferred.jp',
    url='https://chainer.org/',
    license='MIT License',
    # Explicit package list (chainer core plus the merged chainermn and
    # onnx_chainer projects).
    packages=['chainer',
              'chainer.backends',
              'chainer.dataset',
              'chainer.dataset.tabular',
              'chainer.datasets',
              'chainer.distributions',
              'chainer.exporters',
              'chainer.functions',
              'chainer.functions.activation',
              'chainer.functions.array',
              'chainer.functions.connection',
              'chainer.functions.evaluation',
              'chainer.functions.loss',
              'chainer.functions.math',
              'chainer.functions.noise',
              'chainer.functions.normalization',
              'chainer.functions.pooling',
              'chainer.functions.rnn',
              'chainer.functions.theano',
              'chainer.functions.util',
              'chainer.function_hooks',
              'chainer.iterators',
              'chainer.initializers',
              'chainer.links',
              'chainer.links.activation',
              'chainer.links.caffe',
              'chainer.links.caffe.protobuf3',
              'chainer.links.connection',
              'chainer.links.loss',
              'chainer.links.model',
              'chainer.links.model.vision',
              'chainer.links.normalization',
              'chainer.links.rnn',
              'chainer.links.theano',
              'chainer.link_hooks',
              'chainer.graph_optimizations',
              'chainer.optimizers',
              'chainer.optimizer_hooks',
              'chainer.serializers',
              'chainer.testing',
              'chainer.training',
              'chainer.training.extensions',
              'chainer.training.triggers',
              'chainer.training.updaters',
              'chainer.utils',
              'chainermn',
              'chainermn.communicators',
              'chainermn.datasets',
              'chainermn.extensions',
              'chainermn.functions',
              'chainermn.iterators',
              'chainermn.links',
              'chainermn.testing',
              'onnx_chainer',
              'onnx_chainer.functions',
              'onnx_chainer.testing'],
    # Ship the PEP 561 marker so type checkers pick up inline annotations.
    package_data={
        'chainer': ['py.typed'],
    },
    zip_safe=False,
    setup_requires=setup_requires,
    install_requires=install_requires,
    tests_require=tests_require,
    extras_require=extras_require,
    python_requires='>=3.5.0',
)
# ChainerX (the native extension) is only built when explicitly requested
# via CHAINER_BUILD_CHAINERX=1 ...
build_chainerx = 0 != int(os.getenv('CHAINER_BUILD_CHAINERX', '0'))
# ... or when building the docs for the 'chainer' project on Read the Docs.
if (os.getenv('READTHEDOCS', None) == 'True'
        and os.getenv('READTHEDOCS_PROJECT', None) == 'chainer'):
    # ChainerX must be built in order to build the docs (on Read the Docs).
    build_chainerx = True
    # Try to prevent Read the Docs build timeouts.
    os.environ['MAKEFLAGS'] = '-j2'

# Injects the ChainerX extension/cmake settings into setup_kwargs as needed.
chainerx_build_helper.config_setup_kwargs(setup_kwargs, build_chainerx)

setup(**setup_kwargs)
| |
from __future__ import unicode_literals
import json
import os
import boto3
import botocore
from botocore.exceptions import ClientError
from nose.tools import assert_raises
import sure # noqa
from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation
from moto.elbv2 import elbv2_backends
@mock_elbv2
@mock_ec2
def test_create_load_balancer():
    # Build the minimal VPC topology required for an internal ALB.
    client = boto3.client('elbv2', region_name='us-east-1')
    ec2_res = boto3.resource('ec2', region_name='us-east-1')
    sg = ec2_res.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2_res.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    net_a = ec2_res.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a')
    net_b = ec2_res.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b')

    created = client.create_load_balancer(
        Name='my-lb',
        Subnets=[net_a.id, net_b.id],
        SecurityGroups=[sg.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    # The response carries the moto-generated DNS name, ARN, SGs and AZs.
    lb = created.get('LoadBalancers')[0]
    lb.get('DNSName').should.equal("my-lb-1.us-east-1.elb.amazonaws.com")
    lb.get('LoadBalancerArn').should.equal(
        'arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188')
    lb.get('SecurityGroups').should.equal([sg.id])
    lb.get('AvailabilityZones').should.equal([
        {'SubnetId': net_a.id, 'ZoneName': 'us-east-1a'},
        {'SubnetId': net_b.id, 'ZoneName': 'us-east-1b'}])

    # Ensure the tags persisted
    described = client.describe_tags(ResourceArns=[lb.get('LoadBalancerArn')])
    tags = {d['Key']: d['Value']
            for d in described['TagDescriptions'][0]['Tags']}
    tags.should.equal({'key_name': 'a_value'})
@mock_elbv2
@mock_ec2
def test_describe_load_balancers():
    # Provision a single internal balancer spanning two AZs.
    client = boto3.client('elbv2', region_name='us-east-1')
    ec2_res = boto3.resource('ec2', region_name='us-east-1')
    sg = ec2_res.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2_res.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    net_a = ec2_res.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a')
    net_b = ec2_res.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b')
    client.create_load_balancer(
        Name='my-lb',
        Subnets=[net_a.id, net_b.id],
        SecurityGroups=[sg.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    # The balancer is found with no filter, by ARN and by name.
    listing = client.describe_load_balancers()
    listing.get('LoadBalancers').should.have.length_of(1)
    lb = listing.get('LoadBalancers')[0]
    lb.get('LoadBalancerName').should.equal('my-lb')

    by_arn = client.describe_load_balancers(
        LoadBalancerArns=[lb.get('LoadBalancerArn')])
    by_arn.get('LoadBalancers')[0].get(
        'LoadBalancerName').should.equal('my-lb')

    by_name = client.describe_load_balancers(Names=['my-lb'])
    by_name.get('LoadBalancers')[0].get(
        'LoadBalancerName').should.equal('my-lb')

    # Unknown ARNs / names raise ClientError.
    with assert_raises(ClientError):
        client.describe_load_balancers(LoadBalancerArns=['not-a/real/arn'])
    with assert_raises(ClientError):
        client.describe_load_balancers(Names=['nope'])
@mock_elbv2
@mock_ec2
def test_add_remove_tags():
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1b')
    conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    lbs = conn.describe_load_balancers()['LoadBalancers']
    lbs.should.have.length_of(1)
    lb_arn = lbs[0].get('LoadBalancerArn')

    def current_tags():
        # Fetch the balancer's tag set as a plain {key: value} dict.
        descriptions = conn.describe_tags(
            ResourceArns=[lb_arn])['TagDescriptions']
        return {d['Key']: d['Value'] for d in descriptions[0]['Tags']}

    # Tagging an unknown resource fails.
    with assert_raises(ClientError):
        conn.add_tags(ResourceArns=['missing-arn'],
                      Tags=[{'Key': 'a', 'Value': 'b'}])

    conn.add_tags(ResourceArns=[lb_arn],
                  Tags=[{'Key': 'a', 'Value': 'b'}])
    current_tags().should.have.key('a').which.should.equal('b')

    # Fill the tag set up to the 10-tag limit ('a' is re-sent unchanged).
    conn.add_tags(ResourceArns=[lb_arn],
                  Tags=[{'Key': k, 'Value': 'b'}
                        for k in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j']])

    # An 11th distinct tag exceeds the limit.
    conn.add_tags.when.called_with(
        ResourceArns=[lb_arn],
        Tags=[{'Key': 'k', 'Value': 'b'}]).should.throw(
        botocore.exceptions.ClientError)

    # Re-tagging an existing key overwrites its value.
    conn.add_tags(ResourceArns=[lb_arn],
                  Tags=[{'Key': 'j', 'Value': 'c'}])

    tags = current_tags()
    for key in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']:
        tags.should.have.key(key).which.should.equal('b')
    tags.should.have.key('j').which.should.equal('c')
    tags.shouldnt.have.key('k')

    # remove_tags deletes only the named key.
    conn.remove_tags(ResourceArns=[lb_arn], TagKeys=['a'])
    tags = current_tags()
    tags.shouldnt.have.key('a')
    for key in ['b', 'c', 'd', 'e', 'f', 'g', 'h']:
        tags.should.have.key(key).which.should.equal('b')
    tags.should.have.key('j').which.should.equal('c')
@mock_elbv2
@mock_ec2
def test_create_elb_in_multiple_region():
    # The same balancer name may exist independently in different regions.
    for region in ('us-west-1', 'us-west-2'):
        client = boto3.client('elbv2', region_name=region)
        ec2_res = boto3.resource('ec2', region_name=region)
        sg = ec2_res.create_security_group(
            GroupName='a-security-group', Description='First One')
        vpc = ec2_res.create_vpc(
            CidrBlock='172.28.7.0/24',
            InstanceTenancy='default')
        net_a = ec2_res.create_subnet(
            VpcId=vpc.id,
            CidrBlock='172.28.7.0/26',
            AvailabilityZone=region + 'a')
        net_b = ec2_res.create_subnet(
            VpcId=vpc.id,
            CidrBlock='172.28.7.192/26',
            AvailabilityZone=region + 'b')
        client.create_load_balancer(
            Name='my-lb',
            Subnets=[net_a.id, net_b.id],
            SecurityGroups=[sg.id],
            Scheme='internal',
            Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    # Each region sees exactly its own balancer.
    for region in ('us-west-1', 'us-west-2'):
        balancers = boto3.client(
            'elbv2',
            region_name=region).describe_load_balancers().get('LoadBalancers')
        list(balancers).should.have.length_of(1)
@mock_elbv2
@mock_ec2
def test_create_target_group_and_listeners():
    '''
    End-to-end flow: load balancer -> target group -> HTTP and HTTPS
    listeners, then teardown, verifying the in-use dependency rules.
    '''
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')

    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')

    response = conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')

    # Can't create a target group with an invalid protocol
    # ('/HTTP' below is deliberately malformed).
    with assert_raises(ClientError):
        conn.create_target_group(
            Name='a-target',
            Protocol='HTTP',
            Port=8080,
            VpcId=vpc.id,
            HealthCheckProtocol='/HTTP',
            HealthCheckPort='8080',
            HealthCheckPath='/',
            HealthCheckIntervalSeconds=5,
            HealthCheckTimeoutSeconds=5,
            HealthyThresholdCount=5,
            UnhealthyThresholdCount=2,
            Matcher={'HttpCode': '200'})
    response = conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    target_group = response.get('TargetGroups')[0]
    target_group_arn = target_group['TargetGroupArn']

    # Add tags to the target group
    conn.add_tags(ResourceArns=[target_group_arn], Tags=[
        {'Key': 'target', 'Value': 'group'}])
    conn.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'].should.equal(
        [{'Key': 'target', 'Value': 'group'}])

    # Check it's in the describe_target_groups response
    response = conn.describe_target_groups()
    response.get('TargetGroups').should.have.length_of(1)

    # Plain HTTP listener
    response = conn.create_listener(
        LoadBalancerArn=load_balancer_arn,
        Protocol='HTTP',
        Port=80,
        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}])
    listener = response.get('Listeners')[0]
    listener.get('Port').should.equal(80)
    listener.get('Protocol').should.equal('HTTP')
    listener.get('DefaultActions').should.equal([{
        'TargetGroupArn': target_group.get('TargetGroupArn'),
        'Type': 'forward'}])
    http_listener_arn = listener.get('ListenerArn')

    # Target groups can also be filtered by load balancer and name.
    response = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn,
                                           Names=['a-target'])
    response.get('TargetGroups').should.have.length_of(1)

    # And another with SSL
    response = conn.create_listener(
        LoadBalancerArn=load_balancer_arn,
        Protocol='HTTPS',
        Port=443,
        Certificates=[
            {'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}],
        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}])
    listener = response.get('Listeners')[0]
    listener.get('Port').should.equal(443)
    listener.get('Protocol').should.equal('HTTPS')
    listener.get('Certificates').should.equal([{
        'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert',
    }])
    listener.get('DefaultActions').should.equal([{
        'TargetGroupArn': target_group.get('TargetGroupArn'),
        'Type': 'forward'}])

    https_listener_arn = listener.get('ListenerArn')

    # Listeners are retrievable per balancer or by explicit ARN(s).
    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
    response.get('Listeners').should.have.length_of(2)
    response = conn.describe_listeners(ListenerArns=[https_listener_arn])
    response.get('Listeners').should.have.length_of(1)
    listener = response.get('Listeners')[0]
    listener.get('Port').should.equal(443)
    listener.get('Protocol').should.equal('HTTPS')

    response = conn.describe_listeners(
        ListenerArns=[
            http_listener_arn,
            https_listener_arn])
    response.get('Listeners').should.have.length_of(2)

    # Try to delete the target group and it fails because there's a
    # listener referencing it
    with assert_raises(ClientError) as e:
        conn.delete_target_group(
            TargetGroupArn=target_group.get('TargetGroupArn'))
    e.exception.operation_name.should.equal('DeleteTargetGroup')
    e.exception.args.should.equal(("An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", ))  # NOQA

    # Delete one listener
    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
    response.get('Listeners').should.have.length_of(2)
    conn.delete_listener(ListenerArn=http_listener_arn)
    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
    response.get('Listeners').should.have.length_of(1)

    # Then delete the load balancer
    conn.delete_load_balancer(LoadBalancerArn=load_balancer_arn)

    # It's gone
    response = conn.describe_load_balancers()
    response.get('LoadBalancers').should.have.length_of(0)

    # And it deleted the remaining listener
    response = conn.describe_listeners(
        ListenerArns=[
            http_listener_arn,
            https_listener_arn])
    response.get('Listeners').should.have.length_of(0)

    # But not the target groups
    response = conn.describe_target_groups()
    response.get('TargetGroups').should.have.length_of(1)

    # Which we'll now delete
    conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn'))
    response = conn.describe_target_groups()
    response.get('TargetGroups').should.have.length_of(0)
@mock_elbv2
@mock_ec2
def test_create_target_group_without_non_required_parameters():
    client = boto3.client('elbv2', region_name='us-east-1')
    ec2_res = boto3.resource('ec2', region_name='us-east-1')
    sg = ec2_res.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2_res.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    net_a = ec2_res.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a')
    net_b = ec2_res.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b')
    client.create_load_balancer(
        Name='my-lb',
        Subnets=[net_a.id, net_b.id],
        SecurityGroups=[sg.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    # Optional health-check parameters (e.g. HealthCheckIntervalSeconds,
    # which defaults to 30 seconds) may be omitted; creation still succeeds.
    created = client.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080'
    )
    created.get('TargetGroups')[0].should_not.be.none
@mock_elbv2
@mock_ec2
def test_create_invalid_target_group():
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')

    def make_target_group(name):
        # All parameters other than the name are valid and held constant.
        return conn.create_target_group(
            Name=name,
            Protocol='HTTP',
            Port=8080,
            VpcId=vpc.id,
            HealthCheckProtocol='HTTP',
            HealthCheckPort='8080',
            HealthCheckPath='/',
            HealthCheckIntervalSeconds=5,
            HealthCheckTimeoutSeconds=5,
            HealthyThresholdCount=5,
            UnhealthyThresholdCount=2,
            Matcher={'HttpCode': '200'})

    # Fail to create target group with name which length is 33
    with assert_raises(ClientError):
        make_target_group('A' * 33)

    # Leading/trailing hyphens, dots, '@' and double hyphens are rejected.
    for bad_name in ['-name', 'name-', '-name-', 'example.com',
                     'test@test', 'Na--me']:
        with assert_raises(ClientError):
            make_target_group(bad_name)

    # Plain alphanumeric names are accepted.
    for good_name in ['name', 'Name', '000']:
        make_target_group(good_name)
@mock_elbv2
@mock_ec2
def test_describe_paginated_balancers():
    client = boto3.client('elbv2', region_name='us-east-1')
    ec2_res = boto3.resource('ec2', region_name='us-east-1')
    sg = ec2_res.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2_res.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    net_a = ec2_res.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a')
    net_b = ec2_res.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b')

    # 51 balancers: exactly one more than the 50-item page size.
    for index in range(51):
        client.create_load_balancer(
            Name='my-lb%d' % index,
            Subnets=[net_a.id, net_b.id],
            SecurityGroups=[sg.id],
            Scheme='internal',
            Tags=[{'Key': 'key_name', 'Value': 'a_value'}])

    # First page: 50 results plus a marker pointing at the last entry.
    first_page = client.describe_load_balancers()
    first_page['LoadBalancers'].should.have.length_of(50)
    first_page['NextMarker'].should.equal(
        first_page['LoadBalancers'][-1]['LoadBalancerName'])

    # Second page: the single remaining balancer and no continuation token.
    second_page = client.describe_load_balancers(
        Marker=first_page['NextMarker'])
    second_page['LoadBalancers'].should.have.length_of(1)
    assert 'NextToken' not in second_page.keys()
@mock_elbv2
@mock_ec2
def test_delete_load_balancer():
    client = boto3.client('elbv2', region_name='us-east-1')
    ec2_res = boto3.resource('ec2', region_name='us-east-1')
    sg = ec2_res.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2_res.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    net_a = ec2_res.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a')
    net_b = ec2_res.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b')
    created = client.create_load_balancer(
        Name='my-lb',
        Subnets=[net_a.id, net_b.id],
        SecurityGroups=[sg.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    created.get('LoadBalancers').should.have.length_of(1)

    # Deleting by ARN removes the balancer from subsequent listings.
    arn = created.get('LoadBalancers')[0].get('LoadBalancerArn')
    client.delete_load_balancer(LoadBalancerArn=arn)
    remaining = client.describe_load_balancers().get('LoadBalancers')
    remaining.should.have.length_of(0)
@mock_ec2
@mock_elbv2
def test_register_targets():
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.192/26', AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.0/26', AvailabilityZone='us-east-1b')
    conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    group_arn = conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'}).get('TargetGroups')[0].get('TargetGroupArn')

    def health_count():
        # Number of registered targets according to describe_target_health.
        health = conn.describe_target_health(TargetGroupArn=group_arn)
        return len(health.get('TargetHealthDescriptions'))

    # No targets registered yet.
    assert health_count() == 0

    instances = ec2.create_instances(
        ImageId='ami-1234abcd', MinCount=2, MaxCount=2)
    conn.register_targets(
        TargetGroupArn=group_arn,
        Targets=[
            {'Id': instances[0].id, 'Port': 5060},
            {'Id': instances[1].id, 'Port': 4030},
        ])
    assert health_count() == 2

    # Deregistering one target leaves the other in place.
    conn.deregister_targets(
        TargetGroupArn=group_arn,
        Targets=[{'Id': instances[1].id}])
    assert health_count() == 1
@mock_ec2
@mock_elbv2
def test_stopped_instance_target():
    """Stopping a registered instance flips its reported target health.

    A running registered instance reports 'healthy'; after instance.stop()
    the same target reports 'unused' with reason 'Target.InvalidState'.
    """
    target_group_port = 8080
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    # Note: no explicit HealthCheckPort here, so HealthCheckPort should
    # fall back to the target group port (asserted below).
    response = conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=target_group_port,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    target_group = response.get('TargetGroups')[0]
    # No targets registered yet
    response = conn.describe_target_health(
        TargetGroupArn=target_group.get('TargetGroupArn'))
    response.get('TargetHealthDescriptions').should.have.length_of(0)
    response = ec2.create_instances(
        ImageId='ami-1234abcd', MinCount=1, MaxCount=1)
    instance = response[0]
    target_dict = {
        'Id': instance.id,
        'Port': 500
    }
    response = conn.register_targets(
        TargetGroupArn=target_group.get('TargetGroupArn'),
        Targets=[target_dict])
    response = conn.describe_target_health(
        TargetGroupArn=target_group.get('TargetGroupArn'))
    response.get('TargetHealthDescriptions').should.have.length_of(1)
    target_health_description = response.get('TargetHealthDescriptions')[0]
    target_health_description['Target'].should.equal(target_dict)
    target_health_description['HealthCheckPort'].should.equal(str(target_group_port))
    target_health_description['TargetHealth'].should.equal({
        'State': 'healthy'
    })
    instance.stop()
    # After stopping, the target is still listed but reported as unused
    response = conn.describe_target_health(
        TargetGroupArn=target_group.get('TargetGroupArn'))
    response.get('TargetHealthDescriptions').should.have.length_of(1)
    target_health_description = response.get('TargetHealthDescriptions')[0]
    target_health_description['Target'].should.equal(target_dict)
    target_health_description['HealthCheckPort'].should.equal(str(target_group_port))
    target_health_description['TargetHealth'].should.equal({
        'State': 'unused',
        'Reason': 'Target.InvalidState',
        'Description': 'Target is in the stopped state'
    })
@mock_ec2
@mock_elbv2
def test_target_group_attributes():
    """Exercise describe/modify_target_group_attributes.

    Checks that a fresh target group carries the two default attributes
    (deregistration delay and stickiness flag), then enables lb_cookie
    stickiness and verifies both the modify response (only the updated
    keys) and the full attribute list afterwards.
    """
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    # Load balancer is created only to mirror the other tests' setup;
    # its response is not needed for attribute checks.
    conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    response = conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    target_group = response.get('TargetGroups')[0]
    # Check it's in the describe_target_groups response
    response = conn.describe_target_groups()
    response.get('TargetGroups').should.have.length_of(1)
    # check if Names filter works (an empty Names list must not error)
    response = conn.describe_target_groups(Names=[])
    response = conn.describe_target_groups(Names=['a-target'])
    response.get('TargetGroups').should.have.length_of(1)
    target_group_arn = target_group['TargetGroupArn']
    # The attributes should start with the two defaults
    response = conn.describe_target_group_attributes(
        TargetGroupArn=target_group_arn)
    response['Attributes'].should.have.length_of(2)
    attributes = {attr['Key']: attr['Value']
                  for attr in response['Attributes']}
    attributes['deregistration_delay.timeout_seconds'].should.equal('300')
    attributes['stickiness.enabled'].should.equal('false')
    # Add cookie stickiness
    response = conn.modify_target_group_attributes(
        TargetGroupArn=target_group_arn,
        Attributes=[
            {
                'Key': 'stickiness.enabled',
                'Value': 'true',
            },
            {
                'Key': 'stickiness.type',
                'Value': 'lb_cookie',
            },
        ])
    # The response should have only the keys updated
    response['Attributes'].should.have.length_of(2)
    attributes = {attr['Key']: attr['Value']
                  for attr in response['Attributes']}
    attributes['stickiness.type'].should.equal('lb_cookie')
    attributes['stickiness.enabled'].should.equal('true')
    # These new values should be in the full attribute list
    response = conn.describe_target_group_attributes(
        TargetGroupArn=target_group_arn)
    response['Attributes'].should.have.length_of(3)
    attributes = {attr['Key']: attr['Value']
                  for attr in response['Attributes']}
    attributes['stickiness.type'].should.equal('lb_cookie')
    attributes['stickiness.enabled'].should.equal('true')
@mock_elbv2
@mock_ec2
def test_handle_listener_rules():
    """End-to-end exercise of listener rule APIs.

    Covers create_rule (including validation failures: bad protocol,
    PriorityInUse, bad action type, bad target group ARN, bad condition
    field, empty and multi-valued conditions), describe_rules (sorting,
    ARN filter, pagination, invalid parameter combinations), modify_rule,
    set_rule_priorities, and delete_rule.
    """
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
    # Can't create a target group with an invalid protocol
    with assert_raises(ClientError):
        conn.create_target_group(
            Name='a-target',
            Protocol='HTTP',
            Port=8080,
            VpcId=vpc.id,
            HealthCheckProtocol='/HTTP',
            HealthCheckPort='8080',
            HealthCheckPath='/',
            HealthCheckIntervalSeconds=5,
            HealthCheckTimeoutSeconds=5,
            HealthyThresholdCount=5,
            UnhealthyThresholdCount=2,
            Matcher={'HttpCode': '200'})
    response = conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    target_group = response.get('TargetGroups')[0]
    # Plain HTTP listener
    response = conn.create_listener(
        LoadBalancerArn=load_balancer_arn,
        Protocol='HTTP',
        Port=80,
        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}])
    listener = response.get('Listeners')[0]
    listener.get('Port').should.equal(80)
    listener.get('Protocol').should.equal('HTTP')
    listener.get('DefaultActions').should.equal([{
        'TargetGroupArn': target_group.get('TargetGroupArn'),
        'Type': 'forward'}])
    http_listener_arn = listener.get('ListenerArn')
    # create first rule
    priority = 100
    host = 'xxx.example.com'
    path_pattern = 'foobar'
    created_rule = conn.create_rule(
        ListenerArn=http_listener_arn,
        Priority=priority,
        Conditions=[{
            'Field': 'host-header',
            'Values': [host]
        },
        {
            'Field': 'path-pattern',
            'Values': [path_pattern]
        }],
        Actions=[{
            'TargetGroupArn': target_group.get('TargetGroupArn'),
            'Type': 'forward'
        }]
    )['Rules'][0]
    created_rule['Priority'].should.equal('100')
    # check if rules is sorted by priority
    priority = 50
    host = 'yyy.example.com'
    path_pattern = 'foobar'
    rules = conn.create_rule(
        ListenerArn=http_listener_arn,
        Priority=priority,
        Conditions=[{
            'Field': 'host-header',
            'Values': [host]
        },
        {
            'Field': 'path-pattern',
            'Values': [path_pattern]
        }],
        Actions=[{
            'TargetGroupArn': target_group.get('TargetGroupArn'),
            'Type': 'forward'
        }]
    )
    # test for PriorityInUse
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=priority,
            Conditions=[{
                'Field': 'host-header',
                'Values': [host]
            },
            {
                'Field': 'path-pattern',
                'Values': [path_pattern]
            }],
            Actions=[{
                'TargetGroupArn': target_group.get('TargetGroupArn'),
                'Type': 'forward'
            }]
        )
    # test describe_rules: rules come back sorted by priority, default last
    obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn)
    len(obtained_rules['Rules']).should.equal(3)
    priorities = [rule['Priority'] for rule in obtained_rules['Rules']]
    priorities.should.equal(['50', '100', 'default'])
    first_rule = obtained_rules['Rules'][0]
    second_rule = obtained_rules['Rules'][1]
    obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']])
    obtained_rules['Rules'].should.equal([first_rule])
    # test for pagination
    obtained_rules = conn.describe_rules(
        ListenerArn=http_listener_arn, PageSize=1)
    len(obtained_rules['Rules']).should.equal(1)
    obtained_rules.should.have.key('NextMarker')
    next_marker = obtained_rules['NextMarker']
    following_rules = conn.describe_rules(
        ListenerArn=http_listener_arn,
        PageSize=1,
        Marker=next_marker)
    len(following_rules['Rules']).should.equal(1)
    following_rules.should.have.key('NextMarker')
    following_rules['Rules'][0]['RuleArn'].should_not.equal(
        obtained_rules['Rules'][0]['RuleArn'])
    # test for invalid describe rule request: neither or both of
    # ListenerArn and RuleArns must be rejected
    with assert_raises(ClientError):
        conn.describe_rules()
    with assert_raises(ClientError):
        conn.describe_rules(RuleArns=[])
    with assert_raises(ClientError):
        conn.describe_rules(
            ListenerArn=http_listener_arn,
            RuleArns=[first_rule['RuleArn']]
        )
    # modify rule partially
    new_host = 'new.example.com'
    new_path_pattern = 'new_path'
    modified_rule = conn.modify_rule(
        RuleArn=first_rule['RuleArn'],
        Conditions=[{
            'Field': 'host-header',
            'Values': [new_host]
        },
        {
            'Field': 'path-pattern',
            'Values': [new_path_pattern]
        }]
    )['Rules'][0]
    rules = conn.describe_rules(ListenerArn=http_listener_arn)
    obtained_rule = rules['Rules'][0]
    modified_rule.should.equal(obtained_rule)
    obtained_rule['Conditions'][0]['Values'][0].should.equal(new_host)
    obtained_rule['Conditions'][1]['Values'][0].should.equal(new_path_pattern)
    obtained_rule['Actions'][0]['TargetGroupArn'].should.equal(
        target_group.get('TargetGroupArn'))
    # modify priority
    conn.set_rule_priorities(
        RulePriorities=[
            {'RuleArn': first_rule['RuleArn'],
             'Priority': int(first_rule['Priority']) - 1}
        ]
    )
    # the same priority may not be assigned to two rules at once
    with assert_raises(ClientError):
        conn.set_rule_priorities(
            RulePriorities=[
                {'RuleArn': first_rule['RuleArn'], 'Priority': 999},
                {'RuleArn': second_rule['RuleArn'], 'Priority': 999}
            ]
        )
    # delete
    arn = first_rule['RuleArn']
    conn.delete_rule(RuleArn=arn)
    rules = conn.describe_rules(ListenerArn=http_listener_arn)['Rules']
    len(rules).should.equal(2)
    # test for invalid action type
    safe_priority = 2
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=safe_priority,
            Conditions=[{
                'Field': 'host-header',
                'Values': [host]
            },
            {
                'Field': 'path-pattern',
                'Values': [path_pattern]
            }],
            Actions=[{
                'TargetGroupArn': target_group.get('TargetGroupArn'),
                'Type': 'forward2'
            }]
        )
    # test for invalid target group arn
    safe_priority = 2
    invalid_target_group_arn = target_group.get('TargetGroupArn') + 'x'
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=safe_priority,
            Conditions=[{
                'Field': 'host-header',
                'Values': [host]
            },
            {
                'Field': 'path-pattern',
                'Values': [path_pattern]
            }],
            Actions=[{
                'TargetGroupArn': invalid_target_group_arn,
                'Type': 'forward'
            }]
        )
    # test for invalid condition field_name
    safe_priority = 2
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=safe_priority,
            Conditions=[{
                'Field': 'xxxxxxx',
                'Values': [host]
            }],
            Actions=[{
                'TargetGroupArn': target_group.get('TargetGroupArn'),
                'Type': 'forward'
            }]
        )
    # test for empty condition value
    safe_priority = 2
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=safe_priority,
            Conditions=[{
                'Field': 'host-header',
                'Values': []
            }],
            Actions=[{
                'TargetGroupArn': target_group.get('TargetGroupArn'),
                'Type': 'forward'
            }]
        )
    # test for multiple condition value
    safe_priority = 2
    with assert_raises(ClientError):
        conn.create_rule(
            ListenerArn=http_listener_arn,
            Priority=safe_priority,
            Conditions=[{
                'Field': 'host-header',
                'Values': [host, host]
            }],
            Actions=[{
                'TargetGroupArn': target_group.get('TargetGroupArn'),
                'Type': 'forward'
            }]
        )
@mock_elbv2
@mock_ec2
def test_describe_invalid_target_group():
    """describe_target_groups with an unknown name must raise ClientError."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    # Return values are unused; the LB and target group only need to exist
    # so the Names filter has something real to miss.
    conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    # Check error raises correctly
    with assert_raises(ClientError):
        conn.describe_target_groups(Names=['invalid'])
@mock_elbv2
@mock_ec2
def test_describe_target_groups_no_arguments():
    """describe_target_groups with no filters returns every target group."""
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    # Return values are unused; only the single target group below matters.
    conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    conn.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    assert len(conn.describe_target_groups()['TargetGroups']) == 1
@mock_elbv2
def test_describe_account_limits():
    """Each entry returned by describe_account_limits has a Name and a Max."""
    elb = boto3.client('elbv2', region_name='eu-central-1')
    limits = elb.describe_account_limits()['Limits']
    limits[0].should.contain('Name')
    limits[0].should.contain('Max')
@mock_elbv2
def test_describe_ssl_policies():
    """Unfiltered describe_ssl_policies returns all 5 policies; the Names
    filter narrows the result to the requested ones."""
    elb = boto3.client('elbv2', region_name='eu-central-1')
    all_policies = elb.describe_ssl_policies()['SslPolicies']
    len(all_policies).should.equal(5)
    wanted = ['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08']
    filtered = elb.describe_ssl_policies(Names=wanted)['SslPolicies']
    len(filtered).should.equal(2)
@mock_elbv2
@mock_ec2
def test_set_ip_address_type():
    """set_ip_address_type('dualstack') is rejected for internal load
    balancers and accepted for internet-facing ones."""
    elb = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    sg = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnets = [
        ec2.create_subnet(
            VpcId=vpc.id, CidrBlock='172.28.7.192/26',
            AvailabilityZone='us-east-1a'),
        ec2.create_subnet(
            VpcId=vpc.id, CidrBlock='172.28.7.0/26',
            AvailabilityZone='us-east-1b'),
    ]
    internal = elb.create_load_balancer(
        Name='my-lb',
        Subnets=[net.id for net in subnets],
        SecurityGroups=[sg.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    internal_arn = internal['LoadBalancers'][0]['LoadBalancerArn']
    # Internal LBs cant be dualstack yet
    with assert_raises(ClientError):
        elb.set_ip_address_type(
            LoadBalancerArn=internal_arn,
            IpAddressType='dualstack'
        )
    # Create internet facing one
    public = elb.create_load_balancer(
        Name='my-lb2',
        Subnets=[net.id for net in subnets],
        SecurityGroups=[sg.id],
        Scheme='internet-facing',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    public_arn = public['LoadBalancers'][0]['LoadBalancerArn']
    elb.set_ip_address_type(
        LoadBalancerArn=public_arn,
        IpAddressType='dualstack'
    )
@mock_elbv2
@mock_ec2
def test_set_security_groups():
    """set_security_groups replaces the LB's security group list; an
    unknown group id must be rejected with ClientError."""
    elb = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    sg_one = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    sg_two = ec2.create_security_group(
        GroupName='b-security-group', Description='Second One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet_a = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet_b = ec2.create_subnet(
        VpcId=vpc.id, CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    created = elb.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet_a.id, subnet_b.id],
        SecurityGroups=[sg_one.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    lb_arn = created['LoadBalancers'][0]['LoadBalancerArn']
    # Attach both groups and confirm the change is visible via describe.
    elb.set_security_groups(
        LoadBalancerArn=lb_arn,
        SecurityGroups=[sg_one.id, sg_two.id]
    )
    described = elb.describe_load_balancers(LoadBalancerArns=[lb_arn])
    len(described['LoadBalancers'][0]['SecurityGroups']).should.equal(2)
    # A non-existent security group id is rejected.
    with assert_raises(ClientError):
        elb.set_security_groups(
            LoadBalancerArn=lb_arn,
            SecurityGroups=['non_existant']
        )
@mock_elbv2
@mock_ec2
def test_set_subnets():
    """set_subnets replaces the LB's subnet set.

    A three-subnet update succeeds (three AZs visible afterwards); a
    single-subnet update and an update with two subnets in the same AZ
    are both rejected with ClientError.
    """
    client = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.64/26',
        AvailabilityZone='us-east-1b')
    subnet3 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1c')
    response = client.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    arn = response['LoadBalancers'][0]['LoadBalancerArn']
    client.set_subnets(
        LoadBalancerArn=arn,
        Subnets=[subnet1.id, subnet2.id, subnet3.id]
    )
    resp = client.describe_load_balancers(LoadBalancerArns=[arn])
    len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3)
    # Only 1 AZ
    with assert_raises(ClientError):
        client.set_subnets(
            LoadBalancerArn=arn,
            Subnets=[subnet1.id]
        )
    # Multiple subnets in same AZ
    with assert_raises(ClientError):
        client.set_subnets(
            LoadBalancerArn=arn,
            Subnets=[subnet1.id, subnet2.id, subnet2.id]
        )
@mock_elbv2
@mock_ec2
def test_modify_load_balancer_attributes_idle_timeout():
    """modify_load_balancer_attributes persists idle_timeout.timeout_seconds.

    Renamed: this function was previously a second ``def test_set_subnets``,
    which shadowed the real subnet test above so it never ran, and the body
    actually exercises load balancer attributes, not subnets.
    """
    client = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.0/26',
        AvailabilityZone='us-east-1b')
    response = client.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    arn = response['LoadBalancers'][0]['LoadBalancerArn']
    client.modify_load_balancer_attributes(
        LoadBalancerArn=arn,
        Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}]
    )
    # Check its 600 not 60
    response = client.describe_load_balancer_attributes(
        LoadBalancerArn=arn
    )
    idle_timeout = list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0]
    idle_timeout['Value'].should.equal('600')
@mock_elbv2
@mock_ec2
def test_modify_target_group():
    """modify_target_group updates every health-check setting and the new
    values round-trip through describe_target_groups.
    """
    client = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    response = client.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    arn = response.get('TargetGroups')[0]['TargetGroupArn']
    # Change every health-check related value away from its creation value
    client.modify_target_group(
        TargetGroupArn=arn,
        HealthCheckProtocol='HTTPS',
        HealthCheckPort='8081',
        HealthCheckPath='/status',
        HealthCheckIntervalSeconds=10,
        HealthCheckTimeoutSeconds=10,
        HealthyThresholdCount=10,
        UnhealthyThresholdCount=4,
        Matcher={'HttpCode': '200-399'}
    )
    response = client.describe_target_groups(
        TargetGroupArns=[arn]
    )
    response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399')
    response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10)
    response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status')
    response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081')
    response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS')
    response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10)
    response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10)
    response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4)
@mock_elbv2
@mock_ec2
@mock_acm
def test_modify_listener_http_to_https():
    """Upgrade a plain HTTP listener to HTTPS via modify_listener.

    Requests two ACM certificates, switches the listener to HTTPS with
    both attached (one marked default), then verifies that omitting a
    default certificate and supplying an unknown certificate ARN both
    raise ClientError.
    """
    client = boto3.client('elbv2', region_name='eu-central-1')
    acm = boto3.client('acm', region_name='eu-central-1')
    ec2 = boto3.resource('ec2', region_name='eu-central-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='eu-central-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.0/26',
        AvailabilityZone='eu-central-1b')
    response = client.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
    response = client.create_target_group(
        Name='a-target',
        Protocol='HTTP',
        Port=8080,
        VpcId=vpc.id,
        HealthCheckProtocol='HTTP',
        HealthCheckPort='8080',
        HealthCheckPath='/',
        HealthCheckIntervalSeconds=5,
        HealthCheckTimeoutSeconds=5,
        HealthyThresholdCount=5,
        UnhealthyThresholdCount=2,
        Matcher={'HttpCode': '200'})
    target_group = response.get('TargetGroups')[0]
    target_group_arn = target_group['TargetGroupArn']
    # Plain HTTP listener
    response = client.create_listener(
        LoadBalancerArn=load_balancer_arn,
        Protocol='HTTP',
        Port=80,
        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}]
    )
    listener_arn = response['Listeners'][0]['ListenerArn']
    response = acm.request_certificate(
        DomainName='google.com',
        SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
    )
    google_arn = response['CertificateArn']
    response = acm.request_certificate(
        DomainName='yahoo.com',
        SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'],
    )
    yahoo_arn = response['CertificateArn']
    # Switch the listener to HTTPS with both certificates attached
    response = client.modify_listener(
        ListenerArn=listener_arn,
        Port=443,
        Protocol='HTTPS',
        SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
        Certificates=[
            {'CertificateArn': google_arn, 'IsDefault': False},
            {'CertificateArn': yahoo_arn, 'IsDefault': True}
        ],
        DefaultActions=[
            {'Type': 'forward', 'TargetGroupArn': target_group_arn}
        ]
    )
    response['Listeners'][0]['Port'].should.equal(443)
    response['Listeners'][0]['Protocol'].should.equal('HTTPS')
    response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01')
    len(response['Listeners'][0]['Certificates']).should.equal(2)
    # Check default cert, can't do this in server mode
    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false':
        listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn]
        listener.certificate.should.equal(yahoo_arn)
    # No default cert
    with assert_raises(ClientError):
        client.modify_listener(
            ListenerArn=listener_arn,
            Port=443,
            Protocol='HTTPS',
            SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
            Certificates=[
                {'CertificateArn': google_arn, 'IsDefault': False}
            ],
            DefaultActions=[
                {'Type': 'forward', 'TargetGroupArn': target_group_arn}
            ]
        )
    # Bad cert
    with assert_raises(ClientError):
        client.modify_listener(
            ListenerArn=listener_arn,
            Port=443,
            Protocol='HTTPS',
            SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
            Certificates=[
                {'CertificateArn': 'lalala', 'IsDefault': True}
            ],
            DefaultActions=[
                {'Type': 'forward', 'TargetGroupArn': target_group_arn}
            ]
        )
@mock_ec2
@mock_elbv2
@mock_cloudformation
def test_create_target_groups_through_cloudformation():
    """Create three target groups via a CloudFormation stack.

    Two groups have no explicit Name (CloudFormation must generate one
    from the stack name) and one is named explicitly.
    """
    cfn_conn = boto3.client('cloudformation', region_name='us-east-1')
    elbv2_client = boto3.client('elbv2', region_name='us-east-1')

    # test that setting a name manually as well as letting cloudformation create a name both work
    # this is a special case because test groups have a name length limit of 22 characters, and must be unique
    # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "ECS Cluster Test CloudFormation",
        "Resources": {
            "testVPC": {
                "Type": "AWS::EC2::VPC",
                "Properties": {
                    "CidrBlock": "10.0.0.0/16",
                },
            },
            "testGroup1": {
                "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
                "Properties": {
                    "Port": 80,
                    "Protocol": "HTTP",
                    "VpcId": {"Ref": "testVPC"},
                },
            },
            "testGroup2": {
                "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
                "Properties": {
                    "Port": 90,
                    "Protocol": "HTTP",
                    "VpcId": {"Ref": "testVPC"},
                },
            },
            "testGroup3": {
                "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
                "Properties": {
                    "Name": "MyTargetGroup",
                    "Port": 70,
                    "Protocol": "HTTPS",
                    "VpcId": {"Ref": "testVPC"},
                },
            },
        }
    }
    template_json = json.dumps(template)
    cfn_conn.create_stack(
        StackName="test-stack",
        TemplateBody=template_json,
    )
    describe_target_groups_response = elbv2_client.describe_target_groups()
    target_group_dicts = describe_target_groups_response['TargetGroups']
    assert len(target_group_dicts) == 3
    # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12)
    # and one named MyTargetGroup
    assert len([tg for tg in target_group_dicts if tg['TargetGroupName'] == 'MyTargetGroup']) == 1
    assert len(
        [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')]
    ) == 2
@mock_elbv2
@mock_ec2
def test_redirect_action_listener_rule():
    """A listener created with a redirect default action must report that
    action unchanged through describe_rules, describe_listeners and a
    subsequent modify_listener that only changes the port.
    """
    conn = boto3.client('elbv2', region_name='us-east-1')
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    security_group = ec2.create_security_group(
        GroupName='a-security-group', Description='First One')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
    subnet1 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.192/26',
        AvailabilityZone='us-east-1a')
    subnet2 = ec2.create_subnet(
        VpcId=vpc.id,
        CidrBlock='172.28.7.128/26',
        AvailabilityZone='us-east-1b')
    response = conn.create_load_balancer(
        Name='my-lb',
        Subnets=[subnet1.id, subnet2.id],
        SecurityGroups=[security_group.id],
        Scheme='internal',
        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
    load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
    response = conn.create_listener(LoadBalancerArn=load_balancer_arn,
                                    Protocol='HTTP',
                                    Port=80,
                                    DefaultActions=[
                                        {'Type': 'redirect',
                                         'RedirectConfig': {
                                             'Protocol': 'HTTPS',
                                             'Port': '443',
                                             'StatusCode': 'HTTP_301'
                                         }}])
    listener = response.get('Listeners')[0]
    expected_default_actions = [{
        'Type': 'redirect',
        'RedirectConfig': {
            'Protocol': 'HTTPS',
            'Port': '443',
            'StatusCode': 'HTTP_301'
        }
    }]
    listener.get('DefaultActions').should.equal(expected_default_actions)
    listener_arn = listener.get('ListenerArn')
    describe_rules_response = conn.describe_rules(ListenerArn=listener_arn)
    describe_rules_response['Rules'][0]['Actions'].should.equal(expected_default_actions)
    describe_listener_response = conn.describe_listeners(ListenerArns=[listener_arn, ])
    describe_listener_actions = describe_listener_response['Listeners'][0]['DefaultActions']
    describe_listener_actions.should.equal(expected_default_actions)
    # Changing only the port must not disturb the redirect default action
    modify_listener_response = conn.modify_listener(ListenerArn=listener_arn, Port=81)
    modify_listener_actions = modify_listener_response['Listeners'][0]['DefaultActions']
    modify_listener_actions.should.equal(expected_default_actions)
@mock_elbv2
@mock_cloudformation
def test_redirect_action_listener_rule_cloudformation():
    """Create a listener with a redirect default action via CloudFormation
    and verify the action round-trips through describe_listeners.
    """
    cnf_conn = boto3.client('cloudformation', region_name='us-east-1')
    elbv2_client = boto3.client('elbv2', region_name='us-east-1')
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "ECS Cluster Test CloudFormation",
        "Resources": {
            "testVPC": {
                "Type": "AWS::EC2::VPC",
                "Properties": {
                    "CidrBlock": "10.0.0.0/16",
                },
            },
            "subnet1": {
                "Type": "AWS::EC2::Subnet",
                "Properties": {
                    "CidrBlock": "10.0.0.0/24",
                    "VpcId": {"Ref": "testVPC"},
                    # Fixed: the property was misspelled "AvalabilityZone"
                    # (silently ignored), and both subnets sat in the same
                    # AZ — an ALB needs subnets in at least two AZs.
                    "AvailabilityZone": "us-east-1a",
                },
            },
            "subnet2": {
                "Type": "AWS::EC2::Subnet",
                "Properties": {
                    "CidrBlock": "10.0.1.0/24",
                    "VpcId": {"Ref": "testVPC"},
                    "AvailabilityZone": "us-east-1b",
                },
            },
            "testLb": {
                "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
                "Properties": {
                    "Name": "my-lb",
                    "Subnets": [{"Ref": "subnet1"}, {"Ref": "subnet2"}],
                    "Type": "application",
                    "SecurityGroups": [],
                }
            },
            "testListener": {
                "Type": "AWS::ElasticLoadBalancingV2::Listener",
                "Properties": {
                    "LoadBalancerArn": {"Ref": "testLb"},
                    "Port": 80,
                    "Protocol": "HTTP",
                    "DefaultActions": [{
                        "Type": "redirect",
                        "RedirectConfig": {
                            "Port": "443",
                            "Protocol": "HTTPS",
                            "StatusCode": "HTTP_301",
                        }
                    }]
                }
            }
        }
    }
    template_json = json.dumps(template)
    cnf_conn.create_stack(StackName="test-stack", TemplateBody=template_json)
    describe_load_balancers_response = elbv2_client.describe_load_balancers(Names=['my-lb',])
    describe_load_balancers_response['LoadBalancers'].should.have.length_of(1)
    load_balancer_arn = describe_load_balancers_response['LoadBalancers'][0]['LoadBalancerArn']
    describe_listeners_response = elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn)
    describe_listeners_response['Listeners'].should.have.length_of(1)
    describe_listeners_response['Listeners'][0]['DefaultActions'].should.equal([{
        'Type': 'redirect',
        'RedirectConfig': {
            'Port': '443', 'Protocol': 'HTTPS', 'StatusCode': 'HTTP_301',
        }
    },])
| |
from __future__ import absolute_import
import io
import json
import logging
import os
import re
import socket
import sys
import time
from nose.tools import assert_raises
from django.test import LiveServerTestCase
from django.test.testcases import QuietWSGIRequestHandler
from django.utils import six
import requests
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, \
StaleElementReferenceException, TimeoutException, WebDriverException
from selenium.webdriver import Chrome, DesiredCapabilities, Firefox, PhantomJS
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.webdriver.support.color import Color
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.wait import WebDriverWait
from sbo_selenium.conf import settings
# StoppableWSGIServer is not in Django 1.7
try:
from django.test.testcases import StoppableWSGIServer
except ImportError:
StoppableWSGIServer = None
logger = logging.getLogger('django.request')
# Storage for Sauce Labs session IDs so they can be logged in bulk
sauce_sessions = []
ADD_ACCESSIBILITY_SCRIPT = """
var script = document.createElement('script');
script.src = '/static/js/axs_testing.js';
document.body.appendChild(script);
"""
SELECT_TEXT_SOURCE = """
(function(selector, start, end) {
var children,
count,
i,
j = 0,
length,
node,
range,
selection,
textNode,
text;
selection = document.getSelection();
selection.removeAllRanges();
range = document.createRange();
node = $(selector);
children = node.contents();
count = children.length;
if ('createTouch' in document) {
node.trigger('touchstart');
}
else {
$(node).mousedown();
}
for (i = 0; i < count; i++) {
textNode = children[i];
if (textNode.nodeType !== 3) {
continue;
}
text = textNode.nodeValue;
length = text.length;
if (length === 0) {
continue;
}
if (start >= j + length || (end !== -1 && end <= j)) {}
else if (j >= start && j + length <= end) {
range.selectNodeContents(textNode);
break;
}
else if (start >= j && start < j + length) {
range.setStart(textNode, start - j);
}
else if (end > j && end <= j + length) {
range.setEnd(textNode, end - j);
break;
}
j += text.length;
}
if (end === -1) {
range.setEnd(textNode, length);
}
selection.addRange(range);
if ('createTouch' in document) {
node.trigger('touchend');
}
else {
$(node).mouseup();
}
})('%s', %d, %d);
"""
class LoggingStream(io.TextIOBase):
    """
    File-like stream that routes written text to the "django.request"
    logger, emitting one log message per newline encountered.
    """

    def __init__(self, *args, **kwargs):
        # Pending text that has not yet been terminated by a newline
        self.buffer = six.StringIO()
        super(LoggingStream, self).__init__(*args, **kwargs)

    def write(self, s):
        # Split into alternating non-newline chunks and newline runs; a
        # lone "\n" flushes the buffer to the logger, any other chunk is
        # accumulated for a later flush.
        for chunk in re.split("([^\n]+)", s):
            if chunk == "\n":
                logger.error(self.buffer.getvalue())
                self.buffer = six.StringIO()
            elif chunk:
                self.buffer.write(chunk)
def replacement_get_stderr(self):
    """Return a LoggingStream instead of real stderr.

    Replacement for QuietWSGIRequestHandler.get_stderr(); routes handler
    errors to the log rather than cluttering the test output.
    """
    return LoggingStream()
def replacement_log_message(self, format, *args):
    """Log handler messages rather than discarding them.

    Replacement for QuietWSGIRequestHandler.log_message().
    """
    message = format % args
    logger.info("[%s] %s", self.log_date_time_string(), message)
def replacement_handle_error(self, request, client_address):
    """Log WSGI server errors instead of dumping them to stderr.

    Errors from the WSGI server itself tend to be harmless ones like
    "[Errno 32] Broken pipe" (which happens when a browser cancels a
    request before it finishes because it realizes it already has the
    asset).  By default these get dumped to stderr where they get
    confused with the test results, but aren't actually treated as test
    errors.  We'll just log them instead.
    """
    logger.error(
        "Exception happened during processing of request from %s",
        client_address,
        exc_info=sys.exc_info(),
    )
# Monkey-patch the test WSGI machinery so server-side noise goes to the
# logger rather than being printed amid the test output.
QuietWSGIRequestHandler.get_stderr = replacement_get_stderr
QuietWSGIRequestHandler.log_message = replacement_log_message
# StoppableWSGIServer is None when the import at the top of the file
# failed (Django >= 1.7); skip the patch in that case.
if StoppableWSGIServer is not None:
    StoppableWSGIServer.handle_error = replacement_handle_error
def lambda_click(element):
    """Click ``element`` and report success; for use inside Wait lambdas.

    Returns False (without clicking) while the element is not displayed,
    so the surrounding Wait keeps polling until the click can happen.
    """
    if element.is_displayed():
        element.click()
        return True
    return False
class Wait(WebDriverWait):
    """WebDriverWait preconfigured with the timeout and poll frequency
    from settings, and tolerant of a wider variety of exceptions while
    polling."""

    def __init__(self, driver):
        """Initialize from SELENIUM_TIMEOUT / SELENIUM_POLL_FREQUENCY."""
        super(Wait, self).__init__(driver, settings.SELENIUM_TIMEOUT,
                                   settings.SELENIUM_POLL_FREQUENCY)

    def until(self, method, message=''):
        """Call ``method(driver)`` repeatedly until its return value is
        truthy, then return that value; raise TimeoutException on
        expiry."""
        deadline = time.time() + self._timeout
        while True:
            try:
                result = method(self._driver)
                if result:
                    return result
            except (NoSuchElementException, StaleElementReferenceException,
                    WebDriverException):
                # Transient driver errors just mean "not yet"; keep polling.
                pass
            time.sleep(self._poll)
            if time.time() > deadline:
                raise TimeoutException(message)

    def until_not(self, method, message=''):
        """Call ``method(driver)`` repeatedly until its return value is
        falsy, then return that value; raise TimeoutException on
        expiry."""
        deadline = time.time() + self._timeout
        while True:
            try:
                result = method(self._driver)
                if not result:
                    return result
            except NoSuchElementException:
                # The element is gone entirely, which certainly qualifies.
                return True
            except (StaleElementReferenceException, WebDriverException):
                pass
            time.sleep(self._poll)
            if time.time() > deadline:
                raise TimeoutException(message)
class SeleniumTestCase(LiveServerTestCase):
    """
    Base class for Selenium tests. Allows tests to be written independently
    of which browser they're going to be run in.
    """

    @classmethod
    def appium_command_executor(cls):
        """ Get the command executor URL for iOS simulator testing """
        if hasattr(cls, '_appium_executor'):
            return cls._appium_executor
        # Get the address iWebDriver will connect to
        address = None
        try:
            address = socket.gethostbyname(socket.gethostname())
        except Exception:
            # Hostname lookup failed; use the default address defined below.
            # (Was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt.)
            pass
        # If we don't have an address we should use localhost
        if not address:
            address = '127.0.0.1'
        port = 4723
        cls._appium_executor = "".join(["http://", address, ":", str(port),
                                        '/wd/hub'])
        return cls._appium_executor

    @classmethod
    def setUpClass(cls):
        """ Create the screenshots directory if it doesn't exist yet """
        screenshot_dir = settings.SELENIUM_SCREENSHOT_DIR
        if screenshot_dir and not os.path.exists(screenshot_dir):
            os.makedirs(screenshot_dir)
        super(SeleniumTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(SeleniumTestCase, cls).tearDownClass()

    def setUp(self):
        """ Start a new browser instance for each test """
        self._screenshot_number = 1
        self.browser = os.getenv('SELENIUM_BROWSER',
                                 settings.SELENIUM_DEFAULT_BROWSER)
        if os.getenv('SELENIUM_HOST'):
            self.sel = self.sauce_labs_driver()
        elif self.browser == 'firefox':
            self.sel = Firefox()
        elif self.browser == 'htmlunit':
            self.sel = RemoteWebDriver(desired_capabilities=DesiredCapabilities.HTMLUNITWITHJS)
        elif self.browser in ['ios', 'ipad', 'ipod', 'iphone']:
            capabilities = {
                'app': 'safari',
                'browserName': '',
                'device': 'iPhone Simulator',
                'os': 'iOS 6.1'
            }
            self.sel = RemoteWebDriver(command_executor=self.appium_command_executor(),
                                       desired_capabilities=capabilities)
        elif self.browser == 'opera':
            self.sel = RemoteWebDriver(desired_capabilities=DesiredCapabilities.OPERA)
        elif self.browser == 'iexplore':
            self.sel = RemoteWebDriver(desired_capabilities=DesiredCapabilities.INTERNETEXPLORER)
        elif self.browser == 'phantomjs':
            self.sel = PhantomJS(service_args=['--debug=true',
                                              '--webdriver-loglevel=DEBUG'])
        elif self.browser == 'safari':
            # requires a Safari extension to be built from source and installed
            self.sel = RemoteWebDriver(desired_capabilities=DesiredCapabilities.SAFARI)
        else:
            self.sel = Chrome()
        # set_page_load_timeout does not seem to work in every browser
        # Waiting for the body to appear seems like a safe alternative
        self.wait_for_element('body')
        # Give the browser a little time; Firefox throws random errors if you
        # hit it too soon
        time.sleep(1)

    def tearDown(self):
        """ Report the result, screenshot any failure, and quit the browser """
        # Check to see if an exception was raised during the test
        info = sys.exc_info()
        passed = info[0] is None
        if not passed:
            # Want to see what went wrong
            self.screenshot()
        self.report_status(passed)
        if hasattr(self, 'sel'):
            self.sel.quit()
        super(SeleniumTestCase, self).tearDown()

    # ~~~~~~~~~~~~~~~~~~~~~~~~~ Selenium operations ~~~~~~~~~~~~~~~~~~~~~~~~~~

    def assert_hidden(self, selector):
        """ Assert the element matching the selector exists but is hidden """
        element = self.wait_for_element(selector)
        msg = "'%s' should not be visible" % selector
        assert not element.is_displayed(), msg

    def assert_not_present(self, selector):
        """ Assert that no element matches the selector """
        assert_raises(NoSuchElementException,
                      self.sel.find_element_by_css_selector, selector)

    def assert_not_visible(self, selector):
        """ Ok if it's either missing or hidden """
        try:
            element = self.sel.find_element_by_css_selector(selector)
        except NoSuchElementException:
            return
        msg = "'%s' should not be visible" % selector
        assert not element.is_displayed(), msg

    def assert_text_not_in_element(self, selector, text):
        """ Verify that the specified element does not contain certain text """
        msg = "'%s' should not contain the text '%s'" % (selector, text)
        content = self.sel.find_element_by_css_selector(selector).text
        assert text not in content, msg

    def assert_visible(self, selector):
        """ Assert the element matching the selector is displayed """
        element = self.wait_for_element(selector)
        msg = "'%s' should be visible" % selector
        assert element.is_displayed(), msg

    def audit_accessibility(self):
        """ Check for accessibility violations using the JavaScript library
        from Chrome's Developer Tools. """
        # First add the library to the page
        script = ''
        for line in ADD_ACCESSIBILITY_SCRIPT.splitlines():
            script += line.strip()
        self.sel.execute_script(script)
        # Wait for the script to finish loading
        self.wait_for_condition('return axs.AuditRule.specs.videoWithoutCaptions !== "undefined";')
        # Now run the audit and inspect the results
        self.sel.execute_script('axs_audit_results = axs.Audit.run();')
        failed = self.sel.execute_script('return axs_audit_results.some(function (element, index, array) { return element.result === "FAIL" });')
        if failed:
            report = self.sel.execute_script('return axs.Audit.createReport(axs_audit_results);')
            raise self.failureException(report)

    def click(self, selector):
        """ Click the element matching the selector (and retry if it isn't
        visible or clickable yet) """
        element = self.wait_for_element(selector)
        element_was_clicked = lambda driver: lambda_click(element)
        msg = "The element matching '%s' should be clickable" % selector
        Wait(self.sel).until(element_was_clicked, msg)
        return element

    def click_link_with_text(self, text):
        """ Wait for the link with the given text to appear, then click it """
        link_is_present = lambda driver: driver.find_element_by_link_text(text)
        msg = "A link with text '%s' should be present" % text
        link = Wait(self.sel).until(link_is_present, msg)
        link.click()
        return link

    def click_link_with_xpath(self, xpath):
        """ Wait for the link matching the XPath to appear, then click it """
        link_is_present = lambda driver: driver.find_element_by_xpath(xpath)
        msg = "A link with xpath '%s' should be present" % xpath
        link = Wait(self.sel).until(link_is_present, msg)
        link.click()
        return link

    def enter_text(self, selector, value):
        """ Type ``value`` into the field matching the CSS selector """
        field = self.wait_for_element(selector)
        field.send_keys(value)
        self.screenshot()
        return field

    def enter_text_via_xpath(self, xpath, value):
        """ Type ``value`` into the field matching the XPath expression """
        field = self.wait_for_xpath(xpath)
        field.send_keys(value)
        self.screenshot()
        return field

    def get(self, relative_url):
        """ Load the page at the given path on the live test server """
        self.sel.get('%s%s' % (self.live_server_url, relative_url))
        self.screenshot()

    def screenshot(self):
        """ Save a screenshot of the current page, when supported """
        if hasattr(self, 'sauce_user_name'):
            # Sauce Labs is taking screenshots for us
            return
        if not hasattr(self, 'browser') or self.browser == 'htmlunit':
            # Can't take screenshots
            return
        screenshot_dir = settings.SELENIUM_SCREENSHOT_DIR
        if not screenshot_dir:
            return
        name = "%s_%d.png" % (self._testMethodName, self._screenshot_number)
        path = os.path.join(screenshot_dir, name)
        self.sel.get_screenshot_as_file(path)
        self._screenshot_number += 1

    def select_by_text(self, selector, text):
        """ Choose the option with the given visible text in the matching
        select widget """
        select = Select(self.wait_for_element(selector))
        select.select_by_visible_text(text)
        self.screenshot()
        return select

    def select_by_value(self, selector, value):
        """ Choose the option with the given value in the matching select
        widget """
        select = Select(self.wait_for_element(selector))
        select.select_by_value(value)
        self.screenshot()
        return select

    def select_text(self, selector, start=0, end=-1):
        """ Selects the specified text range of the element matching the
        provided selector by simulating a mouse down, programmatically
        selecting the text, and then simulating a mouse up. Doesn't yet work
        on IE < 9 or iOS. Doesn't support nested markup either. """
        if not hasattr(self, 'select_text_template'):
            # Build (and cache) a one-line version of the selection script
            template = ''
            for line in SELECT_TEXT_SOURCE.splitlines():
                template += line.strip()
            self.select_text_template = template
        script = self.select_text_template % (selector, start, end)
        self.sel.execute_script(script)
        self.screenshot()

    def wait_for_background_color(self, selector, color_string):
        """ Wait until the matching element has the given background color """
        color = Color.from_string(color_string)
        correct_color = lambda driver: Color.from_string(driver.find_element_by_css_selector(selector).value_of_css_property("background-color")) == color
        msg = "The color of '%s' should be %s" % (selector, color_string)
        Wait(self.sel).until(correct_color, msg)
        self.screenshot()

    def wait_for_condition(self, return_statement, msg=None):
        """Wait until the provided JavaScript expression returns true.
        Note: for this to work, the expression must include the "return"
        keyword, not just the expression to be evaluated."""
        condition_is_true = lambda driver: driver.execute_script(return_statement)
        if not msg:
            msg = '"{}" never became true'.format(return_statement)
        Wait(self.sel).until(condition_is_true, msg)

    def wait_for_element(self, selector):
        """ Wait for an element matching the CSS selector and return it """
        element_is_present = lambda driver: driver.find_element_by_css_selector(selector)
        msg = "An element matching '%s' should be on the page" % selector
        element = Wait(self.sel).until(element_is_present, msg)
        self.screenshot()
        return element

    def wait_for_text(self, text):
        """ Wait until the given text appears in the page source """
        text_is_present = lambda driver: text in driver.page_source
        msg = "The text '%s' should be present on the page" % text
        Wait(self.sel).until(text_is_present, msg)
        self.screenshot()

    def wait_for_xpath(self, xpath):
        """ Wait for an element matching the XPath and return it """
        element_is_present = lambda driver: driver.find_element_by_xpath(xpath)
        msg = "An element matching '%s' should be on the page" % xpath
        element = Wait(self.sel).until(element_is_present, msg)
        self.screenshot()
        return element

    def wait_until_element_contains(self, selector, text):
        """ Wait until the specified element contains certain text """
        text_contained = lambda driver: text in driver.find_element_by_css_selector(selector).text
        msg = "'%s' should contain the text '%s'" % (selector, text)
        Wait(self.sel).until(text_contained, msg)
        self.screenshot()

    def wait_until_hidden(self, selector):
        """ Wait until the element matching the selector is hidden """
        element = self.wait_for_element(selector)
        element_is_hidden = lambda driver: not element.is_displayed()
        msg = "The element matching '%s' should not be visible" % selector
        Wait(self.sel).until(element_is_hidden, msg)
        self.screenshot()
        return element

    def wait_until_not_present(self, selector):
        """ Wait until the element matching the selector is gone from page """
        element_is_present = lambda driver: driver.find_element_by_css_selector(selector)
        msg = "There should not be an element matching '%s'" % selector
        Wait(self.sel).until_not(element_is_present, msg)
        self.screenshot()

    def wait_until_not_visible(self, selector):
        """ Wait until the element matching the selector is either hidden or
        removed from the page """
        element_is_visible = lambda driver: driver.find_element_by_css_selector(selector).is_displayed()
        msg = "The element matching '%s' should not be visible" % selector
        Wait(self.sel).until_not(element_is_visible, msg)
        self.screenshot()

    def wait_until_option_added(self, selector, option_text):
        """ Wait until the specified select option appears; the entire
        select widget may be replaced in the process """
        end_time = time.time() + settings.SELENIUM_TIMEOUT
        while True:
            try:
                select = Select(self.sel.find_element_by_css_selector(selector))
                for option in select.options:
                    if option.text == option_text:
                        return option
            except (NoSuchElementException, StaleElementReferenceException):
                pass
            time.sleep(settings.SELENIUM_POLL_FREQUENCY)
            if time.time() > end_time:
                break
        raise TimeoutException("Select option should have been added")

    def wait_until_option_disabled(self, selector, option_text):
        """ Wait until the specified select option is disabled; the entire
        select widget may be replaced in the process """
        end_time = time.time() + settings.SELENIUM_TIMEOUT
        while True:
            try:
                select = Select(self.sel.find_element_by_css_selector(selector))
                for option in select.options:
                    if option.text == option_text and not option.is_enabled():
                        return option
            except (NoSuchElementException, StaleElementReferenceException):
                pass
            time.sleep(settings.SELENIUM_POLL_FREQUENCY)
            if time.time() > end_time:
                break
        raise TimeoutException("Select option should have been disabled")

    def wait_until_property_equals(self, selector, name, value):
        """ Wait until the specified CSS property of the element matching the
        provided selector matches the expected value """
        value_is_correct = lambda driver: driver.find_element_by_css_selector(selector).value_of_css_property(name) == value
        msg = "The %s CSS property of '%s' should be %s" % (name, selector,
                                                            value)
        Wait(self.sel).until(value_is_correct, msg)
        self.screenshot()

    def wait_until_offscreen(self, selector):
        """ Wait until the element matching the provided selector has been
        moved offscreen (deliberately, not just scrolled out of view) """
        end_time = time.time() + settings.SELENIUM_TIMEOUT
        while True:
            try:
                element = self.sel.find_element_by_css_selector(selector)
                location = element.location
                size = element.size
                if location["y"] + size["height"] <= 0:
                    self.screenshot()
                    return True
                if location["x"] + size["width"] <= 0:
                    self.screenshot()
                    return True
            except (NoSuchElementException, StaleElementReferenceException):
                pass
            time.sleep(settings.SELENIUM_POLL_FREQUENCY)
            if time.time() > end_time:
                break
        raise TimeoutException("'%s' should be offscreen" % selector)

    def wait_until_onscreen(self, selector):
        """ Wait until the element matching the provided selector has been
        moved into the viewable page """
        end_time = time.time() + settings.SELENIUM_TIMEOUT
        while True:
            try:
                element = self.sel.find_element_by_css_selector(selector)
                location = element.location
                if location["x"] >= 0 and location["y"] >= 0:
                    self.screenshot()
                    return True
            except (NoSuchElementException, StaleElementReferenceException):
                pass
            time.sleep(settings.SELENIUM_POLL_FREQUENCY)
            if time.time() > end_time:
                break
        # Fixed message: this previously said "offscreen", the opposite of
        # what this method waits for.
        raise TimeoutException("'%s' should be onscreen" % selector)

    def wait_until_property_less_than(self, selector, name, value):
        """ Wait until the specified CSS property of the element matching the
        provided selector is less than a certain value. Ignores any
        non-integer suffixes like 'px'. """
        value_is_correct = lambda driver: int(re.match(r'([\d-]+)', driver.find_element_by_css_selector(selector).value_of_css_property(name)).group(1)) < value
        msg = "The %s CSS property of '%s' should be less than %s" % (name, selector, value)
        Wait(self.sel).until(value_is_correct, msg)
        self.screenshot()

    def wait_until_visible(self, selector):
        """ Wait until the element matching the selector is visible """
        element = self.wait_for_element(selector)
        element_is_visible = lambda driver: element.is_displayed()
        msg = "The element matching '%s' should be visible" % selector
        Wait(self.sel).until(element_is_visible, msg)
        return element

    # ~~~~~~~~~~~~~~~~~~~~~~~~~ Sauce Labs support ~~~~~~~~~~~~~~~~~~~~~~~~~~

    def sauce_labs_driver(self):
        """ Configure the Selenium driver to use Sauce Labs """
        host = os.getenv("SELENIUM_HOST", "ondemand.saucelabs.com")
        port = os.getenv("SELENIUM_PORT", "80")
        executor = "".join(["http://", host, ":", port, '/wd/hub'])
        platform = os.getenv("SELENIUM_PLATFORM", "Windows 7")
        version = os.getenv("SELENIUM_VERSION", "")
        self.sauce_user_name = os.getenv("SAUCE_USER_NAME")
        self.sauce_api_key = os.getenv("SAUCE_API_KEY")
        tunnel_id = os.getenv("SAUCE_TUNNEL_ID", "")
        build_number = os.getenv('BUILD_NUMBER')
        job_name = os.getenv('JOB_NAME')
        # http://code.google.com/p/selenium/wiki/DesiredCapabilities
        # https://saucelabs.com/docs/additional-config#desired-capabilities
        caps = {
            'accessKey': self.sauce_api_key,
            'capture-html': True,
            'browserName': self.browser,
            'javascriptEnabled': True,
            'name': self.id(),
            'platform': platform,
            'username': self.sauce_user_name,
            'version': version,
        }
        if build_number and job_name:
            caps['build'] = '{} #{}'.format(job_name, build_number)
        if tunnel_id:
            caps['tunnel-identifier'] = tunnel_id
        if settings.SELENIUM_SAUCE_VERSION:
            caps['selenium-version'] = settings.SELENIUM_SAUCE_VERSION
        remote = webdriver.Remote(command_executor=executor,
                                  desired_capabilities=caps)
        # Store the Sauce session ID to output later for Jenkins integration
        # See https://saucelabs.com/jenkins/5 for details
        sauce_sessions.append('SauceOnDemandSessionID={} job-name={}'.format(remote.session_id, self.id()))
        return remote

    def report_status(self, passed):
        """Report to Sauce Labs whether or not the test passed, so that can be
        reflected in their UI."""
        if not hasattr(self, 'sauce_user_name'):
            # Not using Sauce Labs for this test
            return
        url_pattern = 'http://{}:{}@saucelabs.com/rest/v1/{}/jobs/{}'
        url = url_pattern.format(self.sauce_user_name,
                                 self.sauce_api_key,
                                 self.sauce_user_name,
                                 self.sel.session_id)
        body_content = json.dumps({"passed": passed})
        headers = {
            'Content-Type': 'application/json',
        }
        response = requests.put(url, body_content, headers=headers)
        return response.status_code == 200
| |
"""Thread module emulating a subset of Java's threading model."""
import sys as _sys
try:
import _pydev_thread as thread
except ImportError:
import thread
import warnings
from _pydev_time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
# Note regarding PEP 8 compliant aliases
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. While those names are not in any imminent danger of being
# deprecated, starting with Python 2.6, the module now provides a
# PEP 8 compliant alias for any such method name.
# Using the new PEP 8 compliant names also facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'active_count', 'Condition', 'currentThread',
'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
# sys.exc_clear is used to work around the fact that except blocks
# don't fully clear the exception until 3.0.
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='threading', message='sys.exc_clear')
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
_VERBOSE = False
if __debug__:
    class _Verbose(object):
        # Base class giving subclasses an optional per-instance trace
        # facility (_note) for debugging the synchronization primitives.
        def __init__(self, verbose=None):
            # Fall back to the module-wide _VERBOSE flag when unspecified.
            if verbose is None:
                verbose = _VERBOSE
            self.__verbose = verbose
        def _note(self, format, *args):
            # Write a formatted trace line to stderr, prefixed with the
            # calling thread's name, when verbosity is enabled.
            if self.__verbose:
                format = format % args
                # Issue #4188: calling current_thread() can incur an infinite
                # recursion if it has to create a DummyThread on the fly.
                ident = _get_ident()
                try:
                    name = _active[ident].name
                except KeyError:
                    # Thread not registered in _active (e.g. a foreign
                    # OS thread); fall back to the raw ident.
                    name = "<OS thread %d>" % ident
                format = "%s: %s\n" % (name, format)
                _sys.stderr.write(format)
else:
    # Disable this when using "python -O": _note becomes a no-op.
    class _Verbose(object):
        def __init__(self, verbose=None):
            pass
        def _note(self, *args):
            pass
# Support for profile and trace hooks
_profile_hook = None  # set by setprofile()
_trace_hook = None  # set by settrace()
def setprofile(func):
    """Store ``func`` as the module-level profile hook for new threads."""
    global _profile_hook
    _profile_hook = func
def settrace(func):
    """Store ``func`` as the module-level trace hook for new threads."""
    global _trace_hook
    _trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
    """Factory returning a new reentrant lock (an _RLock instance)."""
    return _RLock(*args, **kwargs)
class _RLock(_Verbose):
    """A reentrant lock: the owning thread may acquire it multiple times.

    release() must be called once per acquire(); the underlying lock is
    only freed for other threads when the recursion count reaches zero.
    """
    def __init__(self, verbose=None):
        _Verbose.__init__(self, verbose)
        # Underlying non-reentrant lock; held whenever __count > 0.
        self.__block = _allocate_lock()
        # Thread ident of the current owner, or None when unowned.
        self.__owner = None
        # Number of acquire() calls by the owner not yet matched by release().
        self.__count = 0
    def __repr__(self):
        # Show the owning thread's name when it is registered, otherwise
        # the raw ident (or None), plus the recursion count.
        owner = self.__owner
        try:
            owner = _active[owner].name
        except KeyError:
            pass
        return "<%s owner=%r count=%d>" % (
            self.__class__.__name__, owner, self.__count)
    def acquire(self, blocking=1):
        """Acquire the lock, recursively if already owned by this thread.

        Returns a true value on success, False when non-blocking and the
        lock is held by another thread.
        """
        me = _get_ident()
        if self.__owner == me:
            # Re-entrant acquisition: just bump the recursion count.
            self.__count = self.__count + 1
            if __debug__:
                self._note("%s.acquire(%s): recursive success", self, blocking)
            return 1
        rc = self.__block.acquire(blocking)
        if rc:
            self.__owner = me
            self.__count = 1
            if __debug__:
                self._note("%s.acquire(%s): initial success", self, blocking)
        else:
            if __debug__:
                self._note("%s.acquire(%s): failure", self, blocking)
        return rc
    __enter__ = acquire
    def release(self):
        """Release one level of the lock; free it when the count hits zero.

        Raises RuntimeError if the calling thread does not own the lock.
        """
        if self.__owner != _get_ident():
            raise RuntimeError("cannot release un-acquired lock")
        self.__count = count = self.__count - 1
        if not count:
            self.__owner = None
            self.__block.release()
            if __debug__:
                self._note("%s.release(): final release", self)
        else:
            if __debug__:
                self._note("%s.release(): non-final release", self)
    def __exit__(self, t, v, tb):
        self.release()
    # Internal methods used by condition variables
    def _acquire_restore(self, count_owner):
        # Reacquire the lock and restore the (count, owner) pair returned
        # by _release_save(); used by Condition.wait().
        count, owner = count_owner
        self.__block.acquire()
        self.__count = count
        self.__owner = owner
        if __debug__:
            self._note("%s._acquire_restore()", self)
    def _release_save(self):
        # Fully release the lock regardless of recursion depth, returning
        # the state needed by _acquire_restore() to re-establish it.
        if __debug__:
            self._note("%s._release_save()", self)
        count = self.__count
        self.__count = 0
        owner = self.__owner
        self.__owner = None
        self.__block.release()
        return (count, owner)
    def _is_owned(self):
        # True if the calling thread currently owns the lock.
        return self.__owner == _get_ident()
def Condition(*args, **kwargs):
    """Factory returning a new condition variable (a _Condition instance)."""
    return _Condition(*args, **kwargs)
class _Condition(_Verbose):
    """Condition variable: lets threads wait() until another thread
    notify()s them.

    Wraps a lock (a fresh RLock by default); wait() and notify() must
    only be called while that lock is held.
    """
    def __init__(self, lock=None, verbose=None):
        _Verbose.__init__(self, verbose)
        if lock is None:
            lock = RLock()
        self.__lock = lock
        # Export the lock's acquire() and release() methods
        self.acquire = lock.acquire
        self.release = lock.release
        # If the lock defines _release_save() and/or _acquire_restore(),
        # these override the default implementations (which just call
        # release() and acquire() on the lock).  Ditto for _is_owned().
        try:
            self._release_save = lock._release_save
        except AttributeError:
            pass
        try:
            self._acquire_restore = lock._acquire_restore
        except AttributeError:
            pass
        try:
            self._is_owned = lock._is_owned
        except AttributeError:
            pass
        # One private, already-acquired lock per waiting thread; a waiter
        # is unblocked by releasing its private lock in notify().
        self.__waiters = []
    def __enter__(self):
        return self.__lock.__enter__()
    def __exit__(self, *args):
        return self.__lock.__exit__(*args)
    def __repr__(self):
        return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
    def _release_save(self):
        # Default used when the wrapped lock provides no _release_save().
        self.__lock.release()           # No state to save
    def _acquire_restore(self, x):
        # Default used when the wrapped lock provides no _acquire_restore().
        self.__lock.acquire()           # Ignore saved state
    def _is_owned(self):
        # Return True if lock is owned by current_thread.
        # This method is called only if __lock doesn't have _is_owned().
        if self.__lock.acquire(0):
            self.__lock.release()
            return False
        else:
            return True
    def wait(self, timeout=None):
        """Release the lock and block until notified or timed out.

        The caller must hold the lock (RuntimeError otherwise); it is
        reacquired before returning, even on interruption.
        """
        if not self._is_owned():
            raise RuntimeError("cannot wait on un-acquired lock")
        # Block on a fresh private lock that notify() will release.
        waiter = _allocate_lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try:    # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                waiter.acquire()
                if __debug__:
                    self._note("%s.wait(): got it", self)
            else:
                # Balancing act:  We can't afford a pure busy loop, so we
                # have to sleep; but if we sleep the whole timeout time,
                # we'll be unresponsive.  The scheme here sleeps very
                # little at first, longer as time goes on, but never longer
                # than 20 times per second (or the timeout time remaining).
                endtime = _time() + timeout
                delay = 0.0005  # 500 us -> initial delay of 1 ms
                while True:
                    gotit = waiter.acquire(0)
                    if gotit:
                        break
                    remaining = endtime - _time()
                    if remaining <= 0:
                        break
                    # Exponential backoff, capped at 50 ms and at the
                    # time remaining before the deadline.
                    delay = min(delay * 2, remaining, .05)
                    _sleep(delay)
                if not gotit:
                    if __debug__:
                        self._note("%s.wait(%s): timed out", self, timeout)
                    # Timed out: withdraw our waiter unless a concurrent
                    # notify() already removed it.
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass
                else:
                    if __debug__:
                        self._note("%s.wait(%s): got it", self, timeout)
        finally:
            self._acquire_restore(saved_state)
    def notify(self, n=1):
        """Wake up to ``n`` waiting threads; the caller must hold the lock."""
        if not self._is_owned():
            raise RuntimeError("cannot notify on un-acquired lock")
        __waiters = self.__waiters
        waiters = __waiters[:n]
        if not waiters:
            if __debug__:
                self._note("%s.notify(): no waiters", self)
            return
        self._note("%s.notify(): notifying %d waiter%s", self, n,
                   n!=1 and "s" or "")
        for waiter in waiters:
            # Releasing the waiter's private lock unblocks its wait().
            waiter.release()
            try:
                __waiters.remove(waiter)
            except ValueError:
                pass
    def notifyAll(self):
        """Wake up all waiting threads."""
        self.notify(len(self.__waiters))
    notify_all = notifyAll
def Semaphore(*args, **kwargs):
    """Factory returning a new counting semaphore (a _Semaphore instance)."""
    return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
    # After Tim Peters' semaphore class, but not quite the same (no maximum)
    """Counting semaphore: acquire() decrements the counter, blocking
    while it is zero; release() increments it and wakes a waiter."""
    def __init__(self, value=1, verbose=None):
        if value < 0:
            raise ValueError("semaphore initial value must be >= 0")
        _Verbose.__init__(self, verbose)
        # Condition guarding __value; waiters block here when it is zero.
        self.__cond = Condition(Lock())
        self.__value = value
    def acquire(self, blocking=1):
        """Decrement the counter, blocking while it is zero.

        Returns True on success, False when non-blocking and the counter
        was zero.
        """
        rc = False
        self.__cond.acquire()
        while self.__value == 0:
            if not blocking:
                break
            if __debug__:
                self._note("%s.acquire(%s): blocked waiting, value=%s",
                           self, blocking, self.__value)
            self.__cond.wait()
        else:
            # while/else: this branch runs only when the loop exits
            # because __value is non-zero, NOT via the non-blocking break.
            self.__value = self.__value - 1
            if __debug__:
                self._note("%s.acquire: success, value=%s",
                           self, self.__value)
            rc = True
        self.__cond.release()
        return rc
    __enter__ = acquire
    def release(self):
        """Increment the counter and wake one blocked acquirer, if any."""
        self.__cond.acquire()
        self.__value = self.__value + 1
        if __debug__:
            self._note("%s.release: success, value=%s",
                       self, self.__value)
        self.__cond.notify()
        self.__cond.release()
    def __exit__(self, t, v, tb):
        self.release()
def BoundedSemaphore(*args, **kwargs):
    """Factory returning a new bounded semaphore (_BoundedSemaphore)."""
    return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
    """Semaphore that checks that # releases is <= # acquires"""
    def __init__(self, value=1, verbose=None):
        _Semaphore.__init__(self, value, verbose)
        # Remember the starting value so release() can detect overflow.
        self._initial_value = value
    def release(self):
        """Increment the counter, refusing to exceed the initial value.

        Raises ValueError when called more times than acquire().
        """
        if self._Semaphore__value >= self._initial_value:
            # Parenthesized raise: the previous Python 2-only
            # "raise ValueError, msg" form is a syntax error on Python 3.
            raise ValueError("Semaphore released too many times")
        return _Semaphore.release(self)
def Event(*args, **kwargs):
    """Factory returning a new event object (an _Event instance)."""
    return _Event(*args, **kwargs)
class _Event(_Verbose):
    # After Tim Peters' event class (without is_posted())
    """Simple flag signal: wait() blocks until set(); clear() resets."""
    def __init__(self, verbose=None):
        _Verbose.__init__(self, verbose)
        # Condition guarding the flag; waiters block here until set().
        self.__cond = Condition(Lock())
        self.__flag = False
    def _reset_internal_locks(self):
        # private!  called by Thread._reset_internal_locks by _after_fork()
        self.__cond.__init__()
    def isSet(self):
        """Return True if the event flag is set."""
        return self.__flag
    is_set = isSet
    def set(self):
        """Set the flag and wake all threads blocked in wait()."""
        self.__cond.acquire()
        try:
            self.__flag = True
            self.__cond.notify_all()
        finally:
            self.__cond.release()
    def clear(self):
        """Reset the flag so subsequent wait() calls block again."""
        self.__cond.acquire()
        try:
            self.__flag = False
        finally:
            self.__cond.release()
    def wait(self, timeout=None):
        """Block until the flag is set, or the optional timeout elapses.

        Returns the flag's value on exit (so False only on timeout).
        """
        self.__cond.acquire()
        try:
            if not self.__flag:
                self.__cond.wait(timeout)
            return self.__flag
        finally:
            self.__cond.release()
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
    """Return the next auto-generated thread name.

    Bumps the module-wide counter and interpolates it into ``template``.
    (As in CPython's threading module, the increment itself is not
    lock-protected.)
    """
    global _counter
    _counter += 1
    return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# Main class for threads
class Thread(_Verbose):
    """A class that represents a thread of control.
    Either pass a ``target`` callable to the constructor, or subclass
    and override run(); then call start(), which arranges for run()
    to be invoked in a separate thread of control.
    """
    __initialized = False
    # Need to store a reference to sys.exc_info for printing
    # out exceptions when a thread tries to use a global var. during interp.
    # shutdown and thus raises an exception about trying to perform some
    # operation on/with a NoneType
    __exc_info = _sys.exc_info
    # Keep sys.exc_clear too to clear the exception just before
    # allowing .join() to return.
    __exc_clear = _sys.exc_clear
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, verbose=None):
        """Initialize the thread object.
        *group* is reserved and must be None; *target* is the callable
        invoked by run() with *args*/*kwargs*; *name* defaults to
        "Thread-N".
        """
        assert group is None, "group argument must be None for now"
        _Verbose.__init__(self, verbose)
        if kwargs is None:
            kwargs = {}
        self.__target = target
        self.__name = str(name or _newname())
        self.__args = args
        self.__kwargs = kwargs
        self.__daemonic = self._set_daemon()
        self.__ident = None
        self.__started = Event()  # set once the new OS thread is running
        self.__stopped = False
        self.__block = Condition(Lock())  # notified when the thread stops
        self.__initialized = True
        # sys.stderr is not stored in the class like
        # sys.exc_info since it can be changed between instances
        self.__stderr = _sys.stderr
    def _reset_internal_locks(self):
        # private!  Called by _after_fork() to reset our internal locks as
        # they may be in an invalid state leading to a deadlock or crash.
        if hasattr(self, '_Thread__block'): # DummyThread deletes self.__block
            self.__block.__init__()
        self.__started._reset_internal_locks()
    @property
    def _block(self):
        # used by a unittest
        return self.__block
    def _set_daemon(self):
        # Overridden in _MainThread and _DummyThread
        return current_thread().daemon
    def __repr__(self):
        # Summarize name, life-cycle status, daemon flag and ident.
        assert self.__initialized, "Thread.__init__() was not called"
        status = "initial"
        if self.__started.is_set():
            status = "started"
        if self.__stopped:
            status = "stopped"
        if self.__daemonic:
            status += " daemon"
        if self.__ident is not None:
            status += " %s" % self.__ident
        return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
    def start(self):
        """Start the thread's activity.
        Arranges for run() to be invoked in a separate thread of
        control.  May be called at most once per thread object.
        """
        if not self.__initialized:
            raise RuntimeError("thread.__init__() not called")
        if self.__started.is_set():
            raise RuntimeError("threads can only be started once")
        if __debug__:
            self._note("%s.start(): starting thread", self)
        with _active_limbo_lock:
            _limbo[self] = self
        try:
            _start_new_thread(self.__bootstrap, ())
        except Exception:
            # Spawning the OS thread failed: undo the limbo registration.
            with _active_limbo_lock:
                del _limbo[self]
            raise
        self.__started.wait()
    def run(self):
        """Method representing the thread's activity; calls *target*
        (when given) with the positional and keyword arguments taken
        from the constructor.  Subclasses may override this method.
        """
        try:
            if self.__target:
                self.__target(*self.__args, **self.__kwargs)
        finally:
            # Avoid a refcycle if the thread is running a function with
            # an argument that has a member that points to the thread.
            del self.__target, self.__args, self.__kwargs
    def __bootstrap(self):
        # Wrapper around the real bootstrap code that ignores
        # exceptions during interpreter cleanup. Those typically
        # happen when a daemon thread wakes up at an unfortunate
        # moment, finds the world around it destroyed, and raises some
        # random exception *** while trying to report the exception in
        # __bootstrap_inner() below ***. Those random exceptions
        # don't help anybody, and they confuse users, so we suppress
        # them. We suppress them only when it appears that the world
        # indeed has already been destroyed, so that exceptions in
        # __bootstrap_inner() during normal business hours are properly
        # reported. Also, we only suppress them for daemonic threads;
        # if a non-daemonic encounters this, something else is wrong.
        try:
            self.__bootstrap_inner()
        except:
            # _sys is None only during interpreter teardown.
            if self.__daemonic and _sys is None:
                return
            raise
    def _set_ident(self):
        # Record the OS-level identifier of the calling thread.
        self.__ident = _get_ident()
    def __bootstrap_inner(self):
        try:
            self._set_ident()
            self.__started.set()
            # Promote ourselves from the limbo map to the active map.
            with _active_limbo_lock:
                _active[self.__ident] = self
                del _limbo[self]
            if __debug__:
                self._note("%s.__bootstrap(): thread started", self)
            if _trace_hook:
                self._note("%s.__bootstrap(): registering trace hook", self)
                _sys.settrace(_trace_hook)
            if _profile_hook:
                self._note("%s.__bootstrap(): registering profile hook", self)
                _sys.setprofile(_profile_hook)
            try:
                self.run()
            except SystemExit:
                if __debug__:
                    self._note("%s.__bootstrap(): raised SystemExit", self)
            except:
                if __debug__:
                    self._note("%s.__bootstrap(): unhandled exception", self)
                # If sys.stderr is no more (most likely from interpreter
                # shutdown) use self.__stderr. Otherwise still use sys (as in
                # _sys) in case sys.stderr was redefined since the creation of
                # self.
                if _sys:
                    _sys.stderr.write("Exception in thread %s:\n%s\n" %
                                      (self.name, _format_exc()))
                else:
                    # Do the best job possible w/o a huge amt. of code to
                    # approximate a traceback (code ideas from
                    # Lib/traceback.py)
                    exc_type, exc_value, exc_tb = self.__exc_info()
                    try:
                        print>>self.__stderr, (
                            "Exception in thread " + self.name +
                            " (most likely raised during interpreter shutdown):")
                        print>>self.__stderr, (
                            "Traceback (most recent call last):")
                        while exc_tb:
                            print>>self.__stderr, (
                                ' File "%s", line %s, in %s' %
                                (exc_tb.tb_frame.f_code.co_filename,
                                    exc_tb.tb_lineno,
                                    exc_tb.tb_frame.f_code.co_name))
                            exc_tb = exc_tb.tb_next
                        print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
                    # Make sure that exc_tb gets deleted since it is a memory
                    # hog; deleting everything else is just for thoroughness
                    finally:
                        del exc_type, exc_value, exc_tb
            else:
                if __debug__:
                    self._note("%s.__bootstrap(): normal return", self)
            finally:
                # Prevent a race in
                # test_threading.test_no_refcycle_through_target when
                # the exception keeps the target alive past when we
                # assert that it's dead.
                self.__exc_clear()
        finally:
            with _active_limbo_lock:
                self.__stop()
                try:
                    # We don't call self.__delete() because it also
                    # grabs _active_limbo_lock.
                    del _active[_get_ident()]
                except:
                    pass
    def __stop(self):
        # Mark the thread stopped and wake up any join()ers.
        self.__block.acquire()
        self.__stopped = True
        self.__block.notify_all()
        self.__block.release()
    def __delete(self):
        "Remove current thread from the dict of currently running threads."
        # Notes about running with dummy_thread:
        #
        # Must take care to not raise an exception if dummy_thread is being
        # used (and thus this module is being used as an instance of
        # dummy_threading). dummy_thread.get_ident() always returns -1 since
        # there is only one thread if dummy_thread is being used. Thus
        # len(_active) is always <= 1 here, and any Thread instance created
        # overwrites the (if any) thread currently registered in _active.
        #
        # An instance of _MainThread is always created by 'threading'. This
        # gets overwritten the instant an instance of Thread is created; both
        # threads return -1 from dummy_thread.get_ident() and thus have the
        # same key in the dict. So when the _MainThread instance created by
        # 'threading' tries to clean itself up when atexit calls this method
        # it gets a KeyError if another Thread instance was created.
        #
        # This all means that KeyError from trying to delete something from
        # _active if dummy_threading is being used is a red herring. But
        # since it isn't if dummy_threading is *not* being used then don't
        # hide the exception.
        try:
            with _active_limbo_lock:
                del _active[_get_ident()]
            # There must not be any python code between the previous line
            # and after the lock is released. Otherwise a tracing function
            # could try to acquire the lock again in the same thread, (in
            # current_thread()), and would block.
        except KeyError:
            if 'dummy_threading' not in _sys.modules:
                raise
    def join(self, timeout=None):
        """Wait until the thread terminates.
        Blocks until the thread terminates or, when *timeout* is not
        None, until *timeout* seconds have elapsed.  Joining the
        current thread or a not-yet-started thread raises
        RuntimeError.
        """
        if not self.__initialized:
            raise RuntimeError("Thread.__init__() not called")
        if not self.__started.is_set():
            raise RuntimeError("cannot join thread before it is started")
        if self is current_thread():
            raise RuntimeError("cannot join current thread")
        if __debug__:
            if not self.__stopped:
                self._note("%s.join(): waiting until thread stops", self)
        self.__block.acquire()
        try:
            if timeout is None:
                while not self.__stopped:
                    self.__block.wait()
                if __debug__:
                    self._note("%s.join(): thread stopped", self)
            else:
                # Re-check the deadline on every wakeup (spurious or not).
                deadline = _time() + timeout
                while not self.__stopped:
                    delay = deadline - _time()
                    if delay <= 0:
                        if __debug__:
                            self._note("%s.join(): timed out", self)
                        break
                    self.__block.wait(delay)
                else:
                    if __debug__:
                        self._note("%s.join(): thread stopped", self)
        finally:
            self.__block.release()
    @property
    def name(self):
        """The thread's name (a string used for identification only)."""
        assert self.__initialized, "Thread.__init__() not called"
        return self.__name
    @name.setter
    def name(self, name):
        assert self.__initialized, "Thread.__init__() not called"
        self.__name = str(name)
    @property
    def ident(self):
        """The thread identifier, or None before the thread starts."""
        assert self.__initialized, "Thread.__init__() not called"
        return self.__ident
    def isAlive(self):
        """Return whether the thread is alive (started and not stopped)."""
        assert self.__initialized, "Thread.__init__() not called"
        return self.__started.is_set() and not self.__stopped
    is_alive = isAlive
    @property
    def daemon(self):
        """Whether this thread is a daemon thread."""
        assert self.__initialized, "Thread.__init__() not called"
        return self.__daemonic
    @daemon.setter
    def daemon(self, daemonic):
        if not self.__initialized:
            raise RuntimeError("Thread.__init__() not called")
        if self.__started.is_set():
            raise RuntimeError("cannot set daemon status of active thread");
        self.__daemonic = daemonic
    def isDaemon(self):
        """Old-style synonym for reading the ``daemon`` property."""
        return self.daemon
    def setDaemon(self, daemonic):
        """Old-style synonym for setting the ``daemon`` property."""
        self.daemon = daemonic
    def getName(self):
        """Old-style synonym for reading the ``name`` property."""
        return self.name
    def setName(self, name):
        """Old-style synonym for setting the ``name`` property."""
        self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
    """Factory function returning a _Timer instance; see _Timer for
    the constructor signature."""
    return _Timer(*args, **kwargs)
class _Timer(Thread):
    """Call a function after a specified number of seconds:
    t = Timer(30.0, f, args=[], kwargs={})
    t.start()
    t.cancel() # stop the timer's action if it's still waiting
    """
    def __init__(self, interval, function, args=None, kwargs=None):
        Thread.__init__(self)
        self.interval = interval
        self.function = function
        # BUGFIX: the defaults used to be the mutable literals ``[]`` and
        # ``{}``, which were shared between every _Timer instance; use
        # None sentinels and create fresh containers per instance.
        self.args = args if args is not None else []
        self.kwargs = kwargs if kwargs is not None else {}
        self.finished = Event()
    def cancel(self):
        """Stop the timer if it hasn't finished yet"""
        self.finished.set()
    def run(self):
        # Wait out the interval; fire only if cancel() did not run first.
        self.finished.wait(self.interval)
        if not self.finished.is_set():
            self.function(*self.args, **self.kwargs)
        self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
    """Thread object representing the interpreter's main thread."""
    def __init__(self):
        Thread.__init__(self, name="MainThread")
        # The main thread is already running: mark it started and
        # register it in _active under its real identifier.
        self._Thread__started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[_get_ident()] = self
    def _set_daemon(self):
        # The main thread is never daemonic.
        return False
    def _exitfunc(self):
        # Called at interpreter shutdown (as threading._shutdown):
        # mark the main thread stopped, then wait for every remaining
        # non-daemon thread before deregistering ourselves.
        self._Thread__stop()
        t = _pickSomeNonDaemonThread()
        if t:
            if __debug__:
                self._note("%s: waiting for other threads", self)
        while t:
            t.join()
            t = _pickSomeNonDaemonThread()
        if __debug__:
            self._note("%s: exiting", self)
        self._Thread__delete()
def _pickSomeNonDaemonThread():
    """Return an arbitrary live non-daemon thread, or None if there is
    no such thread left."""
    for candidate in enumerate():
        if candidate.daemon or not candidate.is_alive():
            continue
        return candidate
    return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
    """Placeholder Thread object for foreign (non-threading) threads."""
    def __init__(self):
        Thread.__init__(self, name=_newname("Dummy-%d"))
        # Thread.__block consumes an OS-level locking primitive, which
        # can never be used by a _DummyThread. Since a _DummyThread
        # instance is immortal, that's bad, so release this resource.
        del self._Thread__block
        self._Thread__started.set()
        self._set_ident()
        with _active_limbo_lock:
            _active[_get_ident()] = self
    def _set_daemon(self):
        # Always daemonic, so interpreter exit never waits on one.
        return True
    def join(self, timeout=None):
        """Dummy threads never stop, so joining them is an error."""
        assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
    """Return the Thread object for the caller's thread of control.
    Threads not created through this module get a fresh _DummyThread.
    """
    thread = _active.get(_get_ident())
    if thread is not None:
        return thread
    ##print "current_thread(): no current thread for", _get_ident()
    return _DummyThread()
current_thread = currentThread
def activeCount():
    """Return the number of Thread objects currently alive, including
    those started but not yet running."""
    with _active_limbo_lock:
        total = len(_active) + len(_limbo)
    return total
active_count = activeCount
def _enumerate():
    """Same as enumerate(), but without taking the lock.  Internal
    use only."""
    snapshot = _active.values()
    snapshot.extend(_limbo.values())
    return snapshot
def enumerate():
    """Return a list of all Thread objects currently alive (running
    plus those still in limbo)."""
    with _active_limbo_lock:
        snapshot = _active.values() + _limbo.values()
    return snapshot
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
# NOTE: instantiating _MainThread also registers it in _active.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from _pydev_thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
    """Clean up threading state in the child process after os.fork()."""
    # This function is called by Python/ceval.c:PyEval_ReInitThreads which
    # is called from PyOS_AfterFork. Here we cleanup threading module state
    # that should not exist after a fork.
    # Reset _active_limbo_lock, in case we forked while the lock was held
    # by another (non-forked) thread. http://bugs.python.org/issue874900
    global _active_limbo_lock
    _active_limbo_lock = _allocate_lock()
    # fork() only copied the current thread; clear references to others.
    new_active = {}
    current = current_thread()
    with _active_limbo_lock:
        for thread in _active.itervalues():
            # Any lock/condition variable may be currently locked or in an
            # invalid state, so we reinitialize them.
            if hasattr(thread, '_reset_internal_locks'):
                thread._reset_internal_locks()
            if thread is current:
                # There is only one active thread. We reset the ident to
                # its new value since it can have changed.
                ident = _get_ident()
                thread._Thread__ident = ident
                new_active[ident] = thread
            else:
                # All the others are already stopped.
                thread._Thread__stop()
        _limbo.clear()
        _active.clear()
        _active.update(new_active)
        assert len(_active) == 1
# Self-test code
def _test():
    """Exercise the module with a small producer/consumer demo."""
    class BoundedQueue(_Verbose):
        # Bounded-capacity queue guarded by one monitor (an RLock) with
        # separate reader/writer condition variables.
        def __init__(self, limit):
            _Verbose.__init__(self)
            self.mon = RLock()
            self.rc = Condition(self.mon)
            self.wc = Condition(self.mon)
            self.limit = limit
            self.queue = deque()
        def put(self, item):
            # Block while full, then append and wake one reader.
            self.mon.acquire()
            while len(self.queue) >= self.limit:
                self._note("put(%s): queue full", item)
                self.wc.wait()
            self.queue.append(item)
            self._note("put(%s): appended, length now %d",
                       item, len(self.queue))
            self.rc.notify()
            self.mon.release()
        def get(self):
            # Block while empty, then pop and wake one writer.
            self.mon.acquire()
            while not self.queue:
                self._note("get(): queue empty")
                self.rc.wait()
            item = self.queue.popleft()
            self._note("get(): got %s, %d left", item, len(self.queue))
            self.wc.notify()
            self.mon.release()
            return item
    class ProducerThread(Thread):
        # Puts `quota` numbered items into the queue, with tiny sleeps.
        def __init__(self, queue, quota):
            Thread.__init__(self, name="Producer")
            self.queue = queue
            self.quota = quota
        def run(self):
            from random import random
            counter = 0
            while counter < self.quota:
                counter = counter + 1
                self.queue.put("%s.%d" % (self.name, counter))
                _sleep(random() * 0.00001)
    class ConsumerThread(Thread):
        # Pops and prints `count` items from the queue.
        def __init__(self, queue, count):
            Thread.__init__(self, name="Consumer")
            self.queue = queue
            self.count = count
        def run(self):
            while self.count > 0:
                item = self.queue.get()
                print item
                self.count = self.count - 1
    NP = 3   # number of producer threads
    QL = 4   # queue capacity limit
    NI = 5   # items produced by each producer
    Q = BoundedQueue(QL)
    P = []
    for i in range(NP):
        t = ProducerThread(Q, NI)
        t.name = ("Producer-%d" % (i+1))
        P.append(t)
    C = ConsumerThread(Q, NI*NP)
    for t in P:
        t.start()
        _sleep(0.000001)
    C.start()
    for t in P:
        t.join()
    C.join()
if __name__ == '__main__':
    _test()
| |
# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: December 16, 2004
# Author: Ivan Vilata i Balaguer - ivan at selidor dot net
#
# $Id$
#
########################################################################
"""Atom classes for describing dataset contents."""
# Imports
# =======
import re
import sys
import inspect
import cPickle
import numpy
from tables.utils import SizeType
from tables.misc.enum import Enum
# Public variables
# ================
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
all_types = set()  # filled in by MetaAtom as atom classes are created
"""Set of all PyTables types."""
atom_map = {}  # filled in by MetaAtom as atom classes are created
"""Maps atom kinds to item sizes and atom classes.
If there is a fixed set of possible item sizes for a given kind, the
kind maps to another mapping from item size in bytes to atom class.
Otherwise, the kind maps directly to the atom class.
"""
deftype_from_kind = {}  # filled in by MetaAtom as atom classes are created
"""Maps atom kinds to their default atom type (if any)."""
# Public functions
# ================
_type_re = re.compile(r'^([a-z]+)([0-9]*)$')
def split_type(type):
    """Split a PyTables type into a PyTables kind and an item size.
    Returns a tuple of (kind, itemsize).  When the type carries no
    precision suffix, the returned item size is None::
    >>> split_type('int32')
    ('int', 4)
    >>> split_type('string')
    ('string', None)
    """
    match = _type_re.match(type)
    if match is None:
        raise ValueError("malformed type: %r" % type)
    kind, precision = match.groups()
    if not precision:
        return (kind, None)
    # Precision is expressed in bits; the item size must be whole bytes.
    precision = int(precision)
    itemsize, remainder = divmod(precision, 8)
    if remainder: # 0 could be a valid item size
        raise ValueError( "precision must be a multiple of 8: %d"
                          % precision )
    return (kind, itemsize)
# Private functions
# =================
def _invalid_itemsize_error(kind, itemsize, itemsizes):
    """Build (not raise) a ValueError describing an invalid item size
    for the given kind, listing the acceptable sizes in order."""
    sorted_sizes = sorted(itemsizes)
    message = ( "invalid item size for kind ``%s``: %r; "
                "it must be one of ``%r``" % (kind, itemsize, sorted_sizes) )
    return ValueError(message)
def _abstract_atom_init(deftype, defvalue):
    """Return a constructor for an abstract `Atom` class.
    The returned ``__init__`` swaps the instance onto the concrete
    atom class registered for the requested item size.
    """
    defitemsize = split_type(deftype)[1]
    def __init__(self, itemsize=defitemsize, shape=(), dflt=defvalue):
        assert self.kind in atom_map
        sizemap = atom_map[self.kind]
        if itemsize not in sizemap:
            raise _invalid_itemsize_error(self.kind, itemsize, sizemap)
        concrete = sizemap[itemsize]
        # Re-class the instance and delegate to the concrete initializer.
        self.__class__ = concrete
        concrete.__init__(self, shape, dflt)
    return __init__
def _normalize_shape(shape):
    """Check that the `shape` is safe to be used and return it as a
    tuple of SizeType values."""
    if isinstance(shape, (int, numpy.integer, long)):
        # A bare integer N is shorthand for the tuple (N,).
        if shape < 1:
            raise ValueError( "shape value must be greater than 0: %d"
                              % shape )
        shape = (shape,)
    try:
        shape = tuple(shape)
    except TypeError:
        raise TypeError( "shape must be an integer or sequence: %r"
                         % (shape,) )
    ## XXX Get from HDF5 library if possible.
    # HDF5 does not support ranks greater than 32
    if len(shape) > 32:
        raise ValueError(
            "shapes with rank > 32 are not supported: %r" % (shape,) )
    return tuple(SizeType(dim) for dim in shape)
def _normalize_default(value, dtype):
    """Return `value` as a valid default of NumPy type `dtype`."""
    # Defaults are kept as NumPy objects so they serialize cleanly as
    # attributes; None is normalized to a zero value.
    if value is None:
        value = 0
    basedtype = dtype.base
    try:
        default = numpy.array(value, dtype=basedtype)
    except ValueError:
        asarray = numpy.array(value)
        if asarray.shape != basedtype.shape:
            raise
        # Maybe nested dtype with "scalar" value.
        default = numpy.array(value, dtype=basedtype.base)
    # PyTables attribute convention: 0-dim arrays are represented as
    # NumPy scalars.
    return default[()] if default.shape == () else default
def _cmp_dispatcher(other_method_name):
    """Dispatch comparisons to a method of the *other* object.
    Returns a new *rich comparison* method that forwards the call to
    the method named `other_method_name` on the *other* operand; when
    that operand has no such method, ``False`` is returned.  This is
    half of a double-dispatch pattern.
    """
    missing = object()  # sentinel: distinguishes "no attribute" from None
    def dispatched_cmp(self, other):
        other_method = getattr(other, other_method_name, missing)
        if other_method is missing:
            return False
        return other_method(self)
    return dispatched_cmp
# Helper classes
# ==============
class MetaAtom(type):
    """Atom metaclass.
    This metaclass ensures that data about atom classes gets inserted
    into the suitable registries (`all_types`, `atom_map` and
    `deftype_from_kind`).
    """
    def __init__(class_, name, bases, dict_):
        super(MetaAtom, class_).__init__(name, bases, dict_)
        kind = dict_.get('kind')
        itemsize = dict_.get('itemsize')
        type_ = dict_.get('type')
        deftype = dict_.get('_deftype')
        if kind and deftype:
            # Remember the default type for this kind (e.g. 'int' -> 'int32').
            deftype_from_kind[kind] = deftype
        if type_:
            all_types.add(type_)
        if kind and itemsize and not hasattr(itemsize, '__int__'):
            # Atom classes with a non-fixed item size do have an
            # ``itemsize``, but it's not a number (e.g. property).
            atom_map[kind] = class_
            return
        if kind: # first definition of kind, make new entry
            atom_map[kind] = {}
        if itemsize and hasattr(itemsize, '__int__'): # fixed
            kind = class_.kind # maybe from superclasses
            atom_map[kind][int(itemsize)] = class_
# Atom classes
# ============
class Atom(object):
    """Defines the type of atomic cells stored in a dataset.
    The meaning of *atomic* is that individual elements of a cell can
    not be extracted directly by indexing (i.e. __getitem__()) the
    dataset; e.g. if a dataset has shape (2, 2) and its atoms have
    shape (3,), to get the third element of the cell at (1, 0) one
    should use dataset[1,0][2] instead of dataset[1,0,2].
    The Atom class is meant to declare the different properties of the
    *base element* (also known as *atom*) of CArray, EArray and
    VLArray datasets, although they are also used to describe the base
    elements of Array datasets. Atoms have the property that their
    length is always the same. However, you can grow datasets along
    the extensible dimension in the case of EArray or put a variable
    number of them on a VLArray row. Moreover, they are not restricted
    to scalar values, and they can be *fully multidimensional
    objects*.
    Parameters
    ----------
    itemsize : int
        For types with a non-fixed size, this sets the size in
        bytes of individual items in the atom.
    shape : tuple
        Sets the shape of the atom. An integer shape of
        N is equivalent to the tuple (N,).
    dflt
        Sets the default value for the atom.
    The following are the public methods and attributes of the Atom class.
    Notes
    -----
    A series of descendant classes are offered in order to make the
    use of these element descriptions easier. You should use a
    particular Atom descendant class whenever you know the exact type
    you will need when writing your code. Otherwise, you may use one
    of the Atom.from_*() factory Methods.
    .. rubric:: Atom attributes
    .. attribute:: dflt
    The default value of the atom.
    If the user does not supply a value for an element while
    filling a dataset, this default value will be written to disk.
    If the user supplies a scalar value for a multidimensional
    atom, this value is automatically *broadcast* to all the items
    in the atom cell. If dflt is not supplied, an appropriate zero
    value (or *null* string) will be chosen by default. Please
    note that default values are kept internally as NumPy objects.
    .. attribute:: dtype
    The NumPy dtype that most closely matches this atom.
    .. attribute:: itemsize
    Size in bytes of a single item in the atom.
    Specially useful for atoms of the string kind.
    .. attribute:: kind
    The PyTables kind of the atom (a string).
    .. attribute:: shape
    The shape of the atom (a tuple for scalar atoms).
    .. attribute:: type
    The PyTables type of the atom (a string).
    Atoms can be compared with atoms and other objects for
    strict (in)equality without having to compare individual
    attributes::
    >>> atom1 = StringAtom(itemsize=10) # same as ``atom2``
    >>> atom2 = Atom.from_kind('string', 10) # same as ``atom1``
    >>> atom3 = IntAtom()
    >>> atom1 == 'foo'
    False
    >>> atom1 == atom2
    True
    >>> atom2 != atom1
    False
    >>> atom1 == atom3
    False
    >>> atom3 != atom2
    True
    """
    # Register data for all subclasses.
    __metaclass__ = MetaAtom
    # Class methods
    # ~~~~~~~~~~~~~
    @classmethod
    def prefix(class_):
        """Return the atom class prefix."""
        cname = class_.__name__
        return cname[:cname.rfind('Atom')]
    @classmethod
    def from_sctype(class_, sctype, shape=(), dflt=None):
        """Create an Atom from a NumPy scalar type sctype.
        Optional shape and default value may be specified as the
        shape and dflt
        arguments, respectively. Information in the
        sctype not represented in an Atom is ignored::
        >>> import numpy
        >>> Atom.from_sctype(numpy.int16, shape=(2, 2))
        Int16Atom(shape=(2, 2), dflt=0)
        >>> Atom.from_sctype('S5', dflt='hello')
        Traceback (most recent call last):
        ...
        ValueError: unknown NumPy scalar type: 'S5'
        >>> Atom.from_sctype('Float64')
        Float64Atom(shape=(), dflt=0.0)
        """
        if ( not isinstance(sctype, type)
             or not issubclass(sctype, numpy.generic) ):
            # Non-class input: try to resolve the name through NumPy's
            # scalar type dictionary.
            if sctype not in numpy.sctypeDict:
                raise ValueError("unknown NumPy scalar type: %r" % (sctype,))
            sctype = numpy.sctypeDict[sctype]
        return class_.from_dtype(numpy.dtype((sctype, shape)), dflt)
    @classmethod
    def from_dtype(class_, dtype, dflt=None):
        """Create an Atom from a NumPy dtype.
        An optional default value may be specified as the dflt
        argument. Information in the dtype not represented in an Atom is
        ignored::
        >>> import numpy
        >>> Atom.from_dtype(numpy.dtype((numpy.int16, (2, 2))))
        Int16Atom(shape=(2, 2), dflt=0)
        >>> Atom.from_dtype(numpy.dtype('Float64'))
        Float64Atom(shape=(), dflt=0.0)
        """
        basedtype = dtype.base
        if basedtype.names:
            raise ValueError( "compound data types are not supported: %r"
                              % dtype )
        if basedtype.shape != ():
            raise ValueError( "nested data types are not supported: %r"
                              % dtype )
        if basedtype.kind == 'S': # can not reuse something like 'string80'
            itemsize = basedtype.itemsize
            return class_.from_kind('string', itemsize, dtype.shape, dflt)
        # Most NumPy types have direct correspondence with PyTables types.
        return class_.from_type(basedtype.name, dtype.shape, dflt)
    @classmethod
    def from_type(class_, type, shape=(), dflt=None):
        """Create an Atom from a PyTables type.
        Optional shape and default value may be specified as the
        shape and dflt arguments, respectively::
        >>> Atom.from_type('bool')
        BoolAtom(shape=(), dflt=False)
        >>> Atom.from_type('int16', shape=(2, 2))
        Int16Atom(shape=(2, 2), dflt=0)
        >>> Atom.from_type('string40', dflt='hello')
        Traceback (most recent call last):
        ...
        ValueError: unknown type: 'string40'
        >>> Atom.from_type('Float64')
        Traceback (most recent call last):
        ...
        ValueError: unknown type: 'Float64'
        """
        if type not in all_types:
            raise ValueError("unknown type: %r" % (type,))
        kind, itemsize = split_type(type)
        return class_.from_kind(kind, itemsize, shape, dflt)
    @classmethod
    def from_kind(class_, kind, itemsize=None, shape=(), dflt=None):
        """Create an Atom from a PyTables kind.
        Optional item size, shape and default value may be
        specified as the itemsize, shape and dflt
        arguments, respectively. Bear in mind that not all atoms support
        a default item size::
        >>> Atom.from_kind('int', itemsize=2, shape=(2, 2))
        Int16Atom(shape=(2, 2), dflt=0)
        >>> Atom.from_kind('int', shape=(2, 2))
        Int32Atom(shape=(2, 2), dflt=0)
        >>> Atom.from_kind('int', shape=1)
        Int32Atom(shape=(1,), dflt=0)
        >>> Atom.from_kind('string', dflt=b'hello')
        Traceback (most recent call last):
        ...
        ValueError: no default item size for kind ``string``
        >>> Atom.from_kind('Float')
        Traceback (most recent call last):
        ...
        ValueError: unknown kind: 'Float'
        Moreover, some kinds with atypical constructor signatures
        are not supported; you need to use the proper
        constructor::
        >>> Atom.from_kind('enum') #doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        ValueError: the ``enum`` kind is not supported...
        """
        kwargs = {'shape': shape}
        if kind not in atom_map:
            raise ValueError("unknown kind: %r" % (kind,))
        # This incompatibility detection may get out-of-date and is
        # too hard-wired, but I couldn't come up with something
        # smarter. -- Ivan (2007-02-08)
        if kind in ['enum']:
            raise ValueError( "the ``%s`` kind is not supported; "
                              "please use the appropriate constructor"
                              % kind )
        # If no `itemsize` is given, try to get the default type of the
        # kind (which has a fixed item size).
        if itemsize is None:
            if kind not in deftype_from_kind:
                raise ValueError( "no default item size for kind ``%s``"
                                  % kind )
            type_ = deftype_from_kind[kind]
            kind, itemsize = split_type(type_)
        kdata = atom_map[kind]
        # Look up the class and set a possible item size.
        if hasattr(kdata, 'kind'): # atom class: non-fixed item size
            atomclass = kdata
            kwargs['itemsize'] = itemsize
        else: # dictionary: fixed item size
            if itemsize not in kdata:
                raise _invalid_itemsize_error(kind, itemsize, kdata)
            atomclass = kdata[itemsize]
        # Only set a `dflt` argument if given (`None` may not be understood).
        if dflt is not None:
            kwargs['dflt'] = dflt
        return atomclass(**kwargs)
    # Properties
    # ~~~~~~~~~~
    size = property(
        lambda self: self.dtype.itemsize,
        None, None, "Total size in bytes of the atom." )
    recarrtype = property(
        lambda self: str(self.dtype.shape) + self.dtype.base.str[1:],
        None, None, "String type to be used in numpy.rec.array()." )
    ndim = property(
        lambda self: len(self.shape), None, None,
        """The number of dimensions of the atom.
        .. versionadded:: 2.4""")
    # Special methods
    # ~~~~~~~~~~~~~~~
    def __init__(self, nptype, shape, dflt):
        """Set shape, dtype and default from the NumPy type `nptype`;
        abstract subclasses (no `type` attribute) may not be
        instantiated directly."""
        if not hasattr(self, 'type'):
            raise NotImplementedError( "``%s`` is an abstract class; "
                                       "please use one of its subclasses"
                                       % self.__class__.__name__ )
        self.shape = shape = _normalize_shape(shape)
        """The shape of the atom (a tuple for scalar atoms)."""
        # Curiously enough, NumPy isn't generally able to accept NumPy
        # integers in a shape. ;(
        npshape = tuple(int(s) for s in shape)
        self.dtype = dtype = numpy.dtype((nptype, npshape))
        """The NumPy dtype that most closely matches this atom."""
        self.dflt = _normalize_default(dflt, dtype)
        """The default value of the atom.
        If the user does not supply a value for an element while
        filling a dataset, this default value will be written to
        disk. If the user supplies a scalar value for a
        multidimensional atom, this value is automatically *broadcast*
        to all the items in the atom cell. If dflt is not supplied, an
        appropriate zero value (or *null* string) will be chosen by
        default. Please note that default values are kept internally
        as NumPy objects."""
    def __repr__(self):
        # Report itemsize only for atom classes with a non-fixed size.
        args = 'shape=%s, dflt=%r' % (self.shape, self.dflt)
        if not hasattr(self.__class__.itemsize, '__int__'): # non-fixed
            args = 'itemsize=%s, %s' % (self.itemsize, args)
        return '%s(%s)' % (self.__class__.__name__, args)
    __eq__ = _cmp_dispatcher('_is_equal_to_atom')
    def __ne__(self, other):
        """Negation of __eq__, dispatched through the other operand."""
        return not self.__eq__(other)
    # XXX: API incompatible change for PyTables 3 line
    # Overriding __eq__ blocks inheritance of __hash__ in 3.x
    #def __hash__(self):
    #    return hash((self.__class__, self.type, self.shape, self.itemsize,
    #                 self.dflt))
    # Public methods
    # ~~~~~~~~~~~~~~
    def copy(self, **override):
        """Get a copy of the atom, possibly overriding some arguments.
        Constructor arguments to be overridden must be passed as
        keyword arguments::
        >>> atom1 = Int32Atom(shape=12)
        >>> atom2 = atom1.copy()
        >>> print(atom1)
        Int32Atom(shape=(12,), dflt=0)
        >>> print(atom2)
        Int32Atom(shape=(12,), dflt=0)
        >>> atom1 is atom2
        False
        >>> atom3 = atom1.copy(shape=(2, 2))
        >>> print(atom3)
        Int32Atom(shape=(2, 2), dflt=0)
        >>> atom1.copy(foobar=42)
        Traceback (most recent call last):
        ...
        TypeError: __init__() got an unexpected keyword argument 'foobar'
        """
        newargs = self._get_init_args()
        newargs.update(override)
        return self.__class__(**newargs)
    # Private methods
    # ~~~~~~~~~~~~~~~
    def _get_init_args(self):
        """Get a dictionary of instance constructor arguments.
        This implementation works on classes which use the same names
        for both constructor arguments and instance attributes.
        """
        return dict( (arg, getattr(self, arg))
                     for arg in inspect.getargspec(self.__init__)[0]
                     if arg != 'self' )
    def _is_equal_to_atom(self, atom):
        """Is this object equal to the given `atom`?"""
        return ( self.type == atom.type and self.shape == atom.shape
                 and self.itemsize == atom.itemsize
                 and numpy.all(self.dflt == atom.dflt) )
class StringAtom(Atom):
    """Defines an atom of type string.
    The item size is the *maximum* length in characters of strings.
    """
    kind = 'string'
    itemsize = property(
        lambda self: self.dtype.base.itemsize,
        None, None, "Size in bytes of a single item in the atom." )
    type = 'string'
    _defvalue = b''
    def __init__(self, itemsize, shape=(), dflt=_defvalue):
        # NOTE(review): the check admits itemsize == 0 although the
        # message promises a positive integer; behavior kept as-is
        # for backward compatibility.
        if not hasattr(itemsize, '__int__') or int(itemsize) < 0:
            raise ValueError( "invalid item size for kind ``%s``: %r; "
                              "it must be a positive integer"
                              % ('string', itemsize) )
        Atom.__init__(self, 'S%d' % itemsize, shape, dflt)
class BoolAtom(Atom):
    """Defines an atom of type bool."""
    kind = 'bool'
    itemsize = 1  # booleans are stored as one byte
    type = 'bool'
    _deftype = 'bool8'  # default type registered for this kind
    _defvalue = False
    def __init__(self, shape=(), dflt=_defvalue):
        Atom.__init__(self, self.type, shape, dflt)
class IntAtom(Atom):
    """Defines an atom of a signed integral type (int kind)."""
    kind = 'int'
    signed = True
    _deftype = 'int32'  # default when no item size is given
    _defvalue = 0
    # Abstract: the constructor re-classes instances onto a concrete
    # Int*Atom subclass chosen by item size.
    __init__ = _abstract_atom_init(_deftype, _defvalue)
class UIntAtom(Atom):
    """Defines an atom of an unsigned integral type (uint kind)."""
    kind = 'uint'
    signed = False
    _deftype = 'uint32'  # default when no item size is given
    _defvalue = 0
    # Abstract: the constructor re-classes instances onto a concrete
    # UInt*Atom subclass chosen by item size.
    __init__ = _abstract_atom_init(_deftype, _defvalue)
class FloatAtom(Atom):
    """Defines an atom of a floating point type (float kind)."""
    kind = 'float'
    _deftype = 'float64'  # default when no item size is given
    _defvalue = 0.0
    # Abstract: the constructor re-classes instances onto a concrete
    # Float*Atom subclass chosen by item size.
    __init__ = _abstract_atom_init(_deftype, _defvalue)
def _create_numeric_class(baseclass, itemsize):
    """Create a numeric atom class with the given `baseclass` and an
    `itemsize` (in bytes).
    """
    # Class names carry the size in bits, e.g. IntAtom + 2 -> Int16Atom.
    prefix = '%s%d' % (baseclass.prefix(), itemsize * 8)
    type_ = prefix.lower()
    def __init__(self, shape=(), dflt=baseclass._defvalue):
        Atom.__init__(self, self.type, shape, dflt)
    classdict = {
        'itemsize': itemsize,
        'type': type_,
        '__doc__': "Defines an atom of type ``%s``." % type_,
        '__init__': __init__,
    }
    return type('%sAtom' % prefix, (baseclass,), classdict)
def _generate_integral_classes():
    """Generate all integral classes."""
    for base in (IntAtom, UIntAtom):
        for nbytes in (1, 2, 4, 8):
            yield _create_numeric_class(base, nbytes)
def _generate_floating_classes():
    """Generate one FloatAtom subclass per float width supported by numpy."""
    sizes = []
    # Half precision only exists on numpy >= 1.6.
    if hasattr(numpy, 'float16'):
        sizes.append(2)
    sizes.extend([4, 8])
    # Extended-precision floats are platform dependent.
    if hasattr(numpy, 'float96'):
        sizes.append(12)
    if hasattr(numpy, 'float128'):
        sizes.append(16)
    for size in sizes:
        yield _create_numeric_class(FloatAtom, size)
# Create all numeric atom classes.
# NOTE: Python 2 ``exec`` *statement* -- this binds each generated class
# (e.g. ``Int8Atom`` ... ``Float64Atom``) as a module-level name, then
# cleans up the loop variables so they don't leak into the module namespace.
for _classgen in [_generate_integral_classes, _generate_floating_classes]:
    for _newclass in _classgen():
        exec '%s = _newclass' % _newclass.__name__
del _classgen, _newclass
class ComplexAtom(Atom):
    """Defines an atom of kind complex.

    Allowed item sizes are 8 (single precision) and 16 (double precision). This
    class must be used instead of more concrete ones to avoid confusions with
    numarray-like precision specifications used in PyTables 1.X.
    """

    # This definition is a little more complex (no pun intended)
    # because, although the complex kind is a normal numerical one,
    # the usage of bottom-level classes is artificially forbidden.
    # Everything will be back to normality when people has stopped
    # using the old bottom-level complex classes.
    kind = 'complex'
    # Fixed typo: "sigle" -> "single" in the property documentation.
    itemsize = property(
        lambda self: self.dtype.base.itemsize,
        None, None, "Size in bytes of a single item in the atom." )
    _deftype = 'complex128'
    _defvalue = 0j

    # Only instances have a `type` attribute, so complex types must be
    # registered by hand.
    all_types.add('complex64')
    all_types.add('complex128')

    def __init__(self, itemsize, shape=(), dflt=_defvalue):
        # Validate the size before deriving the concrete type name.
        isizes = [8, 16]
        if itemsize not in isizes:
            raise _invalid_itemsize_error('complex', itemsize, isizes)
        # e.g. itemsize 16 -> type 'complex128' (bits, not bytes).
        self.type = '%s%d' % (self.kind, itemsize * 8)
        Atom.__init__(self, self.type, shape, dflt)
class _ComplexErrorAtom(ComplexAtom):
    """Reminds the user to stop using the old complex atom names."""
    # NOTE(review): Python 2-style ``__metaclass__`` assignment; it has no
    # effect on Python 3 -- confirm the target interpreter for this module.
    __metaclass__ = type # do not register anything about this class
    def __init__(self, shape=(), dflt=ComplexAtom._defvalue):
        # Any attempt to instantiate the legacy names fails loudly.
        raise TypeError(
            "to avoid confusions with PyTables 1.X complex atom names, "
            "please use ``ComplexAtom(itemsize=N)``, "
            "where N=8 for single precision complex atoms, "
            "and N=16 for double precision complex atoms" )
# Legacy aliases kept so old code fails with the explanatory TypeError above.
Complex32Atom = Complex64Atom = Complex128Atom = _ComplexErrorAtom
class TimeAtom(Atom):
    """Defines an atom of time type (time kind).

    There are two distinct supported types of time: a 32 bit integer value and
    a 64 bit floating point value. Both of them reflect the number of seconds
    since the Unix epoch. This atom has the property of being stored using the
    HDF5 time datatypes.
    """

    kind = 'time'
    _deftype = 'time32'
    _defvalue = 0
    # Shared constructor built from the default type and default value.
    __init__ = _abstract_atom_init(_deftype, _defvalue)
class Time32Atom(TimeAtom):
    """Defines an atom of type time32."""

    type = 'time32'
    itemsize = 4
    _defvalue = 0

    def __init__(self, shape=(), dflt=_defvalue):
        # Seconds are kept as 32-bit signed integers in memory.
        Atom.__init__(self, 'int32', shape, dflt)
class Time64Atom(TimeAtom):
    """Defines an atom of type time64."""

    type = 'time64'
    itemsize = 8
    _defvalue = 0.0

    def __init__(self, shape=(), dflt=_defvalue):
        # Seconds are kept as 64-bit floats in memory.
        Atom.__init__(self, 'float64', shape, dflt)
class EnumAtom(Atom):
    """Description of an atom of an enumerated type.

    Instances of this class describe the atom type used to store enumerated
    values. Those values belong to an enumerated type, defined by the first
    argument (enum) in the constructor of the atom, which accepts the same
    kinds of arguments as the Enum class (see :ref:`EnumClassDescr`). The
    enumerated type is stored in the enum attribute of the atom.

    A default value must be specified as the second argument (dflt) in the
    constructor; it must be the *name* (a string) of one of the enumerated
    values in the enumerated type. When the atom is created, the corresponding
    concrete value is broadcast and stored in the dflt attribute (setting
    different default values for items in a multidimensional atom is not
    supported yet). If the name does not match any value in the enumerated
    type, a KeyError is raised.

    Another atom must be specified as the base argument in order to determine
    the base type used for storing the values of enumerated values in memory
    and disk. This *storage atom* is kept in the base attribute of the created
    atom. As a shorthand, you may specify a PyTables type instead of the
    storage atom, implying that this has a scalar shape.

    The storage atom should be able to represent each and every concrete value
    in the enumeration. If it is not, a TypeError is raised. The default value
    of the storage atom is ignored.

    The type attribute of enumerated atoms is always enum.

    Enumerated atoms also support comparisons with other objects::

    >>> enum = ['T0', 'T1', 'T2']
    >>> atom1 = EnumAtom(enum, 'T0', 'int8')  # same as ``atom2``
    >>> atom2 = EnumAtom(enum, 'T0', Int8Atom())  # same as ``atom1``
    >>> atom3 = EnumAtom(enum, 'T0', 'int16')
    >>> atom4 = Int8Atom()
    >>> atom1 == enum
    False
    >>> atom1 == atom2
    True
    >>> atom2 != atom1
    False
    >>> atom1 == atom3
    False
    >>> atom1 == atom4
    False
    >>> atom4 != atom1
    True

    Examples
    --------
    The next C enum construction::

        enum myEnum {
            T0,
            T1,
            T2
        };

    would correspond to the following PyTables declaration::

    >>> myEnumAtom = EnumAtom(['T0', 'T1', 'T2'], 'T0', 'int32')

    Please note the dflt argument with a value of 'T0'. Since the concrete
    value matching T0 is unknown right now (we have not used explicit concrete
    values), using the name is the only option left for defining a default
    value for the atom.

    The chosen representation of values for this enumerated atom uses unsigned
    32-bit integers, which surely wastes quite a lot of memory. Another size
    could be selected by using the base argument (this time with a full-blown
    storage atom)::

    >>> myEnumAtom = EnumAtom(['T0', 'T1', 'T2'], 'T0', UInt8Atom())

    You can also define multidimensional arrays for data elements::

    >>> myEnumAtom = EnumAtom(
    ...    ['T0', 'T1', 'T2'], 'T0', base='uint32', shape=(3,2))

    for 3x2 arrays of uint32.
    """

    # Registering this class in the class map may be a little wrong,
    # since the ``Atom.from_kind()`` method fails miserably with
    # enumerations, as they don't support an ``itemsize`` argument.
    # However, resetting ``__metaclass__`` to ``type`` doesn't seem to
    # work and I don't feel like creating a subclass of ``MetaAtom``.
    kind = 'enum'
    type = 'enum'

    # Properties
    # ~~~~~~~~~~
    # Fixed typo: "sigle" -> "single" in the property documentation.
    itemsize = property(
        lambda self: self.dtype.base.itemsize,
        None, None, "Size in bytes of a single item in the atom." )

    # Private methods
    # ~~~~~~~~~~~~~~~
    def _checkBase(self, base):
        """Check the `base` storage atom."""
        if base.kind == 'enum':
            raise TypeError( "can not use an enumerated atom "
                             "as a storage atom: %r" % base )
        # Check whether the storage atom can represent concrete values
        # in the enumeration...
        basedtype = base.dtype
        pyvalues = [value for (name, value) in self.enum]
        try:
            npgenvalues = numpy.array(pyvalues)
        except ValueError:
            raise TypeError("concrete values are not uniformly-shaped")
        try:
            npvalues = numpy.array(npgenvalues, dtype=basedtype.base)
        except ValueError:
            raise TypeError( "storage atom type is incompatible with "
                             "concrete values in the enumeration" )
        if npvalues.shape[1:] != basedtype.shape:
            raise TypeError( "storage atom shape does not match that of "
                             "concrete values in the enumeration" )
        # Round-tripping through the storage dtype must not lose information.
        if npvalues.tolist() != npgenvalues.tolist():
            raise TypeError( "storage atom type lacks precision for "
                             "concrete values in the enumeration" )
        # ...with some implementation limitations.
        # (Changed ``not x in y`` to the idiomatic ``x not in y``.)
        if npvalues.dtype.kind not in ['i', 'u']:
            raise NotImplementedError( "only integer concrete values "
                                       "are supported for the moment, sorry" )
        if len(npvalues.shape) > 1:
            raise NotImplementedError( "only scalar concrete values "
                                       "are supported for the moment, sorry" )

    def _get_init_args(self):
        """Get a dictionary of instance constructor arguments."""
        return dict( enum=self.enum, dflt=self._defname,
                     base=self.base, shape=self.shape )

    def _is_equal_to_atom(self, atom):
        """Is this object equal to the given `atom`?"""
        # An enumerated atom never equals a plain (non-enum) atom.
        return False

    def _is_equal_to_enumatom(self, enumatom):
        """Is this object equal to the given `enumatom`?"""
        return ( self.enum == enumatom.enum and self.shape == enumatom.shape
                 and numpy.all(self.dflt == enumatom.dflt)
                 and self.base == enumatom.base )

    # Special methods
    # ~~~~~~~~~~~~~~~
    def __init__(self, enum, dflt, base, shape=()):
        if not isinstance(enum, Enum):
            enum = Enum(enum)
        self.enum = enum
        # A plain PyTables type name is a shorthand for a scalar storage atom.
        if isinstance(base, str):
            base = Atom.from_type(base)
        self._checkBase(base)
        self.base = base
        default = enum[dflt]  # check default value
        self._defname = dflt  # kept for representation purposes
        # These are kept to ease dumping this particular
        # representation of the enumeration to storage.
        names, values = [], []
        for (name, value) in enum:
            names.append(name)
            values.append(value)
        basedtype = self.base.dtype
        self._names = names
        self._values = numpy.array(values, dtype=basedtype.base)
        Atom.__init__(self, basedtype, shape, default)

    def __repr__(self):
        return ( 'EnumAtom(enum=%r, dflt=%r, base=%r, shape=%r)'
                 % (self.enum, self._defname, self.base, self.shape) )

    __eq__ = _cmp_dispatcher('_is_equal_to_enumatom')

    # XXX: API incompatible change for PyTables 3 line
    # Overriding __eq__ blocks inheritance of __hash__ in 3.x
    #def __hash__(self):
    #    return hash((self.__class__, self.enum, self.shape, self.dflt,
    #                 self.base))
# Pseudo-atom classes
# ===================
#
# Now, there come three special classes, `ObjectAtom`, `VLStringAtom`
# and `VLUnicodeAtom`, that actually do not descend from `Atom`, but
# which goal is so similar that they should be described here.
# Pseudo-atoms can only be used with `VLArray` datasets, and they do
# not support multidimensional values, nor multiple values per row.
#
# They can be recognised because they also have ``kind``, ``type`` and
# ``shape`` attributes, but no ``size``, ``itemsize`` or ``dflt``
# ones. Instead, they have a ``base`` atom which defines the elements
# used for storage.
#
# See ``examples/vlarray1.py`` and ``examples/vlarray2.py`` for
# further examples on `VLArray` datasets, including object
# serialization and string management.
class PseudoAtom(object):
    """Pseudo-atoms can only be used in ``VLArray`` nodes.

    They can be recognised because they also have `kind`, `type` and
    `shape` attributes, but no `size`, `itemsize` or `dflt` ones.
    Instead, they have a `base` atom which defines the elements used
    for storage.
    """

    def __repr__(self):
        # e.g. ``VLStringAtom()`` -- pseudo-atoms take no arguments.
        return self.__class__.__name__ + '()'

    def toarray(self, object_):
        """Convert an `object_` into an array of base atoms."""
        raise NotImplementedError

    def fromarray(self, array):
        """Convert an `array` of base atoms into an object."""
        raise NotImplementedError
class _BufferedAtom(PseudoAtom):
    """Pseudo-atom which stores data as a buffer (flat array of uints)."""

    shape = ()

    def toarray(self, object_):
        # Serialize first, then expose the raw buffer as base-atom items.
        raw = self._tobuffer(object_)
        return numpy.ndarray(buffer=raw, dtype=self.base.dtype,
                             shape=len(raw))

    def _tobuffer(self, object_):
        """Convert an `object_` into a buffer."""
        raise NotImplementedError
class VLStringAtom(_BufferedAtom):
    """Defines an atom of type ``vlstring``.
    This class describes a *row* of the VLArray class, rather than an atom. It
    differs from the StringAtom class in that you can only add *one instance of
    it to one specific row*, i.e. the :meth:`VLArray.append` method only
    accepts one object when the base atom is of this type.
    Like StringAtom, this class does not make assumptions on the encoding of
    the string, and raw bytes are stored as is. Unicode strings are supported
    as long as no character is out of the ASCII set; otherwise, you will need
    to *explicitly* convert them to strings before you can save them. For full
    Unicode support, using VLUnicodeAtom (see :ref:`VLUnicodeAtom`) is
    recommended.
    Variable-length string atoms do not accept parameters and they cause the
    reads of rows to always return Python strings. You can regard vlstring
    atoms as an easy way to save generic variable length strings.
    """
    kind = 'vlstring'
    type = 'vlstring'
    # Each byte of the string is stored as one unsigned 8-bit item.
    base = UInt8Atom()
    def _tobuffer(self, object_):
        # NOTE(review): ``basestring`` is a Python 2 name; this module
        # appears to target Python 2 (see the ``exec`` statement above) --
        # confirm the intended interpreter.
        if not isinstance(object_, basestring):
            raise TypeError("object is not a string: %r" % (object_,))
        return numpy.string_(object_)
    def fromarray(self, array):
        # Reassemble the flat uint8 buffer back into a byte string.
        return array.tostring()
class VLUnicodeAtom(_BufferedAtom):
    """Defines an atom of type vlunicode.
    This class describes a *row* of the VLArray class, rather than an atom. It
    is very similar to VLStringAtom (see :ref:`VLStringAtom`), but it stores
    Unicode strings (using 32-bit characters a la UCS-4, so all strings of the
    same length also take up the same space).
    This class does not make assumptions on the encoding of plain input
    strings. Plain strings are supported as long as no character is out of the
    ASCII set; otherwise, you will need to *explicitly* convert them to Unicode
    before you can save them.
    Variable-length Unicode atoms do not accept parameters and they cause the
    reads of rows to always return Python Unicode strings. You can regard
    vlunicode atoms as an easy way to save variable length Unicode strings.
    """
    kind = 'vlunicode'
    type = 'vlunicode'
    # Each character is stored as one unsigned 32-bit item (UCS-4).
    base = UInt32Atom()
    # ``toarray`` is conditionally overridden at class-creation time for
    # Python 3 or narrow (UCS-2) builds; otherwise the inherited
    # ``_BufferedAtom.toarray`` + ``_tobuffer`` path is used.
    if sys.version_info[0] > 2 or sys.maxunicode <= 0xffff:
        # numpy.unicode_ no more implements the buffer interface in Python 3
        #
        # When the Python build is UCS-2, we need to promote the
        # Unicode string to UCS-4. We *must* use a 0-d array since
        # NumPy scalars inherit the UCS-2 encoding from Python (see
        # NumPy ticket #525). Since ``_tobuffer()`` can't return an
        # array, we must override ``toarray()`` itself.
        def toarray(self, object_):
            # NOTE(review): ``basestring``/``unicode`` are Python 2 names,
            # yet this branch also triggers on Python 3 -- on Python 3 this
            # would raise NameError; confirm the intended interpreter.
            if not isinstance(object_, basestring):
                raise TypeError("object is not a string: %r" % (object_,))
            ustr = unicode(object_)
            uarr = numpy.array(ustr, dtype='U')
            return numpy.ndarray(
                buffer=uarr, dtype=self.base.dtype, shape=len(ustr) )
    def _tobuffer(self, object_):
        # This works (and is used) only with UCS-4 builds of Python,
        # where the width of the internal representation of a
        # character matches that of the base atoms.
        if not isinstance(object_, basestring):
            raise TypeError("object is not a string: %r" % (object_,))
        return numpy.unicode_(object_)
    def fromarray(self, array):
        length = len(array)
        if length == 0:
            return u'' # ``array.view('U0')`` raises a `TypeError`
        # Reinterpret the uint32 buffer as a single unicode scalar.
        return array.view('U%d' % length).item()
class ObjectAtom(_BufferedAtom):
    """Defines an atom of type object.
    This class is meant to fit *any* kind of Python object in a row of a
    VLArray dataset by using pickle behind the scenes. Due to the fact that
    you can not foresee how long will be the output of the pickle
    serialization (i.e. the atom already has a *variable* length), you can only
    fit *one object per row*. However, you can still group several objects in a
    single tuple or list and pass it to the :meth:`VLArray.append` method.
    Object atoms do not accept parameters and they cause the reads of rows to
    always return Python objects. You can regard object atoms as an easy way to
    save an arbitrary number of generic Python objects in a VLArray dataset.
    """
    kind = 'object'
    type = 'object'
    # Pickled bytes are stored one byte per base-atom item.
    base = UInt8Atom()
    def _tobuffer(self, object_):
        # NOTE(review): data stored here is unpickled on read (below);
        # unpickling can execute arbitrary code, so files from untrusted
        # sources must not be opened with object atoms.
        return cPickle.dumps(object_, cPickle.HIGHEST_PROTOCOL)
    def fromarray(self, array):
        # We have to check for an empty array because of a possible
        # bug in HDF5 which makes it claim that a dataset has one
        # record when in fact it is empty.
        if array.size == 0:
            return None
        return cPickle.loads(array.tostring())
# Main part
# =========
def _test():
"""Run ``doctest`` on this module."""
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Commonly used utils in pandas-on-Spark.
"""
import functools
from collections import OrderedDict
from contextlib import contextmanager
import os
from typing import ( # noqa: F401 (SPARK-34943)
Any,
Callable,
Dict,
Iterable,
Iterator,
List,
Optional,
Tuple,
Union,
TYPE_CHECKING,
cast,
no_type_check,
overload,
)
import warnings
from pyspark.sql import functions as F, Column, DataFrame as SparkDataFrame, SparkSession
from pyspark.sql.types import DoubleType
import pandas as pd
from pandas.api.types import is_list_like
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas._typing import Axis, DataFrameOrSeries
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef.typehints import as_spark_type
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.base import IndexOpsMixin # noqa: F401 (SPARK-34943)
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.internal import InternalFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
# Error raised when combining Series/DataFrames that are anchored to
# different Spark DataFrames while 'compute.ops_on_diff_frames' is off.
ERROR_MESSAGE_CANNOT_COMBINE = (
    "Cannot combine the series or dataframe because it comes from a different dataframe. "
    "In order to allow this operation, enable 'compute.ops_on_diff_frames' option."
)

# Spark SQL configuration key for Arrow-based transfers in PySpark.
SPARK_CONF_ARROW_ENABLED = "spark.sql.execution.arrow.pyspark.enabled"
def same_anchor(
    this: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
    that: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
) -> bool:
    """
    Check if the anchors of the given DataFrame or Series are the same or not.

    Two objects share an anchor when they are backed by the very same Spark
    DataFrame and have identical index columns.
    """
    from pyspark.pandas.base import IndexOpsMixin
    from pyspark.pandas.frame import DataFrame
    from pyspark.pandas.internal import InternalFrame

    def _as_internal(obj):
        # Normalize any accepted input to its InternalFrame.
        if isinstance(obj, InternalFrame):
            return obj
        assert isinstance(obj, (DataFrame, IndexOpsMixin)), type(obj)
        return obj._internal

    this_internal = _as_internal(this)
    that_internal = _as_internal(that)

    # Identity of the underlying Spark frame is required, not mere equality.
    if this_internal.spark_frame is not that_internal.spark_frame:
        return False
    if this_internal.index_level != that_internal.index_level:
        return False
    return all(
        spark_column_equals(this_scol, that_scol)
        for this_scol, that_scol in zip(
            this_internal.index_spark_columns, that_internal.index_spark_columns
        )
    )
def combine_frames(
    this: "DataFrame",
    *args: DataFrameOrSeries,
    how: str = "full",
    preserve_order_column: bool = False
) -> "DataFrame":
    """
    This method combines `this` DataFrame with a different `that` DataFrame or
    Series from a different DataFrame.

    It returns a DataFrame that has prefix `this_` and `that_` to distinct
    the columns names from both DataFrames

    It internally performs a join operation which can be expensive in general.
    So, if `compute.ops_on_diff_frames` option is False,
    this method throws an exception.
    """
    from pyspark.pandas.config import get_option
    from pyspark.pandas.frame import DataFrame
    from pyspark.pandas.internal import (
        InternalField,
        InternalFrame,
        HIDDEN_COLUMNS,
        NATURAL_ORDER_COLUMN_NAME,
        SPARK_INDEX_NAME_FORMAT,
    )
    from pyspark.pandas.series import Series

    # `args` must be either multiple Series anchored to one other frame,
    # or exactly one DataFrame.
    if all(isinstance(arg, Series) for arg in args):
        assert all(
            same_anchor(arg, args[0]) for arg in args
        ), "Currently only one different DataFrame (from given Series) is supported"
        assert not same_anchor(this, args[0]), "We don't need to combine. All series is in this."
        that = args[0]._psdf[list(args)]
    elif len(args) == 1 and isinstance(args[0], DataFrame):
        assert isinstance(args[0], DataFrame)
        assert not same_anchor(
            this, args[0]
        ), "We don't need to combine. `this` and `that` are same."
        that = args[0]
    else:
        raise AssertionError("args should be single DataFrame or " "single/multiple Series")

    if get_option("compute.ops_on_diff_frames"):

        def resolve(internal: InternalFrame, side: str) -> InternalFrame:
            # Prefix every visible column with "__this_"/"__that_" so both
            # frames can be joined without column-name collisions.
            rename = lambda col: "__{}_{}".format(side, col)
            internal = internal.resolved_copy
            sdf = internal.spark_frame
            sdf = internal.spark_frame.select(
                *[
                    scol_for(sdf, col).alias(rename(col))
                    for col in sdf.columns
                    if col not in HIDDEN_COLUMNS
                ],
                *HIDDEN_COLUMNS
            )
            return internal.copy(
                spark_frame=sdf,
                index_spark_columns=[
                    scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
                ],
                index_fields=[
                    field.copy(name=rename(field.name)) for field in internal.index_fields
                ],
                data_spark_columns=[
                    scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
                ],
                data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
            )

        this_internal = resolve(this._internal, "this")
        that_internal = resolve(that._internal, "that")

        # (column name, index name, field) per index level, for each side.
        this_index_map = list(
            zip(
                this_internal.index_spark_column_names,
                this_internal.index_names,
                this_internal.index_fields,
            )
        )
        that_index_map = list(
            zip(
                that_internal.index_spark_column_names,
                that_internal.index_names,
                that_internal.index_fields,
            )
        )
        assert len(this_index_map) == len(that_index_map)

        join_scols = []
        merged_index_scols = []

        # Note that the order of each element in index_map is guaranteed according to the index
        # level.
        this_and_that_index_map = list(zip(this_index_map, that_index_map))

        this_sdf = this_internal.spark_frame.alias("this")
        that_sdf = that_internal.spark_frame.alias("that")

        # If the same named index is found, that's used.
        index_column_names = []
        index_use_extension_dtypes = []
        for (
            i,
            ((this_column, this_name, this_field), (that_column, that_name, that_field)),
        ) in enumerate(this_and_that_index_map):
            if this_name == that_name:
                # We should merge the Spark columns into one
                # to mimic pandas' behavior.
                this_scol = scol_for(this_sdf, this_column)
                that_scol = scol_for(that_sdf, that_column)
                join_scol = this_scol == that_scol
                join_scols.append(join_scol)

                column_name = SPARK_INDEX_NAME_FORMAT(i)
                index_column_names.append(column_name)
                index_use_extension_dtypes.append(
                    any(field.is_extension_dtype for field in [this_field, that_field])
                )
                # Coalesce: prefer `this` side's index value when present.
                merged_index_scols.append(
                    F.when(this_scol.isNotNull(), this_scol).otherwise(that_scol).alias(column_name)
                )
            else:
                raise ValueError("Index names must be exactly matched currently.")

        assert len(join_scols) > 0, "cannot join with no overlapping index names"

        joined_df = this_sdf.join(that_sdf, on=join_scols, how=how)

        if preserve_order_column:
            order_column = [scol_for(this_sdf, NATURAL_ORDER_COLUMN_NAME)]
        else:
            order_column = []

        # Layout: merged index columns, then `this` data, then `that` data.
        joined_df = joined_df.select(
            *merged_index_scols,
            *(
                scol_for(this_sdf, this_internal.spark_column_name_for(label))
                for label in this_internal.column_labels
            ),
            *(
                scol_for(that_sdf, that_internal.spark_column_name_for(label))
                for label in that_internal.column_labels
            ),
            *order_column
        )

        index_spark_columns = [scol_for(joined_df, col) for col in index_column_names]

        index_columns = set(index_column_names)
        new_data_columns = [
            col
            for col in joined_df.columns
            if col not in index_columns and col != NATURAL_ORDER_COLUMN_NAME
        ]

        schema = joined_df.select(*index_spark_columns, *new_data_columns).schema

        # Rebuild field metadata from the joined schema, preserving
        # extension-dtype information from the original fields.
        index_fields = [
            InternalField.from_struct_field(struct_field, use_extension_dtypes=use_extension_dtypes)
            for struct_field, use_extension_dtypes in zip(
                schema.fields[: len(index_spark_columns)], index_use_extension_dtypes
            )
        ]
        data_fields = [
            InternalField.from_struct_field(
                struct_field, use_extension_dtypes=field.is_extension_dtype
            )
            for struct_field, field in zip(
                schema.fields[len(index_spark_columns) :],
                this_internal.data_fields + that_internal.data_fields,
            )
        ]

        level = max(this_internal.column_labels_level, that_internal.column_labels_level)

        def fill_label(label: Optional[Tuple]) -> List:
            # Pad a column label to `level` components so both sides align.
            if label is None:
                return ([""] * (level - 1)) + [None]
            else:
                return ([""] * (level - len(label))) + list(label)

        # Prepend a "this"/"that" level to disambiguate the two sides.
        column_labels = [
            tuple(["this"] + fill_label(label)) for label in this_internal.column_labels
        ] + [tuple(["that"] + fill_label(label)) for label in that_internal.column_labels]
        column_label_names = (
            cast(List[Optional[Tuple]], [None]) * (1 + level - this_internal.column_labels_level)
        ) + this_internal.column_label_names
        return DataFrame(
            InternalFrame(
                spark_frame=joined_df,
                index_spark_columns=index_spark_columns,
                index_names=this_internal.index_names,
                index_fields=index_fields,
                column_labels=column_labels,
                data_spark_columns=[scol_for(joined_df, col) for col in new_data_columns],
                data_fields=data_fields,
                column_label_names=column_label_names,
            )
        )
    else:
        raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
def align_diff_frames(
    resolve_func: Callable[
        ["DataFrame", List[Tuple], List[Tuple]], Iterator[Tuple["Series", Tuple]]
    ],
    this: "DataFrame",
    that: "DataFrame",
    fillna: bool = True,
    how: str = "full",
    preserve_order_column: bool = False,
) -> "DataFrame":
    """
    This method aligns two different DataFrames with a given `func`. Columns are resolved and
    handled within the given `func`.
    To use this, `compute.ops_on_diff_frames` should be True, for now.

    :param resolve_func: Takes aligned (joined) DataFrame, the column of the current DataFrame, and
        the column of another DataFrame. It returns an iterable that produces Series.

        >>> from pyspark.pandas.config import set_option, reset_option
        >>>
        >>> set_option("compute.ops_on_diff_frames", True)
        >>>
        >>> psdf1 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
        >>> psdf2 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
        >>>
        >>> def func(psdf, this_column_labels, that_column_labels):
        ...    psdf  # conceptually this is A + B.
        ...
        ...    # Within this function, Series from A or B can be performed against `psdf`.
        ...    this_label = this_column_labels[0]  # this is ('a',) from psdf1.
        ...    that_label = that_column_labels[0]  # this is ('a',) from psdf2.
        ...    new_series = (psdf[this_label] - psdf[that_label]).rename(str(this_label))
        ...
        ...    # This new series will be placed in new DataFrame.
        ...    yield (new_series, this_label)
        >>>
        >>>
        >>> align_diff_frames(func, psdf1, psdf2).sort_index()
           a
        0  0
        1  0
        2  0
        3  0
        4  0
        5  0
        6  0
        7  0
        8  0
        >>> reset_option("compute.ops_on_diff_frames")

    :param this: a DataFrame to align
    :param that: another DataFrame to align
    :param fillna: If True, it fills missing values in non-common columns in both `this` and `that`.
        Otherwise, it returns as are.
    :param how: join way. In addition, it affects how `resolve_func` resolves the column conflict.
        - full: `resolve_func` should resolve only common columns from 'this' and 'that' DataFrames.
            For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` and
            'that_columns' in this function are B, C and B, C.
        - left: `resolve_func` should resolve columns including that columns.
            For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` is
            B, C but `that_columns` are B, C, D.
        - inner: Same as 'full' mode; however, internally performs inner join instead.
    :return: Aligned DataFrame
    """
    from pyspark.pandas.frame import DataFrame

    assert how == "full" or how == "left" or how == "inner"

    this_column_labels = this._internal.column_labels
    that_column_labels = that._internal.column_labels
    common_column_labels = set(this_column_labels).intersection(that_column_labels)

    # 1. Perform the join given two dataframes.
    combined = combine_frames(this, that, how=how, preserve_order_column=preserve_order_column)

    # 2. Apply the given function to transform the columns in a batch and keep the new columns.
    combined_column_labels = combined._internal.column_labels

    that_columns_to_apply = []  # type: List[Tuple]
    this_columns_to_apply = []  # type: List[Tuple]
    additional_that_columns = []  # type: List[Tuple]
    columns_to_keep = []  # type: List[Union[Series, Column]]
    column_labels_to_keep = []  # type: List[Tuple]

    # Combined labels are prefixed with "this"/"that" (see combine_frames);
    # classify each into "apply via resolve_func" vs "keep as-is".
    for combined_label in combined_column_labels:
        for common_label in common_column_labels:
            if combined_label == tuple(["this", *common_label]):
                this_columns_to_apply.append(combined_label)
                break
            elif combined_label == tuple(["that", *common_label]):
                that_columns_to_apply.append(combined_label)
                break
        else:
            if how == "left" and combined_label in [
                tuple(["that", *label]) for label in that_column_labels
            ]:
                # In this case, we will drop `that_columns` in `columns_to_keep` but passes
                # it later to `func`. `func` should resolve it.
                # Note that adding this into a separate list (`additional_that_columns`)
                # is intentional so that `this_columns` and `that_columns` can be paired.
                additional_that_columns.append(combined_label)
            elif fillna:
                # Non-common column: replace with a null (double) column.
                columns_to_keep.append(SF.lit(None).cast(DoubleType()).alias(str(combined_label)))
                column_labels_to_keep.append(combined_label)
            else:
                columns_to_keep.append(combined._psser_for(combined_label))
                column_labels_to_keep.append(combined_label)

    that_columns_to_apply += additional_that_columns

    # Should extract columns to apply and do it in a batch in case
    # it adds new columns for example.
    if len(this_columns_to_apply) > 0 or len(that_columns_to_apply) > 0:
        psser_set, column_labels_set = zip(
            *resolve_func(combined, this_columns_to_apply, that_columns_to_apply)
        )
        columns_applied = list(psser_set)  # type: List[Union[Series, Column]]
        column_labels_applied = list(column_labels_set)  # type: List[Tuple]
    else:
        columns_applied = []
        column_labels_applied = []

    applied = DataFrame(
        combined._internal.with_new_columns(
            columns_applied + columns_to_keep,
            column_labels=column_labels_applied + column_labels_to_keep,
        )
    )  # type: DataFrame

    # 3. Restore the names back and deduplicate columns.
    # `new_label[1:]` strips the "this"/"that" prefix level.
    this_labels = OrderedDict()
    # Add columns in an order of its original frame.
    for this_label in this_column_labels:
        for new_label in applied._internal.column_labels:
            if new_label[1:] not in this_labels and this_label == new_label[1:]:
                this_labels[new_label[1:]] = new_label

    # After that, we will add the rest columns.
    other_labels = OrderedDict()
    for new_label in applied._internal.column_labels:
        if new_label[1:] not in this_labels:
            other_labels[new_label[1:]] = new_label

    psdf = applied[list(this_labels.values()) + list(other_labels.values())]
    # Drop the synthetic "this"/"that" level from the column index.
    psdf.columns = psdf.columns.droplevel()
    return psdf
def is_testing() -> bool:
    """Indicates whether Spark is currently running tests."""
    # The Spark test harness exports SPARK_TESTING in the environment.
    return os.environ.get("SPARK_TESTING") is not None
def default_session(conf: Optional[Dict[str, Any]] = None) -> SparkSession:
    """Return the shared pandas-on-Spark SparkSession, creating it if needed.

    Entries in `conf` (if any) are applied to the builder before the
    session is obtained.
    """
    settings = {} if conf is None else conf
    builder = SparkSession.builder.appName("pandas-on-Spark")
    for key, value in settings.items():
        builder = builder.config(key, value)
    # Currently, pandas-on-Spark is dependent on such join due to 'compute.ops_on_diff_frames'
    # configuration. This is needed with Spark 3.0+.
    builder.config("spark.sql.analyzer.failAmbiguousSelfJoin", False)

    if is_testing():
        builder.config("spark.executor.allowSparkContext", False)
    return builder.getOrCreate()
@contextmanager
def sql_conf(pairs: Dict[str, Any], *, spark: Optional[SparkSession] = None) -> Iterator[None]:
    """
    A convenient context manager to set `value` to the Spark SQL configuration `key` and
    then restores it back when it exits.
    """
    assert isinstance(pairs, dict), "pairs should be a dictionary."
    if spark is None:
        spark = default_session()

    # Remember the current values so they can be restored on exit.
    previous = {key: spark.conf.get(key, None) for key in pairs}
    for key, new_value in pairs.items():
        spark.conf.set(key, new_value)
    try:
        yield
    finally:
        for key, old_value in previous.items():
            if old_value is None:
                # The key was unset before entering; unset it again.
                spark.conf.unset(key)
            else:
                spark.conf.set(key, old_value)
def validate_arguments_and_invoke_function(
    pobj: Union[pd.DataFrame, pd.Series],
    pandas_on_spark_func: Callable,
    pandas_func: Callable,
    input_args: Dict,
) -> Any:
    """
    Invokes a pandas function.

    This is created because different versions of pandas support different parameters, and as a
    result when we code against the latest version, our users might get a confusing
    "got an unexpected keyword argument" error if they are using an older version of pandas.

    This function validates all the arguments, removes the ones that are not supported if they
    are simply the default value (i.e. most likely the user didn't explicitly specify it). It
    throws a TypeError if the user explicitly specify an argument that is not supported by the
    pandas version available.

    For example usage, look at DataFrame.to_html().

    :param pobj: the pandas DataFrame or Series to operate on
    :param pandas_on_spark_func: pandas-on-Spark function, used to get default parameter values
    :param pandas_func: pandas function, used to check whether pandas supports all the arguments
    :param input_args: arguments to pass to the pandas function, often created by using locals().
                       Make sure locals() call is at the top of the function so it captures only
                       input parameters, rather than local variables.
    :return: whatever pandas_func returns
    """
    import inspect

    # Work on a copy: `input_args` is typically locals() of the caller and
    # must not be mutated.
    call_args = dict(input_args)
    del call_args["self"]

    # Flatten an explicit **kwargs entry into the argument dict.
    if "kwargs" in call_args:
        extra = call_args.pop("kwargs")
        call_args = {**call_args, **extra}

    pandas_on_spark_params = inspect.signature(pandas_on_spark_func).parameters
    pandas_params = inspect.signature(pandas_func).parameters

    for param in pandas_on_spark_params.values():
        if param.name in pandas_params:
            continue
        if call_args[param.name] == param.default:
            # Unsupported but left at its default: silently drop it.
            del call_args[param.name]
        else:
            raise TypeError(
                "The pandas version [%s] available does not support parameter '%s' "
                "for function '%s'."
                % (pd.__version__, param.name, pandas_func.__name__)
            )

    call_args["self"] = pobj
    return pandas_func(**call_args)
@no_type_check
def lazy_property(fn: Callable[[Any], Any]) -> property:
    """
    Decorator that makes a property lazy-evaluated: the wrapped function runs
    once on first access, and its result is cached on the instance under
    ``_lazy_<name>``. Deleting the property clears the cache so the next
    access recomputes it.

    Copied from https://stevenloria.com/lazy-properties/
    """
    cached_name = "_lazy_" + fn.__name__

    @property
    @functools.wraps(fn)
    def _getter(self):
        if not hasattr(self, cached_name):
            setattr(self, cached_name, fn(self))
        return getattr(self, cached_name)

    def _deleter(self):
        if hasattr(self, cached_name):
            delattr(self, cached_name)

    return _getter.deleter(_deleter)
def scol_for(sdf: SparkDataFrame, column_name: str) -> Column:
    """Return the Spark Column of ``sdf`` named ``column_name`` (backtick-quoted)."""
    return sdf[f"`{column_name}`"]
def column_labels_level(column_labels: List[Tuple]) -> int:
    """Return the level (number of components) of the column index.

    All labels must share one level; a ``None`` label counts as level 1.
    An empty label list defaults to level 1.
    """
    if not column_labels:
        return 1
    levels = {1 if label is None else len(label) for label in column_labels}
    assert len(levels) == 1, levels
    return next(iter(levels))
def name_like_string(name: Optional[Union[Any, Tuple]]) -> str:
    """
    Return the name-like strings from str or tuple of str

    Examples
    --------
    >>> name = 'abc'
    >>> name_like_string(name)
    'abc'

    >>> name = ('abc',)
    >>> name_like_string(name)
    'abc'

    >>> name = ('a', 'b', 'c')
    >>> name_like_string(name)
    '(a, b, c)'
    """
    # Normalize everything to a tuple of strings first.
    if name is None:
        parts = ("__none__",)
    elif is_list_like(name):
        parts = tuple(str(n) for n in name)
    else:
        parts = (str(name),)
    if len(parts) > 1:
        return "(%s)" % ", ".join(parts)
    return parts[0]
def is_name_like_tuple(value: Any, allow_none: bool = True, check_type: bool = False) -> bool:
    """
    Check the given tuple is be able to be used as a name.

    Examples
    --------
    >>> is_name_like_tuple(('abc',))
    True
    >>> is_name_like_tuple((1,))
    True
    >>> is_name_like_tuple(('abc', 1, None))
    True
    >>> is_name_like_tuple(('abc', 1, None), check_type=True)
    True
    >>> is_name_like_tuple((1.0j,))
    True
    >>> is_name_like_tuple(tuple())
    False
    >>> is_name_like_tuple((list('abc'),))
    False
    >>> is_name_like_tuple(('abc', 1, None), allow_none=False)
    False
    >>> is_name_like_tuple((1.0j,), check_type=True)
    False
    """
    # Reject non-tuples, empty tuples, disallowed Nones, and components that
    # are themselves list-like or slices; optionally require Spark-mappable
    # component types.
    if value is None:
        return allow_none
    if not isinstance(value, tuple) or len(value) == 0:
        return False
    if not allow_none and any(v is None for v in value):
        return False
    if any(is_list_like(v) or isinstance(v, slice) for v in value):
        return False
    if check_type:
        return all(
            v is None or as_spark_type(type(v), raise_error=False) is not None for v in value
        )
    return True
def is_name_like_value(
    value: Any, allow_none: bool = True, allow_tuple: bool = True, check_type: bool = False
) -> bool:
    """
    Check the given value is like a name.

    Examples
    --------
    >>> is_name_like_value('abc')
    True
    >>> is_name_like_value(1)
    True
    >>> is_name_like_value(None)
    True
    >>> is_name_like_value(('abc',))
    True
    >>> is_name_like_value(1.0j)
    True
    >>> is_name_like_value(list('abc'))
    False
    >>> is_name_like_value(None, allow_none=False)
    False
    >>> is_name_like_value(('abc',), allow_tuple=False)
    False
    >>> is_name_like_value(1.0j, check_type=True)
    False
    """
    if value is None:
        return allow_none
    if isinstance(value, tuple):
        # Tuples delegate to the tuple-specific check (if allowed at all).
        if not allow_tuple:
            return False
        return is_name_like_tuple(value, allow_none=allow_none, check_type=check_type)
    if is_list_like(value) or isinstance(value, slice):
        return False
    if check_type:
        return as_spark_type(type(value), raise_error=False) is not None
    return True
def validate_axis(axis: Optional[Axis] = 0, none_axis: int = 0) -> int:
    """Check the given axis is valid and return its numeric form.

    ``None`` maps to ``none_axis``; "index"/"columns" map to 0/1. Anything
    that does not normalize to ``none_axis``, 0 or 1 raises ``ValueError``.
    """
    aliases = cast(Dict[Optional[Axis], int], {None: none_axis, "index": 0, "columns": 1})
    axis = aliases.get(axis, axis)
    if axis not in (none_axis, 0, 1):
        raise ValueError("No axis named {0}".format(axis))
    return cast(int, axis)
def validate_bool_kwarg(value: Any, arg_name: str) -> Optional[bool]:
    """Ensures that argument passed in arg_name is of type bool (or None)."""
    if value is None or isinstance(value, bool):
        return value
    raise TypeError(
        'For argument "{}" expected type bool, received '
        "type {}.".format(arg_name, type(value).__name__)
    )
def validate_how(how: str) -> str:
    """Check the given how for join is valid; normalizes 'outer' to Spark's 'full'."""
    if how == "full":
        # Accepted for convenience, but pandas users should say 'outer'.
        warnings.warn(
            "Warning: While pandas-on-Spark will accept 'full', you should use 'outer' "
            + "instead to be compatible with the pandas merge API",
            UserWarning,
        )
    elif how == "outer":
        # 'outer' in pandas equals 'full' in Spark
        how = "full"
    if how not in ("inner", "left", "right", "full"):
        raise ValueError(
            "The 'how' parameter has to be amongst the following values: ",
            "['inner', 'left', 'right', 'outer']",
        )
    return how
# Typing overloads only: a plain Spark DataFrame takes and returns a single
# string column name, while a pandas-on-Spark DataFrame takes and returns a
# column label (string or tuple). The implementation follows below.
@overload
def verify_temp_column_name(df: SparkDataFrame, column_name_or_label: str) -> str:
    ...
@overload
def verify_temp_column_name(
    df: "DataFrame", column_name_or_label: Union[Any, Tuple]
) -> Union[Any, Tuple]:
    ...
def verify_temp_column_name(
    df: Union["DataFrame", SparkDataFrame], column_name_or_label: Union[Any, Tuple]
) -> Union[Any, Tuple]:
    """
    Verify that the given column name does not exist in the given pandas-on-Spark or
    Spark DataFrame.
    The temporary column names should start and end with `__`. In addition, `column_name_or_label`
    expects a single string, or column labels when `df` is a pandas-on-Spark DataFrame.
    >>> psdf = ps.DataFrame({("x", "a"): ['a', 'b', 'c']})
    >>> psdf["__dummy__"] = 0
    >>> psdf[("", "__dummy__")] = 1
    >>> psdf  # doctest: +NORMALIZE_WHITESPACE
           x __dummy__
           a           __dummy__
    0  a         0         1
    1  b         0         1
    2  c         0         1
    >>> verify_temp_column_name(psdf, '__tmp__')
    ('__tmp__', '')
    >>> verify_temp_column_name(psdf, ('', '__tmp__'))
    ('', '__tmp__')
    >>> verify_temp_column_name(psdf, '__dummy__')
    Traceback (most recent call last):
    ...
    AssertionError: ... `(__dummy__, )` ...
    >>> verify_temp_column_name(psdf, ('', '__dummy__'))
    Traceback (most recent call last):
    ...
    AssertionError: ... `(, __dummy__)` ...
    >>> verify_temp_column_name(psdf, 'dummy')
    Traceback (most recent call last):
    ...
    AssertionError: ... should be empty or start and end with `__`: ('dummy', '')
    >>> verify_temp_column_name(psdf, ('', 'dummy'))
    Traceback (most recent call last):
    ...
    AssertionError: ... should be empty or start and end with `__`: ('', 'dummy')
    >>> internal = psdf._internal.resolved_copy
    >>> sdf = internal.spark_frame
    >>> sdf.select(internal.data_spark_columns).show()  # doctest: +NORMALIZE_WHITESPACE
    +------+---------+-------------+
    |(x, a)|__dummy__|(, __dummy__)|
    +------+---------+-------------+
    |     a|        0|            1|
    |     b|        0|            1|
    |     c|        0|            1|
    +------+---------+-------------+
    >>> verify_temp_column_name(sdf, '__tmp__')
    '__tmp__'
    >>> verify_temp_column_name(sdf, '__dummy__')
    Traceback (most recent call last):
    ...
    AssertionError: ... `__dummy__` ... '(x, a)', '__dummy__', '(, __dummy__)', ...
    >>> verify_temp_column_name(sdf, ('', '__dummy__'))
    Traceback (most recent call last):
    ...
    AssertionError: <class 'tuple'>
    >>> verify_temp_column_name(sdf, 'dummy')
    Traceback (most recent call last):
    ...
    AssertionError: ... should start and end with `__`: dummy
    """
    from pyspark.pandas.frame import DataFrame
    if isinstance(df, DataFrame):
        if isinstance(column_name_or_label, str):
            column_name = column_name_or_label
            # Pad a plain string to the frame's column-label level, e.g.
            # '__tmp__' -> ('__tmp__', '') for a 2-level column index.
            level = df._internal.column_labels_level
            column_name_or_label = tuple([column_name_or_label] + ([""] * (level - 1)))
        else:
            column_name = name_like_string(column_name_or_label)
        # Every non-empty label component must be wrapped in double underscores,
        # and at least one component must be non-empty.
        assert any(len(label) > 0 for label in column_name_or_label) and all(
            label == "" or (label.startswith("__") and label.endswith("__"))
            for label in column_name_or_label
        ), "The temporary column name should be empty or start and end with `__`: {}".format(
            column_name_or_label
        )
        assert all(
            column_name_or_label != label for label in df._internal.column_labels
        ), "The given column name `{}` already exists in the pandas-on-Spark DataFrame: {}".format(
            name_like_string(column_name_or_label), df.columns
        )
        # Fall through to also check the backing Spark frame's flat column names.
        df = df._internal.resolved_copy.spark_frame
    else:
        # Plain Spark DataFrame: only a single string name is accepted.
        assert isinstance(column_name_or_label, str), type(column_name_or_label)
        assert column_name_or_label.startswith("__") and column_name_or_label.endswith(
            "__"
        ), "The temporary column name should start and end with `__`: {}".format(
            column_name_or_label
        )
        column_name = column_name_or_label
    assert isinstance(df, SparkDataFrame), type(df)
    assert (
        column_name not in df.columns
    ), "The given column name `{}` already exists in the Spark DataFrame: {}".format(
        column_name, df.columns
    )
    return column_name_or_label
def spark_column_equals(left: Column, right: Column) -> bool:
    """
    Check both `left` and `right` have the same expressions.

    >>> spark_column_equals(SF.lit(0), SF.lit(0))
    True
    >>> spark_column_equals(SF.lit(0) + 1, SF.lit(0) + 1)
    True
    >>> spark_column_equals(SF.lit(0) + 1, SF.lit(0) + 2)
    False
    >>> sdf1 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
    >>> spark_column_equals(sdf1["x"] + 1, sdf1["x"] + 1)
    True
    >>> sdf2 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
    >>> spark_column_equals(sdf1["x"] + 1, sdf2["x"] + 1)
    False
    """
    # Compare the underlying JVM column expressions for structural equality.
    jleft = left._jc  # type: ignore
    jright = right._jc  # type: ignore
    return jleft.equals(jright)
def compare_null_first(
    left: Column,
    right: Column,
    comp: Callable[[Column, Column], Column],
) -> Column:
    """Comparator column treating NULL as smaller than any non-null value."""
    # True when both sides are non-null and `comp` holds, or when only the
    # left side is null; False whenever the right side is null.
    return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
        left.isNull() & right.isNotNull()
    )
def compare_null_last(
    left: Column,
    right: Column,
    comp: Callable[[Column, Column], Column],
) -> Column:
    """Comparator column treating NULL as larger than any non-null value."""
    # True when both sides are non-null and `comp` holds, or when only the
    # right side is null; False whenever the left side is null.
    return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
        left.isNotNull() & right.isNull()
    )
def compare_disallow_null(
    left: Column,
    right: Column,
    comp: Callable[[Column, Column], Column],
) -> Column:
    """Comparator column that is False whenever either side is NULL."""
    return left.isNotNull() & right.isNotNull() & comp(left, right)
def compare_allow_null(
    left: Column,
    right: Column,
    comp: Callable[[Column, Column], Column],
) -> Column:
    """Comparator column that is True whenever either side is NULL."""
    return left.isNull() | right.isNull() | comp(left, right)
def _test() -> None:
    # Doctest runner: executes the examples embedded in this module's
    # docstrings against a local 4-core SparkSession.
    import os
    import doctest
    import sys
    from pyspark.sql import SparkSession
    import pyspark.pandas.utils
    # The doctests assume SPARK_HOME as the working directory.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.pandas.utils.__dict__.copy()
    globs["ps"] = pyspark.pandas
    spark = (
        SparkSession.builder.master("local[4]").appName("pyspark.pandas.utils tests").getOrCreate()
    )
    (failure_count, test_count) = doctest.testmod(
        pyspark.pandas.utils,
        globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
    )
    spark.stop()
    # Propagate doctest failures to the caller via a non-zero exit status.
    if failure_count:
        sys.exit(-1)
if __name__ == "__main__":
    _test()
| |
#!/usr/bin/python2
#
# Copyright 2018 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Utility functions useful for CSV data sources."""
from __future__ import print_function
__author__ = 'Benjamin Yolken <yolken@google.com>'
import csv
import re
import string
import warnings
import data_source
def _HeaderToColumn(header_string):
  """Parse the header string for a column.

  Args:
    header_string: The complete string for the column header, e.g.
      'price[type=float;aggregation=avg]'

  Returns:
    A DataColumn object populated based on the header data

  Raises:
    DataSourceError: If there are any errors in parsing, e.g. if an
      unrecognized key is found.
  """
  # The column id must be at least one character long, and cannot contain the
  # characters '[', ']', ';', or whitespace; an optional [key=value;...]
  # parameter section may follow. (Raw string so the escapes reach re intact.)
  parameters_match = re.match(
      r'^([^\]\[;\s]+)(?:\[(.*)\]){0,1}$',
      header_string.strip().replace('"', ''))

  if not parameters_match:
    raise data_source.DataSourceError(
        'Formatting error for header string: %s' % header_string)

  column_id = parameters_match.group(1)
  column = data_source.DataSourceColumn(column_id, internal_parameters={})

  if parameters_match.group(2):
    # Parse the semicolon-delimited key=value column parameters
    key_value_pairs = parameters_match.group(2).split(';')

    for key_value_pair in key_value_pairs:
      try:
        [key, value] = key_value_pair.split('=')
      except ValueError:
        raise data_source.DataSourceError(
            'Formatting error for header string: %s' % header_string)

      # Map the key to the appropriate field of the DataSourceColumn object
      if key == 'type':
        if value not in ['date', 'float', 'integer', 'string']:
          raise data_source.DataSourceError(
              'Unknown data type for column %s: %s' %
              (column.column_id, value))
        column.data_type = value
      elif key == 'format':
        column.data_format = value
      elif key == 'concept':
        column.concept_ref = value
      elif key == 'extends':
        column.concept_extension = value
      elif key == 'parent':
        column.parent_ref = value
      elif key == 'slice_role':
        role_value = value.lower()
        if role_value not in ['dimension', 'metric']:
          # BUGFIX: the error message previously said 'slice_roll'.
          raise data_source.DataSourceError(
              'Unrecognized slice_role in column %s: %s' %
              (column.column_id, value))
        column.slice_role = role_value
      elif key == 'rollup':
        if value.lower() == 'true':
          column.rollup = True
        elif value.lower() == 'false':
          column.rollup = False
        else:
          raise data_source.DataSourceError(
              'Unrecognized boolean value in column %s: %s' %
              (column.column_id, value))
      elif key == 'total_val':
        column.total_val = value
      elif key == 'dropif':
        column.internal_parameters['dropif_val'] = value
      elif key == 'zeroif':
        column.internal_parameters['zeroif_val'] = value
      elif key == 'aggregation':
        # BUGFIX: string.lower(value) is Python-2-only (removed in Python 3);
        # use the str method, matching the 'rollup'/'slice_role' cases above.
        if value.lower() not in ['sum', 'max', 'min', 'avg', 'count']:
          raise data_source.DataSourceError(
              'Unknown aggregation for column %s: %s' %
              (column.column_id, value))
        column.internal_parameters['aggregation'] = value
      else:
        raise data_source.DataSourceError(
            'Unknown parameter for column %s: %s' %
            (column.column_id, key))

  return column
def ConstructColumnBundle(csv_file, verbose=True):
  """Construct a ColumnBundle from the header information in a CSV file.

  Args:
    csv_file: A readable, seekable file-like object containing the CSV data
    verbose: Print out extra information to stdout

  Returns:
    A data_source.ColumnBundle object populated based on the CSV header

  Raises:
    DataSourceError: If there are any parsing errors or data
    inconsistencies
  """
  # Get the first and second rows of the CSV; the second row is used to guess
  # each column's type/format when the header does not specify one.
  header_csv_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
  header_row_values = next(header_csv_reader)
  second_row_values = next(header_csv_reader)
  # Rewind so later consumers of csv_file see the whole stream.
  csv_file.seek(0)
  # Check that second row is properly formatted
  if len(header_row_values) != len(second_row_values):
    raise data_source.DataSourceError(
        'Number of columns in row 2 (%d) does not match number '
        'expected (%d)' % (len(second_row_values), len(header_row_values)))
  column_bundle = data_source.DataSourceColumnBundle()
  for header_element in header_row_values:
    column_bundle.AddColumn(_HeaderToColumn(header_element))
  num_date_columns = 0
  has_metric_column = False
  column_ids = [column.column_id for column in
                column_bundle.GetColumnIterator()]
  # Iterate through columns, populating and refining DataSourceColumn
  # parameters as necessary
  for c, column in enumerate(column_bundle.GetColumnIterator()):
    if verbose:
      print('\nEvaluating column %s' % column.column_id)
    # Check data type; fall back to guessing from the first data row.
    if not column.data_type:
      column.data_type = (
          data_source.GuessDataType(second_row_values[c], column.column_id))
      if verbose:
        print('Guessing that column %s is of type %s' % (
            column.column_id, column.data_type))
    # Check slice type: numeric columns default to metrics, others to
    # dimensions.
    if not column.slice_role:
      if column.data_type == 'integer' or column.data_type == 'float':
        column.slice_role = 'metric'
      else:
        column.slice_role = 'dimension'
      if verbose:
        print('Guessing that column %s is a %s' % (
            column.column_id, column.slice_role))
    # Check aggregation; metrics default to SUM.
    if column.slice_role == 'metric':
      has_metric_column = True
      if 'aggregation' not in column.internal_parameters:
        column.internal_parameters['aggregation'] = 'SUM'
        if verbose:
          print('Guessing that column %s should be aggregated by %s' % (
              column.column_id, column.internal_parameters['aggregation']))
    # Check parent: it must exist in this dataset, and parent columns are
    # implicitly promoted to rollup columns.
    if column.parent_ref:
      if column.parent_ref not in column_ids:
        raise data_source.DataSourceError(
            'Column %s references a parent not defined in this dataset: %s' %
            (column.column_id, column.parent_ref))
      parent_column = column_bundle.GetColumnByID(column.parent_ref)
      if not parent_column.rollup:
        parent_column.rollup = True
        if verbose:
          print('Making column %s rollup since it is a parent to column %s'
                % (parent_column.column_id, column.column_id))
    # Check date format and concept, guessing both from the data if absent.
    if column.data_type == 'date':
      num_date_columns += 1
      if not column.data_format:
        column.data_format = (
            data_source.GuessDateFormat(second_row_values[c]))
      if not column.concept_ref:
        column.concept_ref = (
            data_source.GuessDateConcept(column.data_format))
      if verbose:
        print('Guessing that column %s is formatted as %s and '
              'corresponds to %s' % (
                  column.column_id, column.data_format, column.concept_ref))
  # Warn user if their file will not produce interesting DSPL visualizations
  if num_date_columns == 0:
    warnings.warn('Input file does not have a date column',
                  data_source.DataSourceWarning)
  elif num_date_columns > 1:
    warnings.warn('Input file has more than one date column',
                  data_source.DataSourceWarning)
  if not has_metric_column:
    warnings.warn('Input file does not have any metrics',
                  data_source.DataSourceWarning)
  return column_bundle
| |
class Node:
    """A node in a label hierarchy (DMOZ / OHSUMED / WIPO style trees).

    Trees are rooted at a synthetic 'Top' node with labelIndex -1; real
    labels carry non-negative indices. The read_* methods are alternate
    constructors that each parse one on-disk hierarchy format.
    """

    def __init__(self):
        self.labelIndex = -1   # -1 marks the synthetic root / unassigned
        self.name = 'Top'
        self.children = []
        self.parent = -1       # -1 until attached to a parent via add()
        self.subsize = 0       # cached subtree size (see get_tree_size)
        self.depth = 0         # distance from the root
        self.level = 0         # cached subtree height (see get_subtree_level)

    def get_all_node_ids(self, name_2_id, cache):
        """Map each labeled node's backslash-joined name path to its id."""
        cache.append(self.name)
        if self.labelIndex != -1:
            name_2_id['\\'.join(cache[1:])] = self.labelIndex
        for child in self.children:
            child.get_all_node_ids(name_2_id, cache)
        cache.pop()

    def get_all_node_ids_label_id(self, name_2_id, cache):
        """Like get_all_node_ids, but paths are built from label ids."""
        cache.append(str(self.labelIndex))
        if self.labelIndex != -1:
            name_2_id['\\'.join(cache[1:])] = self.labelIndex
        for child in self.children:
            child.get_all_node_ids_label_id(name_2_id, cache)
        cache.pop()

    def get_all_node_ids_2(self, id_paths, cache):
        """Collect each labeled node's slash-joined id path into id_paths."""
        cache.append(str(self.labelIndex))
        if self.labelIndex != -1:
            id_paths.append('/'.join(cache[1:]))
        for child in self.children:
            child.get_all_node_ids_2(id_paths, cache)
        cache.pop()

    def sort_label_ids(self, old_2_new_mapper, new_2_old_mapper):
        """Relabel all descendants in BFS order, recording both mappings."""
        queue = list(self.children)
        new_id = 0
        while queue:
            nd = queue.pop(0)
            # Record the mapping, then overwrite the label in place.
            old_2_new_mapper[nd.labelIndex] = new_id
            new_2_old_mapper[new_id] = nd.labelIndex
            nd.labelIndex = new_id
            new_id += 1
            queue.extend(nd.children)

    def add(self, child):
        """Attach `child` under this node, fixing its parent and depth."""
        self.children.append(child)
        child.parent = self
        child.depth = self.depth + 1

    def find_child(self, nm):
        """Return the direct child named `nm`, or -1 if absent."""
        for child in self.children:
            if child.name == nm:
                return child
        return -1

    def get_tree_size(self):
        """Return the subtree size (including self); caches it in subsize."""
        total = 1
        for child in self.children:
            total += child.get_tree_size()
        self.subsize = total
        return total

    def get_each_subtree_size(self, sub_sizes):
        """Like get_tree_size, also recording each child's size by label id."""
        total = 1
        for child in self.children:
            size = child.get_each_subtree_size(sub_sizes)
            sub_sizes[child.labelIndex] = size
            total += size
        self.subsize = total
        return total

    def get_subtree_size(self):
        """Refresh the cached subsize of each direct child's subtree."""
        for child in self.children:
            child.subsize = child.get_tree_size()

    def get_subtree_weight(self, weight):
        """Record, per label, the child's share of its parent's subtree.

        Requires subsize to be populated first (e.g. via get_tree_size).
        """
        for child in self.children:
            weight[child.labelIndex] = float(child.subsize) / child.parent.subsize
            child.get_subtree_weight(weight)

    def read_dmoz_tree(self, file):
        """Build a tree from '<path>\\t<id>' lines with '/'-separated paths.

        NOTE(review): when a line introduces several new path components at
        once, every newly created node receives this line's id — confirm the
        input guarantees one new component per line.
        """
        root = Node()
        fd = open(file)
        for line in fd:
            line = line.replace('\n', '')
            line = line.split('\t')
            path = line[0]
            node_id = line[1]
            path = path.split('/')
            nd = root
            for part in path:
                found = nd.find_child(part)
                if found == -1:
                    t_nd = Node()
                    t_nd.labelIndex = int(node_id)
                    t_nd.name = part
                    nd.add(t_nd)
                    nd = t_nd
                else:
                    nd = found
        fd.close()
        return root

    def read_dmoz_ontology_tree(self, file):
        """Build a tree from '<parent>\\t<child>' integer pair lines.

        Pre-allocates 100000 nodes; node i has labelIndex i-1, so nodes[0]
        (labelIndex -1) is the root.
        """
        nodes = []
        for i in range(100000):
            nd = Node()
            nd.labelIndex = i - 1
            nodes.append(nd)
        fd = open(file)
        for line in fd:
            labels = line.strip().split('\t')
            parent = int(labels[0])
            child = int(labels[1])
            nodes[parent].add(nodes[child])
        fd.close()
        return nodes[0]

    def read_ohsumed_tree(self, file):
        """Build a tree from backslash-separated name paths, one per line.

        NOTE(review): the id counter advances once per path component
        (whether or not a node is created), so label ids are sparse — confirm
        this matches the original OHSUMED numbering.
        """
        fd = open(file)
        next_id = 0
        top = Node()
        for line in fd:
            line = line.replace('\n', '')
            line = line.split('\\')
            nd = top
            for part in line:
                found = nd.find_child(part)
                if found == -1:
                    new_nd = Node()
                    new_nd.labelIndex = next_id
                    new_nd.name = part
                    nd.add(new_nd)
                    nd = new_nd
                else:
                    nd = found
                next_id += 1
        fd.close()
        return top

    def read_wipo_tree(self, file, max_depth):
        """Build a tree from backslash paths, truncated to max_depth levels."""
        root = Node()
        fd = open(file)
        next_id = 0
        for line in fd:
            line = line.strip()
            path = line.split('\\')
            nd = root
            for part in path[:max_depth]:
                found = nd.find_child(part)
                if found == -1:
                    t_nd = Node()
                    t_nd.labelIndex = int(next_id)
                    next_id += 1
                    t_nd.name = part
                    nd.add(t_nd)
                    nd = t_nd
                else:
                    nd = found
        fd.close()
        return root

    def read_tree(self, file):
        """Parse the '|'-separated '<id+1>,<child+1>,...' format of write_tree."""
        fd = open(file)
        content = fd.read()
        fd.close()
        # Pre-allocate nodes; node i has labelIndex i-1 (nodes[0] is the root).
        nodes = []
        for i in range(50000):
            nd = Node()
            nd.labelIndex = i - 1
            nodes.append(nd)
        # Each '|' chunk is one parent followed by its children; ids are
        # stored off by one so the root serializes as 0.
        content = content.replace('#', '')
        for chunk in content.split('|'):
            # BUGFIX: was `chunk is not ''` (identity comparison on a literal).
            if chunk != '' and chunk != '\n':
                ids = chunk.split(',')
                labelIndex = int(ids[0])
                # The trailing comma yields an empty last element; skip it.
                for child_id in ids[1:(len(ids) - 1)]:
                    nodes[labelIndex].add(nodes[int(child_id)])
        return nodes[0]

    def read_parent_child_pair_tree(self, file):
        """Build a tree from '<parent>,<child>' lines; returns (root, nodes).

        Nodes are created on demand. If several nodes end up parentless, a
        fresh synthetic root is created above them.
        """
        nodes = {}
        fd = open(file)
        for line in fd:
            line = line.replace('\n', '')
            line = line.split(',')
            parent = int(line[0])
            child = int(line[1])
            if parent not in nodes:
                nd = Node()
                nd.labelIndex = parent
                nodes[parent] = nd
            if child not in nodes:
                nd = Node()
                nd.labelIndex = child
                nodes[child] = nd
            nodes[parent].add(nodes[child])
        fd.close()
        root = Node()
        roots = [label for label in nodes.keys() if nodes[label].parent == -1]
        if len(roots) == 1:
            root = nodes[roots[0]]
        else:
            for label in roots:
                root.add(nodes[label])
        return root, nodes

    # added by xiao 06012012
    def get_total_subtrees(self):
        """Return the number of potential subtrees expanded from this root."""
        if len(self.children) == 0:
            return 1
        product = 1
        for child in self.children:
            product *= 1 + child.get_total_subtrees()
        return product

    def write_tree(self, file):
        """Serialize breadth-first as '<id+1>,<child+1>,...,|...#' (see read_tree)."""
        parts = []  # join once at the end instead of quadratic str +=
        queue = [self]
        while queue:
            top = queue.pop(0)
            if top.children:
                parts.append(str(top.labelIndex + 1) + ',')
                for nd in top.children:
                    parts.append(str(nd.labelIndex + 1) + ',')
                    queue.append(nd)
                parts.append('|')
        parts.append('#')
        fd = open(file, 'w')
        fd.write(''.join(parts))
        fd.close()

    def get_ancestor_nodes(self, ancestors):
        """Record, per label id, the list of ancestor label ids (nearest first)."""
        for child in self.children:
            n = child.parent
            # BUGFIX: was `is not -1` (identity comparison on an int literal).
            while n.labelIndex != -1:
                ancestors[child.labelIndex].append(n.labelIndex)
                n = n.parent
            child.get_ancestor_nodes(ancestors)

    def get_parents(self, parents):
        """Record each label id's parent label id (-1 for top-level labels)."""
        for child in self.children:
            parents[child.labelIndex] = child.parent.labelIndex
            child.get_parents(parents)

    def get_nodes_per_level(self, depth, nodes=None):
        """Collect label ids per tree level.

        The original class defined this method twice, so the one-argument
        form was silently shadowed; both historical call forms are supported:
          get_nodes_per_level(nodes)        -> group by each node's stored depth
          get_nodes_per_level(depth, nodes) -> group by an explicit depth counter
        """
        if nodes is None:
            nodes = depth
            for child in self.children:
                nodes[child.depth].append(child.labelIndex)
                child.get_nodes_per_level(nodes)
        else:
            for child in self.children:
                nodes[depth].append(child.labelIndex)
                child.get_nodes_per_level(depth + 1, nodes)

    def get_max_level(self):
        """Return the height of this subtree (0 for a leaf)."""
        level = 0
        for child in self.children:
            # Recurse once per child (the original recursed twice).
            child_level = child.get_max_level() + 1
            if child_level > level:
                level = child_level
        return level

    def get_subtree_level(self):
        """Cache each direct child's subtree height in its `level` field."""
        for child in self.children:
            child.level = child.get_max_level()

    def get_leaves(self, leaves):
        """Append all leaf nodes of this subtree to `leaves`."""
        for child in self.children:
            if child.children == []:
                leaves.append(child)
            else:
                child.get_leaves(leaves)

    def get_node_depth(self, depth_seq):
        """Record each label id's depth into `depth_seq`."""
        for child in self.children:
            depth_seq[child.labelIndex] = child.depth
            child.get_node_depth(depth_seq)

    def find_leaves_in_list(self, st, leaves):
        """Add to `leaves` the members of `st` with no child also in `st`."""
        if self.labelIndex != -1:
            if self.labelIndex in st:
                is_leaf = 1
                for child in self.children:
                    if child.labelIndex in st:
                        is_leaf = 0
                        break
                if is_leaf == 1:
                    leaves.add(self.labelIndex)
            for child in self.children:
                child.find_leaves_in_list(st, leaves)
        else:
            for child in self.children:
                child.find_leaves_in_list(st, leaves)
def readUnlabeledDocIDs(file):
    """Read a set of integer doc ids, one per line, from `file`.

    BUGFIX: the original ignored the `file` argument and always opened the
    hard-coded 'al_dmoz_unlabeled_doc_ids'. Blank lines are now skipped
    instead of crashing int().
    """
    undocids = set()
    fd = open(file, 'r')
    for line in fd:
        line = line.strip()
        if line:
            undocids.add(int(line))
    fd.close()
    return undocids
def writeUnlabeledDocIDs(in_doc_ids, selected_doc_ids, out_file):
    """Write `in_doc_ids` minus `selected_doc_ids` to out_file, one per line.

    BUGFIX: doc ids are ints (see readUnlabeledDocIDs), so the original
    `x + '\\n'` raised TypeError; ids are now converted with str().
    """
    remain = in_doc_ids - selected_doc_ids
    fd = open(out_file, 'w')
    for doc_id in remain:
        fd.write(str(doc_id) + '\n')
    fd.close()
def sampleUnlabeledDocs(usedDocIDs, num, feature_file_in, feature_file_out, label_file_in, label_file_out):
    """Sample up to `num` docs from `usedDocIDs` and copy their lines.

    Reads the feature and label files in lockstep (label lines start with
    '<doc_id> '), writes the lines of the sampled docs to the output files,
    and returns the sampled doc ids in file order.

    BUGFIX: random.sample() requires a sequence on Python 3.11+, so the set
    is sorted before sampling (which also makes seeded runs reproducible).
    """
    import random
    # Sample num docs from the doc id set (or take all of them if fewer).
    if num > len(usedDocIDs):
        sampleDocIDs = set(usedDocIDs)
    else:
        sampleDocIDs = set(random.sample(sorted(usedDocIDs), num))
    # Scan the input files in lockstep, keeping lines for sampled ids.
    feature_fd = open(feature_file_in)
    label_fd = open(label_file_in)
    sampled_doc_ids = []
    feature_buf = []
    label_buf = []
    n = 0
    for line_label in label_fd:
        line_feature = feature_fd.readline()
        # The doc id is the token before the first space of the label line.
        a = line_label.index(' ')
        doc_id = int(line_label[:a])
        if doc_id in sampleDocIDs:
            feature_buf.append(line_feature)
            label_buf.append(line_label)
            sampled_doc_ids.append(doc_id)
            n += 1
            # Stop early once all requested docs are found.
            if n == num:
                break
    feature_fd.close()
    label_fd.close()
    feature_fd_w = open(feature_file_out, 'w')
    label_fd_w = open(label_file_out, 'w')
    for i in range(len(feature_buf)):
        feature_fd_w.write(feature_buf[i])
        label_fd_w.write(label_buf[i])
    feature_fd_w.close()
    label_fd_w.close()
    return sampled_doc_ids
def HU_score(root, weight, probs):
    """Hierarchical uncertainty score of one document over a label tree.

    Accumulates, over every node below `root`, the node's uncertainty
    weighted by its subtree weight.

    BUGFIX: the original read `root.children.labelIndex` (an attribute of
    the list object), which raises AttributeError; the current child's
    label index is intended.

    NOTE(review): abs(p - 0.5) is largest for *confident* predictions; if
    true uncertainty is intended this may need to be 0.5 - abs(p - 0.5) —
    confirm against the active-learning criterion used.
    """
    hu = 0
    for c in root.children:
        labelIndex = c.labelIndex
        # compute the per-node score
        p = probs[labelIndex]
        u = abs(p - 0.5)
        # accumulate, weighted by the node's subtree weight
        hu += u * weight[labelIndex]
        hu += HU_score(c, weight, probs)
    return hu
def active_learning(root, weight, unlabeled_doc_ids, select_num, file_base):
    """Pick the `select_num` unlabeled docs with the highest HU_score.

    Reads one probability file per node ('<i>_<file_base>', one float per
    line, one line per unlabeled doc), scores every doc against the label
    tree, and returns the ids of the top-scoring docs.
    """
    # Read all probs: probs[i][j] is node i's probability for doc j.
    # NOTE(review): get_tree_size() counts the root as well, so this expects
    # a prob file for the root index too — confirm against the file layout.
    num = root.get_tree_size()
    probs = []
    for i in range(num):
        fd = open(str(i) + '_' + file_base)
        ps = []
        for line in fd:
            line = line.replace('\n','')
            ps.append(float(line))
        fd.close()
        probs.append(ps)
    # Begin active learning: score each doc with the per-node probabilities.
    vs = []
    for i in range(len(probs[0])):
        p = []
        for j in range(num):
            p.append(probs[j][i])
        v = HU_score(root, weight, p)
        vs.append((unlabeled_doc_ids[i], v))
    # Highest score first.
    vs = sorted(vs, key=lambda s:s[1],reverse=True)
    # Return the selected doc ids.
    return [vs[i][0] for i in range(select_num)]
def localize_selected_docs(parents, selected_doc_ids, sampled_features_file, sampled_labels_file, feature_file_base, label_file_base):
    """Distribute sampled docs into each label's local training files.

    A doc is a positive example for every label it carries, and a negative
    example for every label whose parent it carries. New lines are appended
    to '<label>_<feature_file_base>' / '<label>_<label_file_base>'.

    Args:
        parents: list mapping each label index to its parent's label index
            (-1 for top-level labels).
        selected_doc_ids: unused; kept for backward compatibility.
    Returns:
        newdocs: per-label list of doc ids appended to that label's dataset.

    BUGFIX: the original referenced an undefined `root`, iterated over the
    int `label_size`, called .append on a string slice of the label line,
    used the always-True `is not []` check, and wrote ints without str().
    """
    label_size = len(parents)
    newdocs = [[] for _ in range(label_size)]
    newlabels = [[] for _ in range(label_size)]
    # Localize data. NOTE(review): each label line is assumed to be
    # whitespace-separated "<doc_id> <label> <label> ..." — the original
    # indexed raw characters, so confirm this against the label file format.
    fd = open(sampled_labels_file)
    for line in fd:
        parts = line.split()
        if not parts:
            continue
        doc_id = int(parts[0])
        labels = set(int(x) for x in parts[1:])
        # -1 stands in as the "parent" of top-level labels, so top-level
        # labels always receive the doc as a candidate negative example.
        labels.add(-1)
        for labelIndex in range(label_size):
            if labelIndex in labels:
                # positive example
                newdocs[labelIndex].append(doc_id)
                newlabels[labelIndex].append(1)
            elif parents[labelIndex] in labels:
                # negative example (sibling under a carried parent)
                newdocs[labelIndex].append(doc_id)
                newlabels[labelIndex].append(-1)
    fd.close()
    # Read the whole sampled feature set, keyed by doc id.
    docs = {}
    fd = open(sampled_features_file)
    for line in fd:
        a = line.index(' ')
        docs[int(line[:a])] = line
    fd.close()
    # Append data at the end of each node's dataset.
    for labelIndex in range(label_size):
        if newdocs[labelIndex]:
            fd = open(str(labelIndex) + '_' + feature_file_base, 'a')
            for doc_id in newdocs[labelIndex]:
                fd.write(docs[doc_id])
            fd.close()
            fd = open(str(labelIndex) + '_' + label_file_base, 'a')
            for nl in newlabels[labelIndex]:
                fd.write(str(nl) + '\n')
            fd.close()
    return newdocs
def flag_selection(label_size, localized_docs, file_base):
    """Write a CHANGED/NO_CHANGED marker file for each label.

    Label i's marker file '<i>_<file_base>' says CHANGED when new docs were
    localized to it (non-empty list in localized_docs).

    BUGFIX: the original opened the file in (default) read mode and then
    wrote to it, and used `localized_docs[i] is not []`, which is always
    True; both are fixed here.
    """
    for i in range(label_size):
        fd = open(str(i) + '_' + file_base, 'w')
        if localized_docs[i]:
            fd.write('CHANGED')
        else:
            fd.write('NO_CHANGED')
        fd.close()
"""
root = Node()
root = root.read_tree('dmoz_ont_ge_3_hierarchy.txt')
cache = []
n2ids = {}
root.get_all_node_ids_label_id(n2ids, cache)
fd = open('dmoz_ont_hier_path.txt', 'w')
ks = n2ids.keys()
ks.sort()
for k in ks:
fd.write(k + '\n')
fd.close()
"""
"""
top=Node()
root = top.read_tree('dmoz_hierarchy.txt')
depth = root.get_max_level()
label_size = root.get_tree_size()
label_size -= 1
#get ancesotr nodes and subtree weight
ancestors = []
parents = []
weight = []
nodes_per_level = [[] for i in range(depth+1)]
for i in range(label_size):
ancestors.append([])
weight.append([])
parents.append([])
root.get_nodes_per_level(1, nodes_per_level)
root.get_subtree_weight(weight)
root.get_ancestor_nodes(ancestors)
root.get_parents(parents)
root.get_subtree_size()
root.get_subtree_level()
levels = [root.children[i].level for i in nodes_per_level[1]]
sizes = [root.children[i].subsize for i in nodes_per_level[1]]
"""
| |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.tfxio_utils."""
import inspect
import tempfile
from typing import Callable, Iterator
from absl.testing import parameterized
import pyarrow as pa
import tensorflow as tf
from tfx.components.experimental.data_view import constants
from tfx.components.util import examples_utils
from tfx.components.util import tfxio_utils
from tfx.proto import example_gen_pb2
from tfx.types import standard_artifacts
from tfx_bsl.coders import tf_graph_record_decoder
from tfx_bsl.tfxio import raw_tf_record
from tfx_bsl.tfxio import record_based_tfxio
from tfx_bsl.tfxio import record_to_tensor_tfxio
from tfx_bsl.tfxio import tf_example_record
from tfx_bsl.tfxio import tf_sequence_example_record
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import schema_pb2
# Column under which the raw serialized record can be exposed alongside the
# parsed columns.
_RAW_RECORD_COLUMN_NAME = 'raw_record'
# Parameterized cases for make_tfxio / get_tfxio_factory_from_artifact:
# each maps a payload format (plus optional DataView / raw-record options)
# to the concrete TFXIO implementation expected.
_MAKE_TFXIO_TEST_CASES = [
    dict(
        testcase_name='tf_example_record',
        payload_format=example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE,
        expected_tfxio_type=tf_example_record.TFExampleRecord),
    dict(
        testcase_name='tf_example_record_also_read_raw_records',
        payload_format=example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE,
        raw_record_column_name=_RAW_RECORD_COLUMN_NAME,
        expected_tfxio_type=tf_example_record.TFExampleRecord),
    dict(
        # payload_format=None exercises the FORMAT_TF_EXAMPLE fallback.
        testcase_name='tf_example_record_default_payload_format',
        payload_format=None,
        expected_tfxio_type=tf_example_record.TFExampleRecord),
    dict(
        testcase_name='tf_sequence_example_record',
        payload_format=example_gen_pb2.PayloadFormat.FORMAT_TF_SEQUENCE_EXAMPLE,
        expected_tfxio_type=tf_sequence_example_record.TFSequenceExampleRecord),
    dict(
        testcase_name='proto_with_data_view',
        payload_format=example_gen_pb2.PayloadFormat.FORMAT_PROTO,
        provide_data_view_uri=True,
        expected_tfxio_type=record_to_tensor_tfxio.TFRecordToTensorTFXIO),
    dict(
        testcase_name='tf_example_raw_record',
        payload_format=example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE,
        read_as_raw_records=True,
        raw_record_column_name=_RAW_RECORD_COLUMN_NAME,
        expected_tfxio_type=raw_tf_record.RawTfRecordTFXIO),
    dict(
        testcase_name='proto_raw_record',
        payload_format=example_gen_pb2.PayloadFormat.FORMAT_PROTO,
        read_as_raw_records=True,
        raw_record_column_name=_RAW_RECORD_COLUMN_NAME,
        expected_tfxio_type=raw_tf_record.RawTfRecordTFXIO),
]
# Parameterized cases for resolving a common payload format (and the newest
# DataView, by create time) across multiple Examples artifacts; error cases
# specify the expected exception instead.
_RESOLVE_TEST_CASES = [
    dict(
        testcase_name='tf_example',
        payload_formats=[example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE] * 2,
        expected_payload_format=example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE,
        expected_data_view_uri=None,
    ),
    dict(
        testcase_name='proto_with_data_view',
        payload_formats=[example_gen_pb2.PayloadFormat.FORMAT_PROTO] * 3,
        data_view_uris=['dataview1', 'dataview3', 'dataview2'],
        data_view_create_times=['1', '3', '2'],
        expected_payload_format=example_gen_pb2.PayloadFormat.FORMAT_PROTO,
        expected_data_view_uri='dataview3',
    ),
    dict(
        # Create times must compare numerically, not lexicographically:
        # '1000000000009' > '9' as numbers even though '9' sorts later as str.
        testcase_name='proto_with_data_view_big_create_time',
        payload_formats=[example_gen_pb2.PayloadFormat.FORMAT_PROTO] * 3,
        data_view_uris=['dataview1', 'dataview3', 'dataview2'],
        data_view_create_times=['1', '9', '1000000000009'],
        expected_payload_format=example_gen_pb2.PayloadFormat.FORMAT_PROTO,
        expected_data_view_uri='dataview2',
    ),
    dict(
        testcase_name='proto_with_data_view_int_create_time',
        payload_formats=[example_gen_pb2.PayloadFormat.FORMAT_PROTO] * 3,
        data_view_uris=['dataview1', 'dataview3', 'dataview2'],
        data_view_create_times=[1, 3, 2],
        expected_payload_format=example_gen_pb2.PayloadFormat.FORMAT_PROTO,
        expected_data_view_uri='dataview3',
    ),
    dict(
        testcase_name='proto_without_data_view',
        payload_formats=[example_gen_pb2.PayloadFormat.FORMAT_PROTO] * 3,
        expected_payload_format=example_gen_pb2.PayloadFormat.FORMAT_PROTO,
        expected_data_view_uri=None,
    ),
    dict(
        testcase_name='mixed_payload_formats',
        payload_formats=[example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE,
                         example_gen_pb2.PayloadFormat.FORMAT_PROTO],
        expected_error_type=ValueError,
        expected_error_msg_regex='different payload formats'
    ),
    dict(
        testcase_name='proto_with_missing_data_view',
        payload_formats=[example_gen_pb2.PayloadFormat.FORMAT_PROTO] * 3,
        data_view_uris=['dataview1', None, 'dataview2'],
        data_view_create_times=[1, None, 2],
        expected_error_type=ValueError,
        # NOTE(review): this value is an error-message fragment, not a URI —
        # the key looks like it should be `expected_error_msg_regex` (as in
        # 'mixed_payload_formats'). Confirm against the consuming test.
        expected_data_view_uri='did not have DataView attached',
    ),
    dict(
        testcase_name='empty_input',
        payload_formats=[],
        expected_error_type=AssertionError,
        # NOTE(review): same apparent key mix-up as above — this looks like
        # an expected error message, not a DataView URI.
        expected_data_view_uri='At least one',
    )
]
_FAKE_FILE_PATTERN = '/input/data'
_TELEMETRY_DESCRIPTORS = ['my', 'component']
_SCHEMA = text_format.Parse("""
feature {
name: "foo"
type: INT
}
""", schema_pb2.Schema())
class _SimpleTfGraphRecordDecoder(tf_graph_record_decoder.TFGraphRecordDecoder):
  """A simple DataView Decoder used for testing.

  Turns a 1-D string tensor of records into a single-column SparseTensor
  with one record per row.
  """

  def decode_record(self, record):
    """Decode `record` into a dict with one (n, 1) SparseTensor."""
    num_records = tf.size(record)
    row_ids = tf.range(num_records, dtype=tf.int64)
    col_ids = tf.zeros(num_records, dtype=tf.int64)
    sparse_indices = tf.transpose(tf.stack([row_ids, col_ids]))
    result = {
        'sparse_tensor':
            tf.SparseTensor(
                values=record,
                indices=sparse_indices,
                dense_shape=[num_records, 1])
    }
    return result
class TfxioUtilsTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for the tfxio_utils TFXIO factory helpers."""

  @parameterized.named_parameters(*_MAKE_TFXIO_TEST_CASES)
  def test_make_tfxio(self, payload_format, expected_tfxio_type,
                      raw_record_column_name=None,
                      provide_data_view_uri=False,
                      read_as_raw_records=False):
    """make_tfxio() returns the expected TFXIO type for each payload format."""
    if provide_data_view_uri and tf.__version__ < '2':
      self.skipTest('DataView is not supported under TF 1.x.')
    if payload_format is None:
      payload_format = 'FORMAT_TF_EXAMPLE'
    data_view_uri = None
    if provide_data_view_uri:
      data_view_uri = tempfile.mkdtemp(dir=self.get_temp_dir())
      tf_graph_record_decoder.save_decoder(_SimpleTfGraphRecordDecoder(),
                                           data_view_uri)
    tfxio = tfxio_utils.make_tfxio(
        _FAKE_FILE_PATTERN, _TELEMETRY_DESCRIPTORS, payload_format,
        data_view_uri, _SCHEMA, read_as_raw_records, raw_record_column_name)
    self.assertIsInstance(tfxio, expected_tfxio_type)
    # We currently only create RecordBasedTFXIO and the check below relies on
    # that.
    self.assertIsInstance(tfxio, record_based_tfxio.RecordBasedTFXIO)
    self.assertEqual(tfxio.telemetry_descriptors, _TELEMETRY_DESCRIPTORS)
    self.assertEqual(tfxio.raw_record_column_name, raw_record_column_name)
    # Since we provide a schema, ArrowSchema() should not raise.
    _ = tfxio.ArrowSchema()

  @parameterized.named_parameters(*_MAKE_TFXIO_TEST_CASES)
  def test_get_tfxio_factory_from_artifact(self,
                                           payload_format,
                                           expected_tfxio_type,
                                           raw_record_column_name=None,
                                           provide_data_view_uri=False,
                                           read_as_raw_records=False):
    """The artifact-based factory mirrors make_tfxio() behavior."""
    if provide_data_view_uri and tf.__version__ < '2':
      self.skipTest('DataView is not supported under TF 1.x.')
    examples = standard_artifacts.Examples()
    if payload_format is not None:
      examples_utils.set_payload_format(examples, payload_format)
    data_view_uri = None
    if provide_data_view_uri:
      data_view_uri = tempfile.mkdtemp(dir=self.get_temp_dir())
      tf_graph_record_decoder.save_decoder(_SimpleTfGraphRecordDecoder(),
                                           data_view_uri)
    if data_view_uri is not None:
      examples.set_string_custom_property(constants.DATA_VIEW_URI_PROPERTY_KEY,
                                          data_view_uri)
      examples.set_string_custom_property(constants.DATA_VIEW_CREATE_TIME_KEY,
                                          '1')
    tfxio_factory = tfxio_utils.get_tfxio_factory_from_artifact(
        [examples],
        _TELEMETRY_DESCRIPTORS,
        _SCHEMA,
        read_as_raw_records,
        raw_record_column_name)
    tfxio = tfxio_factory(_FAKE_FILE_PATTERN)
    self.assertIsInstance(tfxio, expected_tfxio_type)
    # We currently only create RecordBasedTFXIO and the check below relies on
    # that.
    self.assertIsInstance(tfxio, record_based_tfxio.RecordBasedTFXIO)
    self.assertEqual(tfxio.telemetry_descriptors, _TELEMETRY_DESCRIPTORS)
    self.assertEqual(tfxio.raw_record_column_name, raw_record_column_name)
    # Since we provide a schema, ArrowSchema() should not raise.
    _ = tfxio.ArrowSchema()

  def test_get_tfxio_factory_from_artifact_data_view_legacy(self):
    # This tests FORMAT_PROTO with data view where the DATA_VIEW_CREATE_TIME_KEY
    # is an int value. This is a legacy property type and should be string type
    # in the future.
    if tf.__version__ < '2':
      self.skipTest('DataView is not supported under TF 1.x.')
    examples = standard_artifacts.Examples()
    examples_utils.set_payload_format(
        examples, example_gen_pb2.PayloadFormat.FORMAT_PROTO)
    data_view_uri = tempfile.mkdtemp(dir=self.get_temp_dir())
    tf_graph_record_decoder.save_decoder(_SimpleTfGraphRecordDecoder(),
                                         data_view_uri)
    examples.set_string_custom_property(constants.DATA_VIEW_URI_PROPERTY_KEY,
                                        data_view_uri)
    # Fixed: an int-typed custom property must receive an int value; the
    # string '1' previously passed here is not a valid int_value.
    examples.set_int_custom_property(constants.DATA_VIEW_CREATE_TIME_KEY, 1)
    tfxio_factory = tfxio_utils.get_tfxio_factory_from_artifact(
        [examples],
        _TELEMETRY_DESCRIPTORS,
        _SCHEMA,
        read_as_raw_records=False,
        raw_record_column_name=None)
    tfxio = tfxio_factory(_FAKE_FILE_PATTERN)
    self.assertIsInstance(tfxio, record_to_tensor_tfxio.TFRecordToTensorTFXIO)
    # We currently only create RecordBasedTFXIO and the check below relies on
    # that.
    self.assertIsInstance(tfxio, record_based_tfxio.RecordBasedTFXIO)
    self.assertEqual(tfxio.telemetry_descriptors, _TELEMETRY_DESCRIPTORS)
    # Since we provide a schema, ArrowSchema() should not raise.
    _ = tfxio.ArrowSchema()

  @parameterized.named_parameters(*_RESOLVE_TEST_CASES)
  def test_resolve_payload_format_and_data_view_uri(
      self,
      payload_formats,
      data_view_uris=None,
      data_view_create_times=None,
      expected_payload_format=None,
      expected_data_view_uri=None,
      expected_error_type=None,
      expected_error_msg_regex=None):
    """Resolution picks the newest DataView and rejects mixed formats."""
    examples = []
    if data_view_uris is None:
      data_view_uris = [None] * len(payload_formats)
    if data_view_create_times is None:
      data_view_create_times = [None] * len(payload_formats)
    for payload_format, data_view_uri, data_view_create_time in zip(
        payload_formats, data_view_uris, data_view_create_times):
      artifact = standard_artifacts.Examples()
      examples_utils.set_payload_format(artifact, payload_format)
      if data_view_uri is not None:
        artifact.set_string_custom_property(
            constants.DATA_VIEW_URI_PROPERTY_KEY, data_view_uri)
      if data_view_create_time is not None:
        # Exercise both the legacy int-typed and the string-typed property.
        if isinstance(data_view_create_time, int):
          artifact_setter_fn = artifact.set_int_custom_property
        else:
          artifact_setter_fn = artifact.set_string_custom_property
        artifact_setter_fn(constants.DATA_VIEW_CREATE_TIME_KEY,
                           data_view_create_time)
      examples.append(artifact)
    if expected_error_type is None:
      payload_format, data_view_uri = (
          tfxio_utils.resolve_payload_format_and_data_view_uri(examples))
      self.assertEqual(payload_format, expected_payload_format)
      self.assertEqual(data_view_uri, expected_data_view_uri)
    else:
      with self.assertRaisesRegex(
          expected_error_type, expected_error_msg_regex):
        _ = tfxio_utils.resolve_payload_format_and_data_view_uri(examples)

  def test_get_tf_dataset_factory_from_artifact(self):
    """The dataset factory is callable and annotated to return a Dataset."""
    examples = standard_artifacts.Examples()
    examples_utils.set_payload_format(
        examples, example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE)
    dataset_factory = tfxio_utils.get_tf_dataset_factory_from_artifact(
        [examples], _TELEMETRY_DESCRIPTORS)
    self.assertIsInstance(dataset_factory, Callable)
    self.assertEqual(tf.data.Dataset,
                     inspect.signature(dataset_factory).return_annotation)

  def test_get_record_batch_factory_from_artifact(self):
    """The record-batch factory is callable and yields pa.RecordBatch."""
    examples = standard_artifacts.Examples()
    examples_utils.set_payload_format(
        examples, example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE)
    record_batch_factory = tfxio_utils.get_record_batch_factory_from_artifact(
        [examples], _TELEMETRY_DESCRIPTORS)
    self.assertIsInstance(record_batch_factory, Callable)
    self.assertEqual(Iterator[pa.RecordBatch],
                     inspect.signature(record_batch_factory).return_annotation)

  def test_raise_if_data_view_uri_not_available(self):
    """FORMAT_PROTO without an attached DataView must be rejected."""
    examples = standard_artifacts.Examples()
    examples_utils.set_payload_format(
        examples, example_gen_pb2.PayloadFormat.FORMAT_PROTO)
    with self.assertRaisesRegex(AssertionError, 'requires a DataView'):
      tfxio_utils.get_tfxio_factory_from_artifact(
          [examples], _TELEMETRY_DESCRIPTORS)(_FAKE_FILE_PATTERN)

  def test_raise_if_read_as_raw_but_raw_column_name_not_provided(self):
    """read_as_raw_records requires a raw_record_column_name."""
    examples = standard_artifacts.Examples()
    with self.assertRaisesRegex(AssertionError,
                                'must provide raw_record_column_name'):
      tfxio_utils.get_tfxio_factory_from_artifact(
          [examples], _TELEMETRY_DESCRIPTORS, read_as_raw_records=True)(
              _FAKE_FILE_PATTERN)
# Allow running this test module directly.
if __name__ == '__main__':
  tf.test.main()
| |
# Copyright (C) 2016 SignalFx, Inc. All rights reserved.
from . import errors, messages
class Computation(object):
    """A live handle to a running SignalFlow computation.

    Wraps the message stream produced by ``exec_fn``, tracking the
    computation's lifecycle state, accumulating per-timeseries metadata,
    and re-assembling multi-message data batches before yielding them.
    """

    # Lifecycle states, in increasing order. stream() loops until the state
    # reaches STATE_COMPLETED; STATE_ABORTED is surfaced as an exception.
    STATE_UNKNOWN = 0
    STATE_STREAM_STARTED = 1
    STATE_COMPUTATION_STARTED = 2
    STATE_DATA_RECEIVED = 3
    STATE_COMPLETED = 4
    STATE_ABORTED = 5

    def __init__(self, exec_fn):
        # exec_fn(last_logical_ts) must return an iterable message stream;
        # it is re-invoked to resume if the stream ends prematurely.
        self._id = None
        self._exec_fn = exec_fn
        self._stream = None
        self._state = Computation.STATE_UNKNOWN
        self._resolution = None
        self._num_input_timeseries = 0
        # Per-timeseries metadata accumulated from metadata messages (by tsid).
        self._metadata = {}
        # Logical timestamp of the last yielded batch; passed to exec_fn so a
        # resumed stream picks up where the previous one left off.
        self._last_logical_ts = None
        # Batching state: how many data messages make up one logical batch.
        self._expected_batches = 0
        self._batch_count_detected = False
        self._current_batch_message = None
        self._current_batch_count = 0
        # Flags extracted from computation info messages.
        self._find_matched_no_timeseries = False
        self._find_limited_resultset = False
        self._find_matched_size = 0
        self._find_limit_size = 0
        self._group_by_missing_property = False
        self._group_by_missing_properties = []

        # Kick it off.
        self._stream = self._execute()

    def _execute(self):
        # (Re)start the underlying stream, resuming from the last batch seen.
        return self._exec_fn(self._last_logical_ts)

    @property
    def id(self):
        # Computation handle; known once JobStartMessage is received.
        return self._id

    @property
    def resolution(self):
        # Output resolution in milliseconds, from JOB_RUNNING_RESOLUTION.
        return self._resolution

    @property
    def num_input_timeseries(self):
        # Total input timeseries count, summed from FETCH_NUM_TIMESERIES.
        return self._num_input_timeseries

    @property
    def state(self):
        # Current lifecycle state (one of the STATE_* constants).
        return self._state

    @property
    def last_logical_ts(self):
        # Logical timestamp (ms) of the last yielded data batch.
        return self._last_logical_ts

    @property
    def find_matched_no_timeseries(self):
        # True if a find() matched no timeseries at all.
        return self._find_matched_no_timeseries

    @property
    def find_limited_resultset(self):
        # True if a find() result set was truncated by a limit.
        return self._find_limited_resultset

    @property
    def find_matched_size(self):
        # Number of timeseries the find() actually matched.
        return self._find_matched_size

    @property
    def find_limit_size(self):
        # The limit that truncated the find() result set.
        return self._find_limit_size

    @property
    def group_by_missing_property(self):
        # True if a groupby referenced properties missing on some timeseries.
        return self._group_by_missing_property

    @property
    def group_by_missing_properties(self):
        # Names of the properties reported missing by the groupby.
        return self._group_by_missing_properties

    def close(self):
        """Manually close this computation and detach from its stream.

        This computation object cannot be restarted, used or streamed for after
        this method is called."""
        # Marking the state as completed prevents stream() from re-attaching.
        self._state = Computation.STATE_COMPLETED
        if self._stream:
            self._stream.close()
            self._stream = None

    def get_known_tsids(self):
        # All timeseries IDs for which metadata has been received, sorted.
        return sorted(self._metadata.keys())

    def get_metadata(self, tsid):
        """Return the full metadata object for the given timeseries (by its
        ID), if available."""
        return self._metadata.get(tsid)

    def stream(self):
        """Iterate over the messages from the computation's output.

        Control and metadata messages are intercepted and interpreted to
        enhance this Computation's object knowledge of the computation's
        context. Data and event messages are yielded back to the caller as a
        generator.
        """
        iterator = iter(self._stream)
        while self._state < Computation.STATE_COMPLETED:
            try:
                message = next(iterator)
            except StopIteration:
                if self._state < Computation.STATE_COMPLETED:
                    # The stream ended before the computation did: re-attach
                    # and resume from the last yielded logical timestamp.
                    self._stream = self._execute()
                    iterator = iter(self._stream)
                    continue
                break

            if isinstance(message, messages.StreamStartMessage):
                self._state = Computation.STATE_STREAM_STARTED
                continue

            if isinstance(message, messages.JobStartMessage):
                self._state = Computation.STATE_COMPUTATION_STARTED
                self._id = message.handle
                yield message
                continue

            if isinstance(message, messages.JobProgressMessage):
                yield message
                continue

            if isinstance(message, messages.ChannelAbortMessage):
                self._state = Computation.STATE_ABORTED
                raise errors.ComputationAborted(message.abort_info)

            if isinstance(message, messages.EndOfChannelMessage):
                self._state = Computation.STATE_COMPLETED
                continue

            # Intercept metadata messages to accumulate received metadata...
            if isinstance(message, messages.MetadataMessage):
                self._metadata[message.tsid] = message.properties
                yield message
                continue

            # ...as well as expired-tsid messages to clean it up.
            if isinstance(message, messages.ExpiredTsIdMessage):
                if message.tsid in self._metadata:
                    del self._metadata[message.tsid]
                yield message
                continue

            if isinstance(message, messages.InfoMessage):
                self._process_info_message(message.message)
                # An info message marks the end of the first round of data
                # batches, so the expected batch count is now known.
                self._batch_count_detected = True
                yield message
                if self._current_batch_message:
                    yield self._get_batch_to_yield()
                continue

            # Accumulate data messages and release them when we have received
            # all batches for the same logical timestamp.
            if isinstance(message, messages.DataMessage):
                self._state = Computation.STATE_DATA_RECEIVED

                if not self._batch_count_detected:
                    # Still counting how many messages form one batch.
                    self._expected_batches += 1

                if not self._current_batch_message:
                    self._current_batch_message = message
                    self._current_batch_count = 1
                elif (message.logical_timestamp_ms ==
                        self._current_batch_message.logical_timestamp_ms):
                    # Same logical timestamp: merge into the pending batch.
                    self._current_batch_message.add_data(message.data)
                    self._current_batch_count += 1
                else:
                    # Timestamp changed: the previous round defined the count.
                    self._batch_count_detected = True

                if (self._batch_count_detected and
                        self._current_batch_count == self._expected_batches):
                    yield self._get_batch_to_yield()
                continue

            if isinstance(message, messages.EventMessage):
                yield message
                continue

            if isinstance(message, messages.ErrorMessage):
                raise errors.ComputationFailed(message.errors)

        # Yield last batch, even if potentially incomplete.
        if self._current_batch_message:
            yield self._get_batch_to_yield()

    def _process_info_message(self, message):
        """Process an information message received from the computation."""
        # Extract the output resolution from the appropriate message, if
        # it's present.
        contents = message.get('contents', None)
        if message['messageCode'] == 'JOB_RUNNING_RESOLUTION':
            self._resolution = contents['resolutionMs']
        elif message['messageCode'] == 'FETCH_NUM_TIMESERIES':
            # NOTE(review): reads 'numInputTimeSeries' from the message itself
            # rather than from `contents` like the other codes — confirm
            # against the SignalFlow API payloads.
            self._num_input_timeseries += int(message['numInputTimeSeries'])
        elif message['messageCode'] == 'FIND_MATCHED_NO_TIMESERIES':
            self._find_matched_no_timeseries = True
        elif message['messageCode'] == 'FIND_LIMITED_RESULT_SET':
            self._find_limited_resultset = True
            self._find_matched_size = contents['matchedSize']
            self._find_limit_size = contents['limitSize']
        elif message['messageCode'] == 'GROUPBY_MISSING_PROPERTY':
            self._group_by_missing_property = True
            self._group_by_missing_properties = contents['propertyNames']

    def _get_batch_to_yield(self):
        # Hand out the pending batch, reset batching state, and remember the
        # batch's logical timestamp for potential stream resumption.
        to_yield = self._current_batch_message
        self._current_batch_message = None
        self._current_batch_count = 0
        self._last_logical_ts = to_yield.logical_timestamp_ms
        return to_yield
| |
# -*- coding:utf-8 -*-
# This file comes from https://github.com/podhmo/python-semver/blob/b42e9896e391e086b773fc621b23fa299d16b874/semver/__init__.py
#
# It is licensed under the following license:
#
# MIT License
# Copyright (c) 2016 podhmo
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import re
logger = logging.getLogger(__name__)

# Version of the SemVer specification this module implements.
SEMVER_SPEC_VERSION = '2.0.0'

# Python 2/3 compatibility: `basestring` only exists on Python 2; fall back
# to `str` on Python 3.
try:
    string_type = basestring
except NameError:
    string_type = str
class _R(object):
def __init__(self, i):
self.i = i
def __call__(self):
v = self.i
self.i += 1
return v
def value(self):
return self.i
class Extendlist(list):
    """A list that grows by one when assigning at index == len(self).

    Any other out-of-range assignment raises IndexError as usual.
    """

    def __setitem__(self, index, value):
        if index == len(self):
            # One-past-the-end: append instead of raising.
            self.append(value)
        else:
            list.__setitem__(self, index, value)
def list_get(xs, i):
    """Return ``xs[i]``, or None when the index is out of range.

    Mirrors JavaScript's forgiving array indexing, which the ported
    comparison logic relies on.
    """
    in_bounds = -len(xs) <= i < len(xs)
    return xs[i] if in_bounds else None
# Counter assigning a unique index to each regex source string below.
R = _R(0)
# src[i] holds the i-th pattern source; regexp[i] the compiled form.
src = Extendlist()
regexp = {}
# The following Regular Expressions can be used for tokenizing,
# validating, and parsing SemVer version strings.

# ## Numeric Identifier
# A single `0`, or a non-zero digit followed by zero or more digits.
NUMERICIDENTIFIER = R()
src[NUMERICIDENTIFIER] = '0|[1-9]\\d*'
# Loose variant: any digit run, leading zeros allowed.
NUMERICIDENTIFIERLOOSE = R()
src[NUMERICIDENTIFIERLOOSE] = '[0-9]+'

# ## Non-numeric Identifier
# Zero or more digits, followed by a letter or hyphen, and then zero or
# more letters, digits, or hyphens.
NONNUMERICIDENTIFIER = R()
src[NONNUMERICIDENTIFIER] = '\\d*[a-zA-Z-][a-zA-Z0-9-]*'

# ## Main Version
# Three dot-separated numeric identifiers (each in a capturing group).
MAINVERSION = R()
src[MAINVERSION] = ('(' + src[NUMERICIDENTIFIER] + ')\\.' +
                    '(' + src[NUMERICIDENTIFIER] + ')\\.' +
                    '(' + src[NUMERICIDENTIFIER] + ')')
MAINVERSIONLOOSE = R()
src[MAINVERSIONLOOSE] = ('(' + src[NUMERICIDENTIFIERLOOSE] + ')\\.' +
                         '(' + src[NUMERICIDENTIFIERLOOSE] + ')\\.' +
                         '(' + src[NUMERICIDENTIFIERLOOSE] + ')')

# ## Pre-release Version Identifier
# A numeric identifier, or a non-numeric identifier.
PRERELEASEIDENTIFIER = R()
src[PRERELEASEIDENTIFIER] = ('(?:' + src[NUMERICIDENTIFIER] +
                             '|' + src[NONNUMERICIDENTIFIER] + ')')
PRERELEASEIDENTIFIERLOOSE = R()
src[PRERELEASEIDENTIFIERLOOSE] = ('(?:' + src[NUMERICIDENTIFIERLOOSE] +
                                  '|' + src[NONNUMERICIDENTIFIER] + ')')

# ## Pre-release Version
# Hyphen, followed by one or more dot-separated pre-release version
# identifiers. (The loose variant makes the hyphen optional.)
PRERELEASE = R()
src[PRERELEASE] = ('(?:-(' + src[PRERELEASEIDENTIFIER] +
                   '(?:\\.' + src[PRERELEASEIDENTIFIER] + ')*))')
PRERELEASELOOSE = R()
src[PRERELEASELOOSE] = ('(?:-?(' + src[PRERELEASEIDENTIFIERLOOSE] +
                        '(?:\\.' + src[PRERELEASEIDENTIFIERLOOSE] + ')*))')

# ## Build Metadata Identifier
# Any combination of digits, letters, or hyphens.
BUILDIDENTIFIER = R()
src[BUILDIDENTIFIER] = '[0-9A-Za-z-]+'

# ## Build Metadata
# Plus sign, followed by one or more period-separated build metadata
# identifiers.
BUILD = R()
src[BUILD] = ('(?:\\+(' + src[BUILDIDENTIFIER] +
              '(?:\\.' + src[BUILDIDENTIFIER] + ')*))')
# ## Full Version String
# A main version, followed optionally by a pre-release version and
# build metadata.

# Note that the only major, minor, patch, and pre-release sections of
# the version string are capturing groups. The build metadata is not a
# capturing group, because it should not ever be used in version
# comparison.
FULL = R()
FULLPLAIN = ('v?' + src[MAINVERSION] + src[PRERELEASE] + '?' + src[BUILD] + '?')

src[FULL] = '^' + FULLPLAIN + '$'

# like full, but allows v1.2.3 and =1.2.3, which people do sometimes.
# also, 1.0.0alpha1 (prerelease without the hyphen) which is pretty
# common in the npm registry.
LOOSEPLAIN = ('[v=\\s]*' + src[MAINVERSIONLOOSE] +
              src[PRERELEASELOOSE] + '?' +
              src[BUILD] + '?')
LOOSE = R()
src[LOOSE] = '^' + LOOSEPLAIN + '$'

# Optional comparison operator: one of '', '=', '<', '>', '<=', '>='.
GTLT = R()
src[GTLT] = '((?:<|>)?=?)'

# Something like "2.*" or "1.2.x".
# Note that "x.x" is a valid xRange identifier, meaning "any version"
# Only the first item is strictly required.
XRANGEIDENTIFIERLOOSE = R()
src[XRANGEIDENTIFIERLOOSE] = src[NUMERICIDENTIFIERLOOSE] + '|x|X|\\*'
XRANGEIDENTIFIER = R()
src[XRANGEIDENTIFIER] = src[NUMERICIDENTIFIER] + '|x|X|\\*'

# An x-range version: up to three components, each possibly a wildcard,
# with optional prerelease/build tails.
XRANGEPLAIN = R()
src[XRANGEPLAIN] = ('[v=\\s]*(' + src[XRANGEIDENTIFIER] + ')' +
                    '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' +
                    '(?:\\.(' + src[XRANGEIDENTIFIER] + ')' +
                    '(?:' + src[PRERELEASE] + ')?' +
                    src[BUILD] + '?' +
                    ')?)?')
XRANGEPLAINLOOSE = R()
src[XRANGEPLAINLOOSE] = ('[v=\\s]*(' + src[XRANGEIDENTIFIERLOOSE] + ')' +
                         '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' +
                         '(?:\\.(' + src[XRANGEIDENTIFIERLOOSE] + ')' +
                         '(?:' + src[PRERELEASELOOSE] + ')?' +
                         src[BUILD] + '?' +
                         ')?)?')
XRANGE = R()
src[XRANGE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAIN] + '$'
XRANGELOOSE = R()
src[XRANGELOOSE] = '^' + src[GTLT] + '\\s*' + src[XRANGEPLAINLOOSE] + '$'
# Tilde ranges.
# Meaning is "reasonably at or greater than"
LONETILDE = R()
src[LONETILDE] = '(?:~>?)'

# Strips whitespace between a tilde and the version it modifies.
TILDETRIM = R()
src[TILDETRIM] = '(\\s*)' + src[LONETILDE] + '\\s+'
regexp[TILDETRIM] = re.compile(src[TILDETRIM], re.M)
tildeTrimReplace = r'\1~'

TILDE = R()
src[TILDE] = '^' + src[LONETILDE] + src[XRANGEPLAIN] + '$'
TILDELOOSE = R()
src[TILDELOOSE] = ('^' + src[LONETILDE] + src[XRANGEPLAINLOOSE] + '$')

# Caret ranges.
# Meaning is "at least and backwards compatible with"
LONECARET = R()
src[LONECARET] = '(?:\\^)'

# Strips whitespace between a caret and the version it modifies.
CARETTRIM = R()
src[CARETTRIM] = '(\\s*)' + src[LONECARET] + '\\s+'
regexp[CARETTRIM] = re.compile(src[CARETTRIM], re.M)
caretTrimReplace = r'\1^'

CARET = R()
src[CARET] = '^' + src[LONECARET] + src[XRANGEPLAIN] + '$'
CARETLOOSE = R()
src[CARETLOOSE] = '^' + src[LONECARET] + src[XRANGEPLAINLOOSE] + '$'

# A simple gt/lt/eq thing, or just "" to indicate "any version"
COMPARATORLOOSE = R()
src[COMPARATORLOOSE] = '^' + src[GTLT] + '\\s*(' + LOOSEPLAIN + ')$|^$'
COMPARATOR = R()
src[COMPARATOR] = '^' + src[GTLT] + '\\s*(' + FULLPLAIN + ')$|^$'

# An expression to strip any whitespace between the gtlt and the thing
# it modifies, so that `> 1.2.3` ==> `>1.2.3`
COMPARATORTRIM = R()
src[COMPARATORTRIM] = ('(\\s*)' + src[GTLT] +
                       '\\s*(' + LOOSEPLAIN + '|' + src[XRANGEPLAIN] + ')')

# this one has to use the /g flag
regexp[COMPARATORTRIM] = re.compile(src[COMPARATORTRIM], re.M)
comparatorTrimReplace = r'\1\2\3'

# Something like `1.2.3 - 1.2.4`
# Note that these all use the loose form, because they'll be
# checked against either the strict or loose comparator form
# later.
HYPHENRANGE = R()
src[HYPHENRANGE] = ('^\\s*(' + src[XRANGEPLAIN] + ')' +
                    '\\s+-\\s+' +
                    '(' + src[XRANGEPLAIN] + ')' +
                    '\\s*$')
HYPHENRANGELOOSE = R()
src[HYPHENRANGELOOSE] = ('^\\s*(' + src[XRANGEPLAINLOOSE] + ')' +
                         '\\s+-\\s+' +
                         '(' + src[XRANGEPLAINLOOSE] + ')' +
                         '\\s*$')

# Star ranges basically just allow anything at all.
STAR = R()
src[STAR] = '(<|>)?=?\\s*\\*'

# version name recovery for convenient
RECOVERYVERSIONNAME = R()
src[RECOVERYVERSIONNAME] = ('v?({n})(?:\\.({n}))?{pre}?'.format(n=src[NUMERICIDENTIFIER], pre=src[PRERELEASELOOSE]))

# Compile to actual regexp objects.
# All are flag-free, unless they were created above with a flag.
# The *TRIM patterns above were already compiled with re.M and are skipped.
for i in range(R.value()):
    logger.debug("genregxp %s %s", i, src[i])
    if i not in regexp:
        regexp[i] = re.compile(src[i])
def parse(version, loose):
    """Parse *version*; return a SemVer on success, None on failure."""
    pattern = regexp[LOOSE] if loose else regexp[FULL]
    if pattern.search(version):
        return semver(version, loose)
    return None
def valid(version, loose):
    """Return the parsed SemVer if *version* is valid, else None.

    Fix: parse() returns None for unparseable input, so the previous
    unconditional ``v.version`` attribute access raised AttributeError on
    any invalid version instead of returning None.
    """
    v = parse(version, loose)
    if v is not None and v.version:
        return v
    return None
def clean(version, loose):
    """Return the normalized version string, or None when unparseable."""
    parsed = parse(version, loose)
    return parsed.version if parsed else None
# Matches purely-numeric identifiers. Fix: use a raw string — "\d" in a
# plain string literal is an invalid escape sequence (DeprecationWarning,
# a SyntaxError in future Python versions).
NUMERIC = re.compile(r"^\d+$")
def semver(version, loose):
    """Coerce *version* into a SemVer instance with the given looseness.

    Accepts an existing SemVer (reparsed if its looseness differs) or a
    version string; anything else raises ValueError.
    """
    if isinstance(version, SemVer):
        if version.loose == loose:
            return version
        version = version.version
    elif not isinstance(version, string_type):  # xxx:
        raise ValueError("Invalid Version: {}".format(version))
    # JS original guarded here with:
    #   if (!(this instanceof SemVer)) return new SemVer(version, loose);
    return SemVer(version, loose)


make_semver = semver
class SemVer(object):
    """A parsed semantic version.

    Attributes:
        major, minor, patch: int components.
        prerelease: list of dot-separated identifiers (ints where numeric).
        build: list of build-metadata identifiers (ignored in comparisons).
        version: normalized string form, maintained by format().
    """

    def __init__(self, version, loose):
        logger.debug("SemVer %s, %s", version, loose)
        self.loose = loose
        self.raw = version

        m = regexp[LOOSE if loose else FULL].search(version.strip())
        if not m:
            if not loose:
                raise ValueError("Invalid Version: {}".format(version))
            # Loose mode: try to recover at least "major[.minor][-pre]".
            m = regexp[RECOVERYVERSIONNAME].search(version.strip())
            if not m:
                # Fix: an unrecoverable string previously crashed with
                # AttributeError on m.group(); raise a proper error instead.
                raise ValueError("Invalid Version: {}".format(version))
            self.major = int(m.group(1)) if m.group(1) else 0
            self.minor = int(m.group(2)) if m.group(2) else 0
            self.patch = 0
            if not m.group(3):
                self.prerelease = []
            else:
                self.prerelease = [(int(part) if NUMERIC.search(part) else part)
                                   for part in m.group(3).split(".")]
            # Fix: the recovery path never set `build`, leaving the instance
            # without the attribute.
            self.build = []
        else:
            # these are actually numbers
            self.major = int(m.group(1))
            self.minor = int(m.group(2))
            self.patch = int(m.group(3))
            # numberify any prerelease numeric ids
            if not m.group(4):
                self.prerelease = []
            else:
                self.prerelease = [(int(part) if NUMERIC.search(part) else part)
                                   for part in m.group(4).split(".")]
            if m.group(5):
                self.build = m.group(5).split(".")
            else:
                self.build = []

        self.format()  # xxx:

    def format(self):
        """Recompute and return the canonical version string."""
        self.version = "{}.{}.{}".format(self.major, self.minor, self.patch)
        if len(self.prerelease) > 0:
            self.version += ("-{}".format(".".join(str(v) for v in self.prerelease)))
        return self.version

    def __repr__(self):
        return "<SemVer {} >".format(self)

    def __str__(self):
        return self.version

    def compare(self, other):
        """Three-way compare with *other* (-1, 0, 1)."""
        logger.debug('SemVer.compare %s %s %s', self.version, self.loose, other)
        if not isinstance(other, SemVer):
            other = make_semver(other, self.loose)
        result = self.compare_main(other) or self.compare_pre(other)
        logger.debug("compare result %s", result)
        return result

    def compare_main(self, other):
        """Compare only the major.minor.patch components."""
        if not isinstance(other, SemVer):
            other = make_semver(other, self.loose)

        return (compare_identifiers(str(self.major), str(other.major)) or
                compare_identifiers(str(self.minor), str(other.minor)) or
                compare_identifiers(str(self.patch), str(other.patch)))

    def compare_pre(self, other):
        """Compare only the prerelease identifier lists."""
        if not isinstance(other, SemVer):
            other = make_semver(other, self.loose)

        # NOT having a prerelease is > having one
        is_self_more_than_zero = len(self.prerelease) > 0
        is_other_more_than_zero = len(other.prerelease) > 0

        if not is_self_more_than_zero and is_other_more_than_zero:
            return 1
        elif is_self_more_than_zero and not is_other_more_than_zero:
            return -1
        elif not is_self_more_than_zero and not is_other_more_than_zero:
            return 0

        # Pairwise-compare identifiers; a shorter list sorts first.
        i = 0
        while True:
            a = list_get(self.prerelease, i)
            b = list_get(other.prerelease, i)
            logger.debug("prerelease compare %s: %s %s", i, a, b)
            i += 1
            if a is None and b is None:
                return 0
            elif b is None:
                return 1
            elif a is None:
                return -1
            elif a == b:
                continue
            else:
                return compare_identifiers(str(a), str(b))

    def inc(self, release, identifier=None):
        """Bump this version in place per *release* type and return self.

        Raises ValueError for an unknown *release*.
        """
        logger.debug("inc release %s %s", self.prerelease, release)
        if release == 'premajor':
            self.prerelease = []
            self.patch = 0
            self.minor = 0
            self.major += 1
            self.inc('pre', identifier=identifier)
        elif release == "preminor":
            self.prerelease = []
            self.patch = 0
            self.minor += 1
            self.inc('pre', identifier=identifier)
        elif release == "prepatch":
            # If this is already a prerelease, it will bump to the next version
            # drop any prereleases that might already exist, since they are not
            # relevant at this point.
            self.prerelease = []
            self.inc('patch', identifier=identifier)
            self.inc('pre', identifier=identifier)
        elif release == 'prerelease':
            # If the input is a non-prerelease version, this acts the same as
            # prepatch.
            if len(self.prerelease) == 0:
                self.inc("patch", identifier=identifier)
            self.inc("pre", identifier=identifier)
        elif release == "major":
            # If this is a pre-major version, bump up to the same major version.
            # Otherwise increment major.
            # 1.0.0-5 bumps to 1.0.0
            # 1.1.0 bumps to 2.0.0
            if self.minor != 0 or self.patch != 0 or len(self.prerelease) == 0:
                self.major += 1
            self.minor = 0
            self.patch = 0
            self.prerelease = []
        elif release == "minor":
            # If this is a pre-minor version, bump up to the same minor version.
            # Otherwise increment minor.
            # 1.2.0-5 bumps to 1.2.0
            # 1.2.1 bumps to 1.3.0
            if self.patch != 0 or len(self.prerelease) == 0:
                self.minor += 1
            self.patch = 0
            self.prerelease = []
        elif release == "patch":
            # If this is not a pre-release version, it will increment the patch.
            # If it is a pre-release it will bump up to the same patch version.
            # 1.2.0-5 patches to 1.2.0
            # 1.2.0 patches to 1.2.1
            if len(self.prerelease) == 0:
                self.patch += 1
            self.prerelease = []
        elif release == "pre":
            # This probably shouldn't be used publicly.
            # 1.0.0 "pre" would become 1.0.0-0 which is the wrong direction.
            logger.debug("inc prerelease %s", self.prerelease)
            if len(self.prerelease) == 0:
                self.prerelease = [0]
            else:
                # Bump the right-most numeric identifier, scanning from the end.
                i = len(self.prerelease) - 1
                while i >= 0:
                    if isinstance(self.prerelease[i], int):
                        self.prerelease[i] += 1
                        # Fix: set an absolute sentinel (mirrors node-semver's
                        # `i = -2`). The previous relative `i -= 2` kept the
                        # scan alive and could bump a second numeric id.
                        i = -2
                    i -= 1
                if i == -1:
                    # Fix: no numeric id was found, so append one — otherwise
                    # e.g. '1.0.0-alpha' would never advance. (This mirrors
                    # node-semver's trailing push(0), which the port had
                    # commented out as "needless".)
                    self.prerelease.append(0)
            if identifier is not None:
                # 1.2.0-beta.1 bumps to 1.2.0-beta.2,
                # 1.2.0-beta.fooblz or 1.2.0-beta bumps to 1.2.0-beta.0
                if self.prerelease[0] == identifier:
                    # Fix: guard the length before indexing [1] to avoid
                    # IndexError on a single-element prerelease.
                    if (len(self.prerelease) < 2 or
                            not isinstance(self.prerelease[1], int)):
                        self.prerelease = [identifier, 0]
                else:
                    self.prerelease = [identifier, 0]
        else:
            raise ValueError('invalid increment argument: {}'.format(release))
        self.format()
        self.raw = self.version
        return self
def inc(version, release, loose, identifier=None):  # wow!
    """Return *version* bumped by *release*, or None on any failure."""
    try:
        bumped = make_semver(version, loose).inc(release, identifier=identifier)
        return bumped.version
    except Exception as e:
        logger.debug(e, exc_info=5)
        return None
def compare_identifiers(a, b):
    """Three-way compare of two identifier strings per SemVer rules.

    Numeric identifiers compare numerically and always sort below
    alphanumeric ones; otherwise plain string comparison applies.
    Returns -1, 0 or 1.
    """
    a_is_numeric = NUMERIC.search(a)
    b_is_numeric = NUMERIC.search(b)

    if a_is_numeric and b_is_numeric:
        a = int(a)
        b = int(b)

    if a_is_numeric and not b_is_numeric:
        return -1
    if b_is_numeric and not a_is_numeric:
        return 1
    if a < b:
        return -1
    if a > b:
        return 1
    return 0
def rcompare_identifiers(a, b):
    """Reverse-order variant of compare_identifiers."""
    return compare_identifiers(b, a)
def compare(a, b, loose):
    """Three-way compare of versions *a* and *b* (-1, 0 or 1)."""
    return make_semver(a, loose).compare(b)
def compare_loose(a, b):
    """Compare two versions using loose parsing."""
    return compare(a, b, True)
def rcompare(a, b, loose):
    """Reverse-order variant of compare."""
    return compare(b, a, loose)
def make_key_function(loose):
    """Build a sort-key function mapping a version to a comparable tuple."""
    def key_function(version):
        v = make_semver(version, loose)
        key = (v.major, v.minor, v.patch)
        if v.prerelease:
            return key + tuple(v.prerelease)
        # NOT having a prerelease is > having one
        return key + (float('inf'),)

    return key_function
loose_key_function = make_key_function(True)
# Fix: the "full" (strict) key function was built with loose=True — an
# apparent copy-paste of the line above — so sort(list, loose=False) silently
# used loose parsing. It must use strict parsing.
full_key_function = make_key_function(False)
def sort(list, loose):
    """Sort versions in place, ascending, and return the same list."""
    list.sort(key=loose_key_function if loose else full_key_function)
    return list
def rsort(list, loose):
    """Sort versions in place, descending, and return the same list."""
    list.sort(key=loose_key_function if loose else full_key_function,
              reverse=True)
    return list
def gt(a, b, loose):
    """Return True when version *a* sorts after *b*."""
    delta = compare(a, b, loose)
    return delta > 0
def lt(a, b, loose):
    """Return True when version *a* sorts before *b*."""
    delta = compare(a, b, loose)
    return delta < 0
def eq(a, b, loose):
    """Return True when the two versions compare equal."""
    delta = compare(a, b, loose)
    return delta == 0
def neq(a, b, loose):
    """Return True when the two versions compare unequal."""
    delta = compare(a, b, loose)
    return delta != 0
def gte(a, b, loose):
    """Return True when version *a* is greater than or equal to *b*."""
    delta = compare(a, b, loose)
    return delta >= 0
def lte(a, b, loose):
    """Return True when version *a* is less than or equal to *b*."""
    delta = compare(a, b, loose)
    return delta <= 0
def cmp(a, op, b, loose):
    """Apply comparison operator *op* between versions *a* and *b*.

    '===' / '!==' are raw identity comparisons; the remaining operators
    compare parsed versions. Raises ValueError for an unknown operator.
    """
    logger.debug("cmp: %s", op)
    if op == "===":
        return a == b
    if op == "!==":
        return a != b
    if op in ("", "=", "=="):
        return eq(a, b, loose)
    if op == "!=":
        return neq(a, b, loose)
    if op == ">":
        return gt(a, b, loose)
    if op == ">=":
        return gte(a, b, loose)
    if op == "<":
        return lt(a, b, loose)
    if op == "<=":
        return lte(a, b, loose)
    raise ValueError("Invalid operator: {}".format(op))
def comparator(comp, loose):
    """Coerce *comp* into a Comparator with the requested looseness."""
    if isinstance(comp, Comparator):
        if comp.loose == loose:
            return comp
        comp = comp.value
    # JS original guarded here with:
    #   if (!(this instanceof Comparator)) return new Comparator(comp, loose)
    return Comparator(comp, loose)


make_comparator = comparator

# Sentinel meaning "matches any version" (a bare '' or '>' comparator).
ANY = object()
class Comparator(object):
    """A single version comparator, e.g. ">=1.2.3" or "" (match anything)."""

    # Parsed right-hand side; ANY when the comparator matches every version.
    semver = None

    def __init__(self, comp, loose):
        logger.debug("comparator: %s %s", comp, loose)
        self.loose = loose
        self.parse(comp)
        if self.semver == ANY:
            self.value = ""
        else:
            self.value = self.operator + self.semver.version

    def parse(self, comp):
        """Split *comp* into an operator and a parsed version."""
        pattern = regexp[COMPARATORLOOSE] if self.loose else regexp[COMPARATOR]
        logger.debug("parse comp=%s", comp)
        match = pattern.search(comp)
        if match is None:
            raise ValueError("Invalid comparator: {}".format(comp))

        self.operator = match.group(1)
        # if it literally is just '>' or '' then allow anything.
        if match.group(2) is None:
            self.semver = ANY
        else:
            self.semver = semver(match.group(2), self.loose)

    def __repr__(self):
        return '<SemVer Comparator "{}">'.format(self)

    def __str__(self):
        return self.value

    def test(self, version):
        """Return True when *version* satisfies this comparator."""
        logger.debug('Comparator, test %s, %s', version, self.loose)
        if self.semver == ANY:
            return True
        return cmp(version, self.operator, self.semver, self.loose)
def make_range(range_, loose):
    """Coerce *range_* into a Range with the requested looseness."""
    if isinstance(range_, Range) and range_.loose == loose:
        return range_
    # JS original guarded here with:
    #   if (!(this instanceof Range)) return new Range(range, loose);
    return Range(range_, loose)
class Range(object):
    """A SemVer range: '||'-separated alternatives of comparator lists.

    `self.set` is a list of alternatives; each alternative is a list of
    Comparator objects that must all be satisfied.
    """

    def __init__(self, range_, loose):
        self.loose = loose
        # First, split based on boolean or ||
        self.raw = range_
        xs = [self.parse_range(r.strip()) for r in re.split(r"\s*\|\|\s*", range_)]
        self.set = [r for r in xs if r]
        # Fix: idiomatic emptiness check (was `if not len(self.set)`).
        if not self.set:
            raise ValueError("Invalid SemVer Range: {}".format(range_))
        self.format()

    def __repr__(self):
        return '<SemVer Range "{}">'.format(self.range)

    def format(self):
        """Recompute and return the normalized range string."""
        self.range = "||".join([" ".join(c.value for c in comps).strip() for comps in self.set]).strip()
        logger.debug("Range format %s", self.range)
        return self.range

    def __str__(self):
        return self.range

    def parse_range(self, range_):
        """Parse one '||'-free alternative into a list of Comparators."""
        loose = self.loose
        logger.debug('range %s %s', range_, loose)
        # `1.2.3 - 1.2.4` => `>=1.2.3 <=1.2.4`
        if loose:
            hr = regexp[HYPHENRANGELOOSE]
        else:
            hr = regexp[HYPHENRANGE]

        range_ = hr.sub(hyphen_replace, range_,)
        logger.debug('hyphen replace %s', range_)

        # `> 1.2.3 < 1.2.5` => `>1.2.3 <1.2.5`
        range_ = regexp[COMPARATORTRIM].sub(comparatorTrimReplace, range_)
        logger.debug('comparator trim %s, %s', range_, regexp[COMPARATORTRIM])

        # `~ 1.2.3` => `~1.2.3`
        range_ = regexp[TILDETRIM].sub(tildeTrimReplace, range_)

        # `^ 1.2.3` => `^1.2.3`
        range_ = regexp[CARETTRIM].sub(caretTrimReplace, range_)

        # normalize spaces
        # Fix: use raw strings for the regexes; "\s" in a plain string
        # literal is a deprecated escape sequence.
        range_ = " ".join(re.split(r"\s+", range_))

        # At this point, the range is completely trimmed and
        # ready to be split into comparators.
        if loose:
            comp_re = regexp[COMPARATORLOOSE]
        else:
            comp_re = regexp[COMPARATOR]

        set_ = re.split(r"\s+", ' '.join([parse_comparator(comp, loose) for comp in range_.split(" ")]))
        if self.loose:
            # in loose mode, throw out any that are not valid comparators
            set_ = [comp for comp in set_ if comp_re.search(comp)]
        set_ = [make_comparator(comp, loose) for comp in set_]
        return set_

    def test(self, version):
        """Return True when *version* satisfies any alternative."""
        if not version:  # xxx
            return False

        if isinstance(version, string_type):
            version = make_semver(version, loose=self.loose)

        for e in self.set:
            if test_set(e, version):
                return True
        return False
# Mostly just for testing and legacy API reasons
def to_comparators(range_, loose):
    """Return the range as lists of comparator strings, one list per
    ``||`` alternative."""
    result = []
    for comp_set in make_range(range_, loose).set:
        joined = " ".join([c.value for c in comp_set]).strip()
        result.append(joined.split(" "))
    return result
# comprised of xranges, tildes, stars, and gtlt's at this point.
# already replaced the hyphen ranges
# turn into a set of JUST comparators.
def parse_comparator(comp, loose):
    """Desugar one comparator chunk (carets, then tildes, then x-ranges,
    then stars) into primitive comparators.  The order matters: each pass
    only recognises the sugar the previous passes left behind."""
    logger.debug('comp %s', comp)
    desugared = replace_carets(comp, loose)
    logger.debug('caret %s', desugared)
    desugared = replace_tildes(desugared, loose)
    logger.debug('tildes %s', desugared)
    desugared = replace_xranges(desugared, loose)
    logger.debug('xrange %s', desugared)
    desugared = replace_stars(desugared, loose)
    logger.debug('stars %s', desugared)
    return desugared
def is_x(id):
    """True when *id* stands for "any" in a version component: missing
    (None), empty, ``x``/``X``, or ``*``."""
    if id is None:
        return True
    return id == "" or id == "*" or id.lower() == "x"
# ~, ~> --> * (any, kinda silly)
# ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0 <3.0.0
# ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0 <2.1.0
# ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0 <1.3.0
# ~1.2.3, ~>1.2.3 --> >=1.2.3 <1.3.0
# ~1.2.0, ~>1.2.0 --> >=1.2.0 <1.3.0
def replace_tildes(comp, loose):
    """Expand every whitespace-separated tilde comparator in *comp*.

    BUG FIX: r"\\s+" raw string -- "\\s" is an invalid escape sequence
    (DeprecationWarning, an error in future Python versions).
    """
    return " ".join([replace_tilde(c, loose)
                     for c in re.split(r"\s+", comp.strip())])
def replace_tilde(comp, loose):
    """Expand a single tilde comparator into plain ``>=``/``<`` bounds.

    e.g. ``~1.2.3`` allows patch-level changes: ``>=1.2.3 <1.3.0``.
    """
    if loose:
        r = regexp[TILDELOOSE]
    else:
        r = regexp[TILDE]

    def repl(mob):
        # NOTE(review): group(0) is bound to ``_`` but immediately
        # clobbered by the tuple unpack below, so the debug line logs the
        # fifth capture group, not the full match -- confirm intent.
        _ = mob.group(0)
        M, m, p, pr, _ = mob.groups()
        logger.debug("tilde %s %s %s %s %s %s", comp, _, M, m, p, pr)
        if is_x(M):
            # Bare ~ / ~>: matches anything.
            ret = ""
        elif is_x(m):
            # ~2 == >=2.0.0 <3.0.0
            ret = '>=' + M + '.0.0 <' + str(int(M) + 1) + '.0.0'
        elif is_x(p):
            # ~1.2 == >=1.2.0 <1.3.0
            ret = '>=' + M + '.' + m + '.0 <' + M + '.' + str(int(m) + 1) + '.0'
        elif pr:
            logger.debug("replaceTilde pr %s", pr)
            # Normalise a prerelease that lost its leading dash.
            if (pr[0] != "-"):
                pr = '-' + pr
            ret = '>=' + M + '.' + m + '.' + p + pr + ' <' + M + '.' + str(int(m) + 1) + '.0'
        else:
            # ~1.2.3 == >=1.2.3 <1.3.0
            ret = '>=' + M + '.' + m + '.' + p + ' <' + M + '.' + str(int(m) + 1) + '.0'
        logger.debug('tilde return, %s', ret)
        return ret
    return r.sub(repl, comp)
# ^ --> * (any, kinda silly)
# ^2, ^2.x, ^2.x.x --> >=2.0.0 <3.0.0
# ^2.0, ^2.0.x --> >=2.0.0 <3.0.0
# ^1.2, ^1.2.x --> >=1.2.0 <2.0.0
# ^1.2.3 --> >=1.2.3 <2.0.0
# ^1.2.0 --> >=1.2.0 <2.0.0
def replace_carets(comp, loose):
    """Expand every whitespace-separated caret comparator in *comp*.

    BUG FIX: r"\\s+" raw string -- "\\s" is an invalid escape sequence
    (DeprecationWarning, an error in future Python versions).
    """
    return " ".join([replace_caret(c, loose)
                     for c in re.split(r"\s+", comp.strip())])
def replace_caret(comp, loose):
    """Expand a single caret comparator into plain ``>=``/``<`` bounds.

    ``^`` permits changes that keep the left-most non-zero component:
    ``^1.2.3`` -> ``>=1.2.3 <2.0.0``, ``^0.2.3`` -> ``>=0.2.3 <0.3.0``.
    """
    if loose:
        r = regexp[CARETLOOSE]
    else:
        r = regexp[CARET]

    def repl(mob):
        m0 = mob.group(0)
        M, m, p, pr, _ = mob.groups()
        logger.debug("caret %s %s %s %s %s %s", comp, m0, M, m, p, pr)
        if is_x(M):
            # Bare ^: matches anything.
            ret = ""
        elif is_x(m):
            # ^2 == >=2.0.0 <3.0.0
            ret = '>=' + M + '.0.0 <' + str((int(M) + 1)) + '.0.0'
        elif is_x(p):
            if M == "0":
                # 0.x minors are treated as breaking: ^0.2 == >=0.2.0 <0.3.0
                ret = '>=' + M + '.' + m + '.0 <' + M + '.' + str((int(m) + 1)) + '.0'
            else:
                ret = '>=' + M + '.' + m + '.0 <' + str(int(M) + 1) + '.0.0'
        elif pr:
            logger.debug('replaceCaret pr %s', pr)
            # Normalise a prerelease that lost its leading dash.
            if pr[0] != "-":
                pr = "-" + pr
            if M == "0":
                if m == "0":
                    # ^0.0.3-pr == >=0.0.3-pr <0.0.4
                    ret = '>=' + M + '.' + m + '.' + (p or "") + pr + ' <' + M + '.' + m + "." + str(int(p or 0) + 1)
                else:
                    ret = '>=' + M + '.' + m + '.' + (p or "") + pr + ' <' + M + '.' + str(int(m) + 1) + '.0'
            else:
                ret = '>=' + M + '.' + m + '.' + (p or "") + pr + ' <' + str(int(M) + 1) + '.0.0'
        else:
            if M == "0":
                if m == "0":
                    # ^0.0.3 == >=0.0.3 <0.0.4 (only the exact patch)
                    ret = '>=' + M + '.' + m + '.' + (p or "") + ' <' + M + '.' + m + "." + str(int(p or 0) + 1)
                else:
                    ret = '>=' + M + '.' + m + '.' + (p or "") + ' <' + M + '.' + str((int(m) + 1)) + '.0'
            else:
                ret = '>=' + M + '.' + m + '.' + (p or "") + ' <' + str(int(M) + 1) + '.0.0'
        logger.debug('caret return %s', ret)
        return ret
    return r.sub(repl, comp)
def replace_xranges(comp, loose):
    """Expand every whitespace-separated x-range comparator in *comp*.

    BUG FIX: r"\\s+" raw string -- "\\s" is an invalid escape sequence
    (DeprecationWarning, an error in future Python versions).
    """
    logger.debug('replaceXRanges %s %s', comp, loose)
    return " ".join([replace_xrange(c, loose)
                     for c in re.split(r"\s+", comp.strip())])
def replace_xrange(comp, loose):
    """Expand a single x-range comparator (e.g. ``>=1.x``, ``2.3.x``)
    into concrete bounds."""
    comp = comp.strip()
    if loose:
        r = regexp[XRANGELOOSE]
    else:
        r = regexp[XRANGE]

    def repl(mob):
        ret = mob.group(0)
        gtlt, M, m, p, pr, _ = mob.groups()
        logger.debug("xrange %s %s %s %s %s %s %s", comp, ret, gtlt, M, m, p, pr)
        # Which components are wildcards; a wildcard implies all the
        # components to its right are wildcards too.
        xM = is_x(M)
        xm = xM or is_x(m)
        xp = xm or is_x(p)
        any_x = xp
        if gtlt == "=" and any_x:
            # "=1.x" behaves like a plain "1.x".
            gtlt = ""
        logger.debug("xrange gtlt=%s any_x=%s", gtlt, any_x)
        if xM:
            if gtlt == '>' or gtlt == '<':
                # nothing is allowed
                ret = '<0.0.0'
            else:
                ret = '*'
        elif gtlt and any_x:
            # replace X with 0, and then append the -0 min-prerelease
            if xm:
                m = 0
            if xp:
                p = 0
            if gtlt == ">":
                # >1 => >=2.0.0
                # >1.2 => >=1.3.0
                # >1.2.3 => >= 1.2.4
                gtlt = ">="
                if xm:
                    M = int(M) + 1
                    m = 0
                    p = 0
                elif xp:
                    m = int(m) + 1
                    p = 0
            elif gtlt == '<=':
                # <=0.7.x is actually <0.8.0, since any 0.7.x should
                # pass. Similarly, <=7.x is actually <8.0.0, etc.
                gtlt = '<'
                if xm:
                    M = int(M) + 1
                else:
                    m = int(m) + 1
            ret = gtlt + str(M) + '.' + str(m) + '.' + str(p)
        elif xm:
            # 1.x == >=1.0.0 <2.0.0
            ret = '>=' + M + '.0.0 <' + str(int(M) + 1) + '.0.0'
        elif xp:
            # 1.2.x == >=1.2.0 <1.3.0
            ret = '>=' + M + '.' + m + '.0 <' + M + '.' + str(int(m) + 1) + '.0'
        logger.debug('xRange return %s', ret)
        return ret
    return r.sub(repl, comp)
# Because * is AND-ed with everything else in the comparator,
# and '' means "any version", just remove the *s entirely.
def replace_stars(comp, loose):
    """Strip ``*`` comparators from *comp*; a star AND-ed with anything
    is a no-op."""
    logger.debug('replaceStars %s %s', comp, loose)
    # Looseness is ignored here. star is always as loose as it gets!
    stripped = comp.strip()
    return regexp[STAR].sub("", stripped)
# This function is passed to string.replace(re[HYPHENRANGE])
# M, m, patch, prerelease, build
# 1.2 - 3.4.5 => >=1.2.0 <=3.4.5
# 1.2.3 - 3.4 => >=1.2.0 <3.5.0 Any 3.4.x will do
# 1.2 - 3.4 => >=1.2.0 <3.5.0
def hyphen_replace(mob):
    """Rewrite a hyphen-range match into ``>=lo <[=]hi`` comparators."""
    from_, fM, fm, fp, fpr, fb, to, tM, tm, tp, tpr, tb = mob.groups()

    # Lower bound: missing components round down to zero.
    if is_x(fM):
        lo = ""
    elif is_x(fm):
        lo = '>=' + fM + '.0.0'
    elif is_x(fp):
        lo = '>=' + fM + '.' + fm + '.0'
    else:
        lo = ">=" + from_

    # Upper bound: a partial version means "anything in that series",
    # so it becomes an exclusive bound on the next major/minor.
    if is_x(tM):
        hi = ""
    elif is_x(tm):
        hi = '<' + str(int(tM) + 1) + '.0.0'
    elif is_x(tp):
        hi = '<' + tM + '.' + str(int(tm) + 1) + '.0'
    elif tpr:
        hi = '<=' + tM + '.' + tm + '.' + tp + '-' + tpr
    else:
        hi = '<=' + to

    return (lo + ' ' + hi).strip()
def test_set(set_, version):
    """Return True if *version* satisfies every comparator in *set_*."""
    if not all(e.test(version) for e in set_):
        return False
    if len(version.prerelease) > 0:
        # Find the set of versions that are allowed to have prereleases
        # For example, ^1.2.3-pr.1 desugars to >=1.2.3-pr.1 <2.0.0
        # That should allow `1.2.3-pr.2` to pass.
        # However, `1.2.4-alpha.notready` should NOT be allowed,
        # even though it's within the range set by the comparators.
        for e in set_:
            if e.semver == ANY:
                continue
            if len(e.semver.prerelease) > 0:
                allowed = e.semver
                if (allowed.major == version.major and
                        allowed.minor == version.minor and
                        allowed.patch == version.patch):
                    return True
        # Version has a -pre, but it's not one of the ones we like.
        return False
    return True
def satisfies(version, range_, loose=False):
    """Return True if *version* lies inside *range_*.

    An unparseable range is treated as "not satisfied" rather than an
    error (node-semver behaviour).  The catch is deliberately broad to
    match that contract; the exception itself is not needed, so it is
    no longer bound to an unused name.
    """
    try:
        range_ = make_range(range_, loose)
    except Exception:
        return False
    return range_.test(version)
def max_satisfying(versions, range_, loose=False):
    """Return the highest version in *versions* that satisfies *range_*.

    :returns: the winning element of *versions* (unparsed), or None when
              the range is invalid or nothing matches.
    """
    try:
        range_ob = make_range(range_, loose=loose)
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt.
        return None
    max_ = None
    max_sv = None
    for v in versions:
        if range_ob.test(v):  # satisfies(v, range_, loose=loose)
            # Track the running maximum using semver ordering,
            # not lexical string order.
            if max_ is None or max_sv.compare(v) == -1:  # compare(max, v, true)
                max_ = v
                max_sv = make_semver(max_, loose=loose)
    return max_
def valid_range(range_, loose):
    """Return the canonical string form of *range_*, or None if invalid."""
    try:
        # Return '*' instead of '' so that truthiness works.
        # This will throw if it's invalid anyway
        return make_range(range_, loose).range or "*"
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # SystemExit/KeyboardInterrupt.
        return None
# Determine if version is less than all the versions possible in the range
def ltr(version, range_, loose):
    """Return True when *version* is below every version *range_* allows."""
    return outside(version, range_, "<", loose)
# Determine if version is greater than all the versions possible in the range.
def rtr(version, range_, loose):
    """Return True when *version* is above every version *range_* allows."""
    return outside(version, range_, ">", loose)
def outside(version, range_, hilo, loose):
    """Return True when *version* falls entirely outside *range_* on the
    *hilo* side: '>' means above every allowed version, '<' below.

    :raises ValueError: if *hilo* is neither '<' nor '>'.
    """
    version = make_semver(version, loose)
    range_ = make_range(range_, loose)
    # Pick comparison helpers so the body below can be written as if we
    # were always checking the "greater than the range" direction.
    if hilo == ">":
        gtfn = gt
        ltefn = lte
        ltfn = lt
        comp = ">"
        ecomp = ">="
    elif hilo == "<":
        gtfn = lt
        ltefn = gte
        ltfn = gt
        comp = "<"
        ecomp = "<="
    else:
        raise ValueError("Must provide a hilo val of '<' or '>'")
    # If it satisifes the range it is not outside
    if satisfies(version, range_, loose):
        return False
    # From now on, variable terms are as if we're in "gtr" mode.
    # but note that everything is flipped for the "ltr" function.
    for comparators in range_.set:
        high = None
        low = None
        # Find the highest and lowest comparator of this alternative.
        for comparator in comparators:
            high = high or comparator
            low = low or comparator
            if gtfn(comparator.semver, high.semver, loose):
                high = comparator
            elif ltfn(comparator.semver, low.semver, loose):
                low = comparator
        # If the edge version comparator has a operator then our version
        # isn't outside it
        if high.operator == comp or high.operator == ecomp:
            return False
        # If the lowest version comparator has an operator and our version
        # is less than it then it isn't higher than the range
        # NOTE(review): ``loose`` is not forwarded to ltefn/ltfn here;
        # this relies on those helpers having a default -- confirm upstream.
        if (not low.operator or low.operator == comp) and ltefn(version, low.semver):
            return False
        elif low.operator == ecomp and ltfn(version, low.semver):
            return False
    return True
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pluggable Back-end for Account Server
"""
from uuid import uuid4
import time
import cPickle as pickle
import sqlite3
from swift.common.utils import Timestamp
from swift.common.db import DatabaseBroker, utf8encode
DATADIR = 'accounts'
POLICY_STAT_TRIGGER_SCRIPT = """
CREATE TRIGGER container_insert_ps AFTER INSERT ON container
BEGIN
INSERT OR IGNORE INTO policy_stat
(storage_policy_index, container_count, object_count, bytes_used)
VALUES (new.storage_policy_index, 0, 0, 0);
UPDATE policy_stat
SET container_count = container_count + (1 - new.deleted),
object_count = object_count + new.object_count,
bytes_used = bytes_used + new.bytes_used
WHERE storage_policy_index = new.storage_policy_index;
END;
CREATE TRIGGER container_delete_ps AFTER DELETE ON container
BEGIN
UPDATE policy_stat
SET container_count = container_count - (1 - old.deleted),
object_count = object_count - old.object_count,
bytes_used = bytes_used - old.bytes_used
WHERE storage_policy_index = old.storage_policy_index;
END;
"""
class AccountBroker(DatabaseBroker):
    """Encapsulates working with an account database."""
    # Identifiers consumed by the generic DatabaseBroker machinery.
    db_type = 'account'
    db_contains_type = 'container'
    db_reclaim_timestamp = 'delete_timestamp'

    def _initialize(self, conn, put_timestamp, **kwargs):
        """
        Create a brand new account database (tables, indices, triggers, etc.)

        :param conn: DB connection object
        :param put_timestamp: put timestamp
        """
        if not self.account:
            raise ValueError(
                'Attempting to create a new database with no account set')
        self.create_container_table(conn)
        self.create_account_stat_table(conn, put_timestamp)
        self.create_policy_stat_table(conn)

    def create_container_table(self, conn):
        """
        Create container table which is specific to the account DB.

        :param conn: DB connection object
        """
        # The triggers keep the rollups in account_stat (and, via
        # POLICY_STAT_TRIGGER_SCRIPT, policy_stat) consistent with every
        # container insert/delete; updates are forbidden by design.
        conn.executescript("""
            CREATE TABLE container (
                ROWID INTEGER PRIMARY KEY AUTOINCREMENT,
                name TEXT,
                put_timestamp TEXT,
                delete_timestamp TEXT,
                object_count INTEGER,
                bytes_used INTEGER,
                deleted INTEGER DEFAULT 0,
                storage_policy_index INTEGER DEFAULT 0
            );

            CREATE INDEX ix_container_deleted_name ON
                container (deleted, name);

            CREATE TRIGGER container_insert AFTER INSERT ON container
            BEGIN
                UPDATE account_stat
                SET container_count = container_count + (1 - new.deleted),
                    object_count = object_count + new.object_count,
                    bytes_used = bytes_used + new.bytes_used,
                    hash = chexor(hash, new.name,
                                  new.put_timestamp || '-' ||
                                    new.delete_timestamp || '-' ||
                                    new.object_count || '-' || new.bytes_used);
            END;

            CREATE TRIGGER container_update BEFORE UPDATE ON container
            BEGIN
                SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');
            END;

            CREATE TRIGGER container_delete AFTER DELETE ON container
            BEGIN
                UPDATE account_stat
                SET container_count = container_count - (1 - old.deleted),
                    object_count = object_count - old.object_count,
                    bytes_used = bytes_used - old.bytes_used,
                    hash = chexor(hash, old.name,
                                  old.put_timestamp || '-' ||
                                    old.delete_timestamp || '-' ||
                                    old.object_count || '-' || old.bytes_used);
            END;
        """ + POLICY_STAT_TRIGGER_SCRIPT)

    def create_account_stat_table(self, conn, put_timestamp):
        """
        Create account_stat table which is specific to the account DB.
        Not a part of Pluggable Back-ends, internal to the baseline code.

        :param conn: DB connection object
        :param put_timestamp: put timestamp
        """
        conn.executescript("""
            CREATE TABLE account_stat (
                account TEXT,
                created_at TEXT,
                put_timestamp TEXT DEFAULT '0',
                delete_timestamp TEXT DEFAULT '0',
                container_count INTEGER,
                object_count INTEGER DEFAULT 0,
                bytes_used INTEGER DEFAULT 0,
                hash TEXT default '00000000000000000000000000000000',
                id TEXT,
                status TEXT DEFAULT '',
                status_changed_at TEXT DEFAULT '0',
                metadata TEXT DEFAULT ''
            );

            INSERT INTO account_stat (container_count) VALUES (0);
        """)
        # Fill in the singleton stat row created above.
        conn.execute('''
            UPDATE account_stat SET account = ?, created_at = ?, id = ?,
                   put_timestamp = ?, status_changed_at = ?
            ''', (self.account, Timestamp(time.time()).internal, str(uuid4()),
                  put_timestamp, put_timestamp))

    def create_policy_stat_table(self, conn):
        """
        Create policy_stat table which is specific to the account DB.
        Not a part of Pluggable Back-ends, internal to the baseline code.

        :param conn: DB connection object
        """
        # Seed policy 0 from account_stat so pre-policy accounts keep
        # reporting their existing usage.
        conn.executescript("""
            CREATE TABLE policy_stat (
                storage_policy_index INTEGER PRIMARY KEY,
                container_count INTEGER DEFAULT 0,
                object_count INTEGER DEFAULT 0,
                bytes_used INTEGER DEFAULT 0
            );

            INSERT OR IGNORE INTO policy_stat (
                storage_policy_index, container_count, object_count,
                bytes_used
            )
            SELECT 0, container_count, object_count, bytes_used
            FROM account_stat
            WHERE container_count > 0;
        """)

    def get_db_version(self, conn):
        # Lazily detect the on-disk schema version; version 1 is marked
        # by the presence of the ix_container_deleted_name index.
        if self._db_version == -1:
            self._db_version = 0
            for row in conn.execute('''
                    SELECT name FROM sqlite_master
                    WHERE name = 'ix_container_deleted_name' '''):
                self._db_version = 1
        return self._db_version

    def _delete_db(self, conn, timestamp, force=False):
        """
        Mark the DB as deleted.

        :param conn: DB connection object
        :param timestamp: timestamp to mark as deleted
        """
        conn.execute("""
            UPDATE account_stat
            SET delete_timestamp = ?,
                status = 'DELETED',
                status_changed_at = ?
            WHERE delete_timestamp < ? """, (timestamp, timestamp, timestamp))

    def _commit_puts_load(self, item_list, entry):
        """See :func:`swift.common.db.DatabaseBroker._commit_puts_load`"""
        # Entries are base64-encoded pickles written by the same cluster;
        # NOTE(review): pickle on untrusted data would be unsafe -- this
        # assumes pending files are only ever written by Swift itself.
        loaded = pickle.loads(entry.decode('base64'))
        # check to see if the update includes policy_index or not
        (name, put_timestamp, delete_timestamp, object_count, bytes_used,
         deleted) = loaded[:6]
        if len(loaded) > 6:
            storage_policy_index = loaded[6]
        else:
            # legacy support during upgrade until first non legacy storage
            # policy is defined
            storage_policy_index = 0
        item_list.append(
            {'name': name,
             'put_timestamp': put_timestamp,
             'delete_timestamp': delete_timestamp,
             'object_count': object_count,
             'bytes_used': bytes_used,
             'deleted': deleted,
             'storage_policy_index': storage_policy_index})

    def empty(self):
        """
        Check if the account DB is empty.

        :returns: True if the database has no active containers.
        """
        self._commit_puts_stale_ok()
        with self.get() as conn:
            row = conn.execute(
                'SELECT container_count from account_stat').fetchone()
            return (row[0] == 0)

    def make_tuple_for_pickle(self, record):
        # Order must match what _commit_puts_load unpacks.
        return (record['name'], record['put_timestamp'],
                record['delete_timestamp'], record['object_count'],
                record['bytes_used'], record['deleted'],
                record['storage_policy_index'])

    def put_container(self, name, put_timestamp, delete_timestamp,
                      object_count, bytes_used, storage_policy_index):
        """
        Create a container with the given attributes.

        :param name: name of the container to create
        :param put_timestamp: put_timestamp of the container to create
        :param delete_timestamp: delete_timestamp of the container to create
        :param object_count: number of objects in the container
        :param bytes_used: number of bytes used by the container
        :param storage_policy_index: the storage policy for this container
        """
        # A container is considered deleted when its delete time is newer
        # than its put time and it holds no objects.
        if delete_timestamp > put_timestamp and \
                object_count in (None, '', 0, '0'):
            deleted = 1
        else:
            deleted = 0
        record = {'name': name, 'put_timestamp': put_timestamp,
                  'delete_timestamp': delete_timestamp,
                  'object_count': object_count,
                  'bytes_used': bytes_used,
                  'deleted': deleted,
                  'storage_policy_index': storage_policy_index}
        self.put_record(record)

    def _is_deleted_info(self, status, container_count, delete_timestamp,
                         put_timestamp):
        """
        Apply delete logic to database info.

        :returns: True if the DB is considered to be deleted, False otherwise
        """
        return status == 'DELETED' or (
            container_count in (None, '', 0, '0') and
            Timestamp(delete_timestamp) > Timestamp(put_timestamp))

    def _is_deleted(self, conn):
        """
        Check account_stat table and evaluate info.

        :param conn: database conn

        :returns: True if the DB is considered to be deleted, False otherwise
        """
        info = conn.execute('''
            SELECT put_timestamp, delete_timestamp, container_count, status
            FROM account_stat''').fetchone()
        return self._is_deleted_info(**info)

    def is_status_deleted(self):
        """Only returns true if the status field is set to DELETED."""
        with self.get() as conn:
            row = conn.execute('''
                SELECT put_timestamp, delete_timestamp, status
                FROM account_stat''').fetchone()
            return row['status'] == "DELETED" or (
                row['delete_timestamp'] > row['put_timestamp'])

    def get_policy_stats(self, do_migrations=False):
        """
        Get global policy stats for the account.

        :param do_migrations: boolean, if True the policy stat dicts will
                              always include the 'container_count' key;
                              otherwise it may be omitted on legacy databases
                              until they are migrated.

        :returns: dict of policy stats where the key is the policy index and
                  the value is a dictionary like {'object_count': M,
                  'bytes_used': N, 'container_count': L}
        """
        columns = [
            'storage_policy_index',
            'container_count',
            'object_count',
            'bytes_used',
        ]

        def run_query():
            return (conn.execute('''
                SELECT %s
                FROM policy_stat
                ''' % ', '.join(columns)).fetchall())

        self._commit_puts_stale_ok()
        info = []
        with self.get() as conn:
            try:
                info = run_query()
            except sqlite3.OperationalError as err:
                # Legacy DBs: either migrate in place or degrade the
                # query; a missing policy_stat table yields empty stats.
                if "no such column: container_count" in str(err):
                    if do_migrations:
                        self._migrate_add_container_count(conn)
                    else:
                        columns.remove('container_count')
                    info = run_query()
                elif "no such table: policy_stat" not in str(err):
                    raise

        policy_stats = {}
        for row in info:
            stats = dict(row)
            key = stats.pop('storage_policy_index')
            policy_stats[key] = stats
        return policy_stats

    def get_info(self):
        """
        Get global data for the account.

        :returns: dict with keys: account, created_at, put_timestamp,
                  delete_timestamp, status_changed_at, container_count,
                  object_count, bytes_used, hash, id
        """
        self._commit_puts_stale_ok()
        with self.get() as conn:
            return dict(conn.execute('''
                SELECT account, created_at, put_timestamp, delete_timestamp,
                       status_changed_at, container_count, object_count,
                       bytes_used, hash, id
                FROM account_stat
            ''').fetchone())

    def list_containers_iter(self, limit, marker, end_marker, prefix,
                             delimiter):
        """
        Get a list of containers sorted by name starting at marker onward, up
        to limit entries. Entries will begin with the prefix and will not have
        the delimiter after the prefix.

        :param limit: maximum number of entries to get
        :param marker: marker query
        :param end_marker: end marker query
        :param prefix: prefix query
        :param delimiter: delimiter for query

        :returns: list of tuples of (name, object_count, bytes_used, 0)
        """
        (marker, end_marker, prefix, delimiter) = utf8encode(
            marker, end_marker, prefix, delimiter)
        self._commit_puts_stale_ok()
        if delimiter and not prefix:
            prefix = ''
        orig_marker = marker
        with self.get() as conn:
            results = []
            # Loop because each delimiter "roll up" restarts the query
            # from a new marker.
            while len(results) < limit:
                query = """
                    SELECT name, object_count, bytes_used, 0
                    FROM container
                    WHERE deleted = 0 AND """
                query_args = []
                if end_marker:
                    query += ' name < ? AND'
                    query_args.append(end_marker)
                if marker and marker >= prefix:
                    query += ' name > ? AND'
                    query_args.append(marker)
                elif prefix:
                    query += ' name >= ? AND'
                    query_args.append(prefix)
                if self.get_db_version(conn) < 1:
                    # "+deleted" defeats index use on pre-index schemas.
                    query += ' +deleted = 0'
                else:
                    query += ' deleted = 0'
                query += ' ORDER BY name LIMIT ?'
                query_args.append(limit - len(results))
                curs = conn.execute(query, query_args)
                curs.row_factory = None

                if prefix is None:
                    # A delimiter without a specified prefix is ignored
                    return [r for r in curs]
                if not delimiter:
                    if not prefix:
                        # It is possible to have a delimiter but no prefix
                        # specified. As above, the prefix will be set to the
                        # empty string, so avoid performing the extra work to
                        # check against an empty prefix.
                        return [r for r in curs]
                    else:
                        return [r for r in curs if r[0].startswith(prefix)]

                # We have a delimiter and a prefix (possibly empty string) to
                # handle
                rowcount = 0
                for row in curs:
                    rowcount += 1
                    marker = name = row[0]
                    if len(results) >= limit or not name.startswith(prefix):
                        curs.close()
                        return results
                    end = name.find(delimiter, len(prefix))
                    if end > 0:
                        # Roll the whole "directory" up into one entry and
                        # skip past it on the next query iteration.
                        marker = name[:end] + chr(ord(delimiter) + 1)
                        dir_name = name[:end + 1]
                        if dir_name != orig_marker:
                            results.append([dir_name, 0, 0, 1])
                        curs.close()
                        break
                    results.append(row)
                if not rowcount:
                    break
            return results

    def merge_items(self, item_list, source=None):
        """
        Merge items into the container table.

        :param item_list: list of dictionaries of {'name', 'put_timestamp',
                          'delete_timestamp', 'object_count', 'bytes_used',
                          'deleted', 'storage_policy_index'}
        :param source: if defined, update incoming_sync with the source
        """
        def _really_merge_items(conn):
            max_rowid = -1
            curs = conn.cursor()
            for rec in item_list:
                record = [rec['name'], rec['put_timestamp'],
                          rec['delete_timestamp'], rec['object_count'],
                          rec['bytes_used'], rec['deleted'],
                          rec['storage_policy_index']]
                query = '''
                    SELECT name, put_timestamp, delete_timestamp,
                           object_count, bytes_used, deleted,
                           storage_policy_index
                    FROM container WHERE name = ?
                '''
                if self.get_db_version(conn) >= 1:
                    query += ' AND deleted IN (0, 1)'
                curs_row = curs.execute(query, (rec['name'],))
                curs_row.row_factory = None
                row = curs_row.fetchone()
                if row:
                    row = list(row)
                    # Backfill missing fields from the existing row,
                    # then keep the newest timestamps.  (py2: xrange)
                    for i in xrange(5):
                        if record[i] is None and row[i] is not None:
                            record[i] = row[i]
                    if row[1] > record[1]:  # Keep newest put_timestamp
                        record[1] = row[1]
                    if row[2] > record[2]:  # Keep newest delete_timestamp
                        record[2] = row[2]
                # If deleted, mark as such
                if record[2] > record[1] and \
                        record[3] in (None, '', 0, '0'):
                    record[5] = 1
                else:
                    record[5] = 0
                # DELETE + INSERT, because UPDATE is blocked by trigger.
                curs.execute('''
                    DELETE FROM container WHERE name = ? AND
                                                deleted IN (0, 1)
                ''', (record[0],))
                curs.execute('''
                    INSERT INTO container (name, put_timestamp,
                        delete_timestamp, object_count, bytes_used,
                        deleted, storage_policy_index)
                    VALUES (?, ?, ?, ?, ?, ?, ?)
                ''', record)
                if source:
                    max_rowid = max(max_rowid, rec['ROWID'])
            if source:
                # Record the replication high-water mark for this peer.
                try:
                    curs.execute('''
                        INSERT INTO incoming_sync (sync_point, remote_id)
                        VALUES (?, ?)
                    ''', (max_rowid, source))
                except sqlite3.IntegrityError:
                    curs.execute('''
                        UPDATE incoming_sync
                        SET sync_point=max(?, sync_point)
                        WHERE remote_id=?
                    ''', (max_rowid, source))
            conn.commit()

        with self.get() as conn:
            # create the policy stat table if needed and add spi to container
            try:
                _really_merge_items(conn)
            except sqlite3.OperationalError as err:
                if 'no such column: storage_policy_index' not in str(err):
                    raise
                self._migrate_add_storage_policy_index(conn)
                _really_merge_items(conn)

    def _migrate_add_container_count(self, conn):
        """
        Add the container_count column to the 'policy_stat' table and
        update it

        :param conn: DB connection object
        """
        # add the container_count column
        curs = conn.cursor()
        curs.executescript('''
            DROP TRIGGER container_delete_ps;
            DROP TRIGGER container_insert_ps;
            ALTER TABLE policy_stat
            ADD COLUMN container_count INTEGER DEFAULT 0;
        ''' + POLICY_STAT_TRIGGER_SCRIPT)

        # keep the simple case simple, if there's only one entry in the
        # policy_stat table we just copy the total container count from the
        # account_stat table

        # if that triggers an update then the where changes <> 0 *would* exist
        # and the insert or replace from the count subqueries won't execute
        curs.executescript("""
        UPDATE policy_stat
        SET container_count = (
            SELECT container_count
            FROM account_stat)
        WHERE (
            SELECT COUNT(storage_policy_index)
            FROM policy_stat
        ) <= 1;

        INSERT OR REPLACE INTO policy_stat (
            storage_policy_index,
            container_count,
            object_count,
            bytes_used
        )
        SELECT p.storage_policy_index,
               c.count,
               p.object_count,
               p.bytes_used
        FROM (
            SELECT storage_policy_index,
                   COUNT(*) as count
            FROM container
            WHERE deleted = 0
            GROUP BY storage_policy_index
        ) c
        JOIN policy_stat p
        ON p.storage_policy_index = c.storage_policy_index
        WHERE NOT EXISTS(
            SELECT changes() as change
            FROM policy_stat
            WHERE change <> 0
        );
        """)
        conn.commit()

    def _migrate_add_storage_policy_index(self, conn):
        """
        Add the storage_policy_index column to the 'container' table and
        set up triggers, creating the policy_stat table if needed.

        :param conn: DB connection object
        """
        try:
            self.create_policy_stat_table(conn)
        except sqlite3.OperationalError as err:
            # Table may already exist from a partially-applied migration.
            if 'table policy_stat already exists' not in str(err):
                raise
        conn.executescript('''
            ALTER TABLE container
            ADD COLUMN storage_policy_index INTEGER DEFAULT 0;
        ''' + POLICY_STAT_TRIGGER_SCRIPT)
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Thomas Parslow http://almostobsolete.net/
# Copyright (c) 2012 Robie Basak <robie@justgohome.co.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from __future__ import with_statement
from .exceptions import UploadArchiveError
from .job import Job
from .writer import compute_hashes_from_fileobj, resume_file_upload, Writer
from .concurrent import ConcurrentUploader
from .utils import minimum_part_size, DEFAULT_PART_SIZE
import os.path
# Byte-size units.
_MEGABYTE = 1024 * 1024
_GIGABYTE = 1024 * _MEGABYTE

# Service limits: at most 10,000 parts per multipart upload, so with the
# 4 GB maximum part size an archive tops out at 10000 * 4 GB.
MAXIMUM_ARCHIVE_SIZE = 10000 * 4 * _GIGABYTE
MAXIMUM_NUMBER_OF_PARTS = 10000
class Vault(object):
DefaultPartSize = DEFAULT_PART_SIZE
SingleOperationThreshold = 100 * _MEGABYTE
ResponseDataElements = (('VaultName', 'name', None),
('VaultARN', 'arn', None),
('CreationDate', 'creation_date', None),
('LastInventoryDate', 'last_inventory_date', None),
('SizeInBytes', 'size', 0),
('NumberOfArchives', 'number_of_archives', 0))
def __init__(self, layer1, response_data=None):
self.layer1 = layer1
if response_data:
for response_name, attr_name, default in self.ResponseDataElements:
value = response_data[response_name]
if isinstance(value, unicode):
value = value.encode('utf8')
setattr(self, attr_name, value)
else:
for response_name, attr_name, default in self.ResponseDataElements:
setattr(self, attr_name, default)
    def __repr__(self):
        # Identify the vault by its ARN.
        return 'Vault("%s")' % self.arn
    def delete(self):
        """
        Deletes this vault. WARNING!
        """
        self.layer1.delete_vault(self.name)
def upload_archive(self, filename, description=None):
"""
Adds an archive to a vault. For archives greater than 100MB the
multipart upload will be used.
:type file: str
:param file: A filename to upload
:type description: str
:param description: An optional description for the archive.
:rtype: str
:return: The archive id of the newly created archive
"""
if os.path.getsize(filename) > self.SingleOperationThreshold:
return self.create_archive_from_file(filename, description=description)
return self._upload_archive_single_operation(filename, description)
def _upload_archive_single_operation(self, filename, description):
"""
Adds an archive to a vault in a single operation. It's recommended for
archives less than 100MB
:type file: str
:param file: A filename to upload
:type description: str
:param description: A description for the archive.
:rtype: str
:return: The archive id of the newly created archive
"""
with open(filename, 'rb') as fileobj:
linear_hash, tree_hash = compute_hashes_from_fileobj(fileobj)
fileobj.seek(0)
response = self.layer1.upload_archive(self.name, fileobj,
linear_hash, tree_hash,
description)
return response['ArchiveId']
def create_archive_writer(self, part_size=DefaultPartSize,
description=None):
"""
Create a new archive and begin a multi-part upload to it.
Returns a file-like object to which the data for the archive
can be written. Once all the data is written the file-like
object should be closed, you can then call the get_archive_id
method on it to get the ID of the created archive.
:type part_size: int
:param part_size: The part size for the multipart upload.
:type description: str
:param description: An optional description for the archive.
:rtype: :class:`boto.glacier.writer.Writer`
:return: A Writer object that to which the archive data
should be written.
"""
response = self.layer1.initiate_multipart_upload(self.name,
part_size,
description)
return Writer(self, response['UploadId'], part_size=part_size)
def create_archive_from_file(self, filename=None, file_obj=None,
description=None, upload_id_callback=None):
"""
Create a new archive and upload the data from the given file
or file-like object.
:type filename: str
:param filename: A filename to upload
:type file_obj: file
:param file_obj: A file-like object to upload
:type description: str
:param description: An optional description for the archive.
:type upload_id_callback: function
:param upload_id_callback: if set, call with the upload_id as the
only parameter when it becomes known, to enable future calls
to resume_archive_from_file in case resume is needed.
:rtype: str
:return: The archive id of the newly created archive
"""
part_size = self.DefaultPartSize
if not file_obj:
file_size = os.path.getsize(filename)
try:
part_size = minimum_part_size(file_size, part_size)
except ValueError:
raise UploadArchiveError("File size of %s bytes exceeds "
"40,000 GB archive limit of Glacier.")
file_obj = open(filename, "rb")
writer = self.create_archive_writer(
description=description,
part_size=part_size)
if upload_id_callback:
upload_id_callback(writer.upload_id)
while True:
data = file_obj.read(part_size)
if not data:
break
writer.write(data)
writer.close()
return writer.get_archive_id()
@staticmethod
def _range_string_to_part_index(range_string, part_size):
start, inside_end = [int(value) for value in range_string.split('-')]
end = inside_end + 1
length = end - start
if length == part_size + 1:
# Off-by-one bug in Amazon's Glacier implementation,
# see: https://forums.aws.amazon.com/thread.jspa?threadID=106866
# Workaround: since part_size is too big by one byte, adjust it
end -= 1
inside_end -= 1
length -= 1
assert not (start % part_size), (
"upload part start byte is not on a part boundary")
assert (length <= part_size), "upload part is bigger than part size"
return start // part_size
def resume_archive_from_file(self, upload_id, filename=None,
                             file_obj=None):
    """Resume upload of a file already part-uploaded to Glacier.

    The resumption of an upload where the part-uploaded section is empty
    is a valid degenerate case that this function can handle.

    One and only one of filename or file_obj must be specified.

    :type upload_id: str
    :param upload_id: existing Glacier upload id of upload being resumed.

    :type filename: str
    :param filename: file to open for resume

    :type file_obj: file
    :param file_obj: file-like object containing local data to resume.
        This must read from the start of the entire upload, not just
        from the point being resumed.  Use file_obj.seek(0) to achieve
        this if necessary.

    :rtype: str
    :return: The archive id of the newly created archive
    """
    # Local import: works on both Python 2 and 3, unlike the
    # str.decode('hex') codec used previously, which is Python 2 only.
    import binascii

    part_list_response = self.list_all_parts(upload_id)
    part_size = part_list_response['PartSizeInBytes']
    part_hash_map = {}
    for part_desc in part_list_response['Parts']:
        part_index = self._range_string_to_part_index(
            part_desc['RangeInBytes'], part_size)
        part_tree_hash = binascii.unhexlify(part_desc['SHA256TreeHash'])
        part_hash_map[part_index] = part_tree_hash

    if not file_obj:
        file_obj = open(filename, "rb")

    return resume_file_upload(
        self, upload_id, part_size, file_obj, part_hash_map)
def concurrent_create_archive_from_file(self, filename, description,
                                        **kwargs):
    """
    Create a new archive from a file, uploading its parts concurrently.

    Convenience wrapper around
    :class:`boto.glacier.concurrent.ConcurrentUploader`, which performs
    a multipart upload with the parts sent in parallel.

    :type filename: str
    :param filename: A filename to upload

    :param kwargs: Additional kwargs to pass through to
        :py:class:`boto.glacier.concurrent.ConcurrentUploader`.
        You can pass any argument besides the ``api`` and
        ``vault_name`` param (these arguments are already
        passed to the ``ConcurrentUploader`` for you).

    :raises: `boto.glacier.exception.UploadArchiveError` is an error
        occurs during the upload process.

    :rtype: str
    :return: The archive id of the newly created archive
    """
    concurrent_uploader = ConcurrentUploader(self.layer1, self.name,
                                             **kwargs)
    return concurrent_uploader.upload(filename, description)
def retrieve_archive(self, archive_id, sns_topic=None,
                     description=None):
    """
    Initiate an archive-retrieval job to download the data from an
    archive.  You will need to wait for the notification from
    Amazon (via SNS) before you can actually download the data;
    this takes around 4 hours.

    :type archive_id: str
    :param archive_id: The id of the archive

    :type description: str
    :param description: An optional description for the job.

    :type sns_topic: str
    :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
        sends notification when the job is completed and the output
        is ready for you to download.

    :rtype: :class:`boto.glacier.job.Job`
    :return: A Job object representing the retrieval job.
    """
    job_params = {'Type': 'archive-retrieval', 'ArchiveId': archive_id}
    # Only include the optional fields the caller actually supplied.
    if sns_topic is not None:
        job_params['SNSTopic'] = sns_topic
    if description is not None:
        job_params['Description'] = description
    job_id = self.layer1.initiate_job(self.name, job_params)['JobId']
    return self.get_job(job_id)
def retrieve_inventory(self, sns_topic=None,
                       description=None):
    """
    Initiate a inventory retrieval job to list the items in the
    vault. You will need to wait for the notification from
    Amazon (via SNS) before you can actually download the data,
    this takes around 4 hours.

    :type description: str
    :param description: An optional description for the job.

    :type sns_topic: str
    :param sns_topic: The Amazon SNS topic ARN where Amazon Glacier
        sends notification when the job is completed and the output
        is ready for you to download.

    :rtype: str
    :return: The id of the initiated inventory-retrieval job.  Note
        that, unlike retrieve_archive, this returns a bare job id
        string rather than a Job object.
    """
    job_data = {'Type': 'inventory-retrieval'}
    if sns_topic is not None:
        job_data['SNSTopic'] = sns_topic
    if description is not None:
        job_data['Description'] = description
    response = self.layer1.initiate_job(self.name, job_data)
    # Returns only the job id; callers can pass it to get_job if they
    # want a Job object.
    return response['JobId']
def delete_archive(self, archive_id):
    """
    Delete a single archive from this vault.

    :type archive_id: str
    :param archive_id: The ID for the archive to be deleted.
    """
    # Straight delegation to the layer1 API call.
    return self.layer1.delete_archive(self.name, archive_id)
def get_job(self, job_id):
    """
    Get an object representing a job in progress.

    :type job_id: str
    :param job_id: The ID of the job

    :rtype: :class:`boto.glacier.job.Job`
    :return: A Job object representing the job.
    """
    job_info = self.layer1.describe_job(self.name, job_id)
    return Job(self, job_info)
def list_jobs(self, completed=None, status_code=None):
    """
    Return a list of Job objects related to this vault.

    :type completed: boolean
    :param completed: Specifies the state of the jobs to return.
        If a value of True is passed, only completed jobs will
        be returned. If a value of False is passed, only
        uncompleted jobs will be returned. If no value is
        passed, all jobs will be returned.

    :type status_code: string
    :param status_code: Specifies the type of job status to return.
        Valid values are: InProgress|Succeeded|Failed. If not
        specified, jobs with all status codes are returned.

    :rtype: list of :class:`boto.glacier.job.Job`
    :return: A list of Job objects related to this vault.
    """
    listing = self.layer1.list_jobs(self.name, completed, status_code)
    return [Job(self, job_desc) for job_desc in listing['JobList']]
def list_all_parts(self, upload_id):
    """Automatically make and combine multiple calls to list_parts.

    Call list_parts as necessary, combining the results in case
    multiple calls were required to get data on all available parts.
    """
    combined = self.layer1.list_parts(self.name, upload_id)
    next_marker = combined['Marker']
    # Keep fetching pages and folding them into the first response until
    # the service stops returning a continuation marker.
    while next_marker:
        page = self.layer1.list_parts(
            self.name, upload_id, marker=next_marker)
        combined['Parts'].extend(page['Parts'])
        next_marker = page['Marker']
    # A marker makes no sense in an unpaginated result, and clearing it
    # makes testing easier; the result stays a normal (but expanded)
    # response.
    combined['Marker'] = None
    return combined
| |
import requests
import string
import datetime
import tkinter as tk
from tkinter import *
from tkinter.ttk import *
from PIL import Image, ImageTk
# TODO: TEMP ETC. PLOTTING FROM MATPLOTLIB
# TODO: ADD OPTION TO CHOOSE HOW MANY DAYS REPORT (using spinbox)
# TODO: CONVERT RADIO BUTTONS TO COMBOBOXES (they look nicer)
# TODO: ADD error/status bar at the bottom to display errors etc.
class WeatherApp(Tk):
    """Tkinter front-end for the OpenWeatherMap HTTP API.

    Builds the main window (location entry, display canvas, buttons) and
    fetches/renders a current-weather report or one of two forecast
    formats into a text box.  Large parts of the widget layout are still
    commented out while the UI is being reworked.
    """

    def __init__(self):
        super().__init__()
        self.title("The Weather App")
        self.config(bg="#CAE4D8", bd=4, relief="groove", highlightbackground="#CAE4D8", highlightcolor="#CAE4D8")
        # self.wm_attributes("-transparentcolor", "#CAE4D8")
        self.geometry("1000x800")
        self.resizable(width=FALSE, height=FALSE)
        # Shared state: unit system, report type, and status-bar text.
        self.var_units = StringVar(value="metric")
        self.var_report_type = StringVar(value="weather")
        self.var_status = StringVar(value="")
        self.report_buttons = []
        self.units_buttons = []
        # self.attributes("-alpha", 0.7)

        # Theme definition
        self.style = Style()
        self.style.theme_use('default')
        # ('winnative', 'clam', 'alt', 'default', 'classic', 'vista', 'xpnative')

        # Style definitions
        # COLORS USED:
        # morning sky: #CAE4D8
        # honey: #DCAE1D
        # cerulean: #00303F
        # mist: #7A9D96
        self.style.configure("my.TLabel", foreground="#00303F", background="#CAE4D8", borderwidth=0, relief="flat",
                             padding=0)
        self.style.configure("my.TEntry", foreground="#00303F", borderwidth=2, relief="raised", padding=3)
        self.style.configure("my.TButton", foreground="#00303F", background="#DCAE1D", borderwidth=2, relief="flat",
                             padding=2)
        self.style.configure("clear.TButton", foreground="#00303F", background="#CAE4D8", borderwidth=0, relief="flat",
                             padding=(0, 0, 4, 0), width=1, anchor=CENTER)
        self.style.map("clear.TButton", background=[("active", "#CAE4D8")], foreground=[("pressed", "#CAE4D8")])
        self.style.configure("my.TFrame", background="#CAE4D8")
        self.style.configure("my.TText", background="#7A9D96", fg="#DCAE1D")

        # LAYOUT DESIGN

        #main background image
        # self.image = Image.open(r"Resources\Images\main_bg.jpg")
        # self.image_conv = ImageTk.PhotoImage(self.image)
        # self.main_bg_img = self.image_conv
        # self.background_label = Label(self, image=self.main_bg_img)
        # self.background_label.place(x=0, y=0, relwidth=1, relheight=1)

        # LOCATION FRAME
        self.loc_frame = Frame(self, style="my.TFrame")
        # self.loc_frame.place(x=25, y=25)
        self.loc_frame.grid(row=0, column=0, padx=(5, 5), pady=(10, 4), sticky=EW)

        # location label
        self.location_img = PhotoImage(file=r"Resources\Labels\location.png")
        self.l1 = Label(self.loc_frame, text="Location name", style="my.TLabel", image=self.location_img,
                        compound=CENTER)
        self.l1.grid(row=0, column=0, padx=4, pady=4, sticky=W)

        # location entry
        self.e1_val = StringVar()
        self.e1 = tk.Entry(self.loc_frame, textvariable=self.e1_val, background="#CAE4D8", fg="#00303F", width=40)
        # self.e1 = Entry(self.loc_frame, textvariable=self.e1_val, width=70, style="my.TEntry")
        self.e1.focus()
        self.e1.grid(row=0, column=1, padx=0, pady=4, sticky=NSEW)

        # clear location text button
        self.button_clear_normal_img = PhotoImage(file=r"Resources\Buttons\clear_entry_normal.png")
        self.button_clear_hover_img = PhotoImage(file=r"Resources\Buttons\clear_entry_hover.png")
        self.b3 = Button(self.loc_frame, text="X", image=self.button_clear_normal_img, compound=CENTER,
                         command=self.clear_loc_entry, style="clear.TButton")
        self.b3.grid(row=0, column=2, sticky=W, padx=0, pady=0)
        self.b3.bind("<Return>", self.clear_loc_entry)
        self.b3.bind("<Enter>", self.hover_clear_button)
        self.b3.bind("<Leave>", self.normal_clear_button)

        # # MAIN DISPLAY FRAME
        # self.main_frame = Frame(self.background_label, style="my.TFrame")
        # self.main_frame.grid(row=1, column=0)

        # MAIN DISPLAY AREA CANVAS
        self.main_canvas = Canvas(self, bg="#DCAE1D", borderwidth=2, relief="groove", highlightbackground="#CAE4D8", highlightcolor="#CAE4D8")
        self.main_canvas.grid(row=1, column=0, padx=(5, 5), pady=(2, 2), sticky=NSEW)
        # self.image = Image.open(r"Resources\Images\canvas_bg.jpg")
        # self.image_conv = ImageTk.PhotoImage(self.image)
        # self.canvas_bg_img = self.image_conv
        # self.main_canvas.create_image(0, 0, image=self.main_bg_img, anchor=NW)
        self.image = Image.open(r"Resources\Labels\location.png")
        self.image_conv = ImageTk.PhotoImage(self.image)
        self.canvas_img_1 = self.image_conv
        self.main_canvas.create_image(20, 20, image=self.canvas_img_1, anchor=NW)
        # NOTE(review): canvas_text is never used after creation — looks like
        # leftover from a layout experiment; confirm before removing.
        canvas_text = self.main_canvas.create_text(20, 12, text="test text", fill="#DCAE1D", anchor=NW)
        self.metric_button = Button(self.main_canvas, text="degC")
        self.metric_button.place(x=380, y=100)

        # # MAIN BUTTON FRAME
        # self.button_frame = Frame(self, style="my.TFrame")
        # # self.button_frame.place(x=200, y=25)
        # self.button_frame.grid(row=0, column=4, padx=(25, 25), pady=(25, 4), sticky=NSEW)
        #
        # # get report button
        # self.button_img_normal = PhotoImage(file=r"Resources\Labels\location.png")
        # self.button_img_hover = PhotoImage(file=r"Resources\Buttons\blank_1_hover.png")
        # self.b1 = Button(self.button_frame, image=self.button_img_normal, compound=CENTER, text="Get report",
        #                  command=self.get_report, style="my.TButton")
        # self.b1.bind("<Enter>", self.hover_button)
        # self.b1.bind("<Leave>", self.normal_button)
        # self.b1.grid(row=0, column=3, padx=0, sticky=W)
        #
        # # close app button
        # self.b2 = Button(self.button_frame, text="Close", command=self.close_app, style="my.TButton")
        # self.b2.grid(row=0, column=7, padx=4, sticky=E)

        # # TEXT BOX FRAME
        # self.text_frame = Frame(self, style="my.TFrame")
        # self.text_frame.grid(row=1, column=0, padx=(25, 25), pady=(0, 25), sticky=W)
        #
        # # main text box
        # self.t1 = Text(self.text_frame, state=DISABLED, height=20, width=55, borderwidth=5, bg="#7A9D96")
        # self.t1.grid(row=0, rowspan=20, column=0, sticky=NSEW)
        #
        # # status / error bar
        # self.l_status = Label(self.text_frame, textvariable=self.var_status, width=30, style="my.TLabel")
        # self.l_status.grid(row=21, column=0, pady=4, sticky=W)

        # # SELECTION FRAME
        # self.selection_frame = Frame(self, style="my.TFrame")
        # self.selection_frame.grid(row=1, column=4, padx=4, sticky=N)
        #
        # #["weather", "forecast", "forecast/daily"] REPORT TYPE SECTION
        # self.l4 = Label(self.selection_frame, text="Report type", style="my.TLabel")
        # self.l4.grid(row=2, column=6, columnspan=2, sticky=N)
        #
        # # report type radio button
        # for row, name, value in zip(range(3, 6), ["current", "5 days every 3 hours", "7 days, daily"],
        #                             ):
        #     self.report_buttons.append(
        #         Radiobutton(self.selection_frame, text=name, variable=self.var_report_type, value=value,
        #                     command=self.set_report_type))
        #     self.report_buttons[row - 3].grid(row=row, column=7, pady=4, sticky=W)
        #
        # # UNITS TYPE SECTION
        # self.l5 = Label(self.selection_frame, text="Units type", style="my.TLabel")
        # self.l5.grid(row=6, column=6, columnspan=2, sticky=N)
        #
        # # units type radio button
        # for row, name, value in zip(range(7, 9), ["metric", "imperial"], ["metric", "imperial"]):
        #     self.units_buttons.append(Radiobutton(self.selection_frame, text=name, variable=self.var_units, value=value,
        #                                           command=self.set_units))
        #     self.units_buttons[row - 7].grid(row=row, column=7, pady=4, sticky=W)

        # Pressing Enter anywhere in the window triggers a fetch.
        self.bind("<Return>", self.get_report)

    def clear_loc_entry(self, *args):
        """Empty the location entry and return keyboard focus to it."""
        self.e1.delete(0, END)
        self.e1.focus()

    def hover_button(self, *args):
        """Swap the get-report button image on mouse-over.

        NOTE(review): self.b1 / button images are only created in the
        commented-out BUTTON FRAME section above — confirm before wiring up.
        """
        self.b1.configure(image=self.button_img_hover)

    def normal_button(self, *args):
        """Restore the get-report button image when the pointer leaves."""
        self.b1.configure(image=self.button_img_normal)

    def hover_clear_button(self, *args):
        """Swap the clear-entry button image on mouse-over."""
        self.b3.configure(image=self.button_clear_hover_img)

    def normal_clear_button(self, *args):
        """Restore the clear-entry button image when the pointer leaves."""
        self.b3.configure(image=self.button_clear_normal_img)

    def set_report_type(self):
        # Placeholder: report type is currently read directly from
        # self.var_report_type when a request is made.
        pass

    def set_units(self):
        # Placeholder: units are currently read directly from self.var_units.
        pass

    def get_report(self, *args):
        """ Obtain json weather report"""
        self.w_d = {}
        response = {}
        # NOTE(review): hard-coded API key — consider loading it from a
        # config file or environment variable instead of source code.
        api_key = "fa730d41d41ae83226a227a150d927ac"
        base_url = "http://api.openweathermap.org/data/2.5/{0}?q={1}{2}&APPID="
        # Strip punctuation from the user-typed location before building
        # the query string.
        punctuation = string.punctuation
        translator = str.maketrans('', '', punctuation)
        location = self.e1_val.get()
        location = location.translate(translator)
        if self.var_units.get() == "":
            units_prefix = ""
        else:
            units_prefix = "&units="
        try:
            response = requests.get(base_url.format(self.var_report_type.get(), location,
                                                    units_prefix + self.var_units.get()) + api_key)
        except requests.exceptions.ConnectionError:
            self.var_status.set("Unable to establish connection. Please connect to the internet")
            return
        self.w_d = response.json()
        # had to add int(w_d["cod]) as the output from API is int (for current) or string (for longer forecasts)
        if int(self.w_d["cod"]) != 200:
            # NOTE(review): self.t1 is only created in the commented-out
            # TEXT BOX FRAME section of __init__, so this branch will raise
            # AttributeError until that UI is restored — confirm.
            self.t1.config(state=NORMAL)
            self.t1.delete(1.0, END)
            self.t1.config(state=DISABLED)
            self.var_status.set("Error: {0}, {1}".format(self.w_d["cod"], self.w_d["message"]))
        else:
            self.display_report()

    def display_report(self):
        """ Displays report converting data from self.w_d dictionary.
        Unfortunately the API has major differences in data structure for each type of the
        forecast and one method of extracting data for each cell does not work.

        NOTE(review): all branches write to self.t1, which is only created in
        a commented-out section of __init__ — confirm the text box is restored
        before calling this."""
        # clean the error/status bar
        self.var_status.set("")
        self.time_conv = datetime.datetime.fromtimestamp
        # Unit labels that match the requested unit system.
        if self.var_units.get() == "metric":
            self.temp_unit = "degC"
            self.speed_unit = "m/s"
        elif self.var_units.get() == "imperial":
            self.temp_unit = "degF"
            self.speed_unit = "mile/hr"
        # Current weather report
        if self.var_report_type.get() == "weather":
            self.t1.config(state=NORMAL)
            self.t1.delete(1.0, END)
            self.t1.insert(END, ("Weather report for: {0}, {1}, lon: {2}, lat: {3}\n".
                                 format(self.w_d["name"], self.w_d["sys"]["country"],
                                        self.w_d["coord"]["lon"], self.w_d["coord"]["lat"])))
            self.t1.insert(END, "Weather type: {0}, {1}\n".format(self.w_d["weather"][0]["main"].lower(),
                                                                  self.w_d["weather"][0]["description"]))
            self.t1.insert(END, "Cloud coverage: {0}%\n".format(self.w_d["clouds"]["all"]))
            self.t1.insert(END, "Current temperature: {0} {1}\n".format(self.w_d["main"]["temp"], self.temp_unit))
            self.t1.insert(END, "Current minimum temperature: {0} {1}\n".format(self.w_d["main"]['temp_min'],
                                                                                self.temp_unit))
            self.t1.insert(END, "Current maximum temperature: {0} {1}\n".format(self.w_d["main"]['temp_max'],
                                                                                self.temp_unit))
            self.t1.insert(END, "Pressure: {0} hPa\n".format(self.w_d["main"]["pressure"]))
            self.t1.insert(END, "Humidity: {0}%\n".format(self.w_d["main"]["humidity"]))
            self.t1.insert(END, "Visibility: {0} m\n".format(self.w_d["visibility"]))
            self.t1.insert(END, "Wind speed: {0} {1}\n".format(self.w_d["wind"]["speed"], self.speed_unit))
            self.t1.insert(END, "Wind direction: {0} deg\n".format(self.w_d["wind"]["deg"]))
            # Rain/snow volumes are only present in the payload when they
            # occurred, hence the KeyError guard.
            for name in ["rain", "snow"]:
                try:
                    self.t1.insert(END,
                                   "{0} volume for last 3 hours: {1:.4} mm".format(name.title(), self.w_d[name]["3h"]))
                except KeyError:
                    pass
            self.t1.insert(END, "Sunrise at: {0}\n".format(self.time_conv(self.w_d["sys"]["sunrise"]).
                                                           strftime("%H:%M")))
            self.t1.insert(END, "Sunset at: {0}\n".format(self.time_conv(self.w_d["sys"]["sunset"]).strftime("%H:%M")))
            self.t1.config(state=DISABLED)
        # 5 days / 3 hrs report
        elif self.var_report_type.get() == "forecast":
            self.t1.config(state=NORMAL)
            self.t1.delete(1.0, END)
            self.t1.insert(END, ("Weather report for: {0}, {1}, lon: {2}, lat: {3}\n\n".
                                 format(self.w_d["city"]["name"], self.w_d["city"]["country"],
                                        self.w_d["city"]["coord"]["lon"], self.w_d["city"]["coord"]["lat"])))
            for item in self.w_d["list"]:
                self.t1.insert(END, "Forecast at: {0}\n".format(item["dt_txt"]))
                self.t1.insert(END, "Weather type: {0}, {1}\n".format(item["weather"][0]["main"].lower(),
                                                                      item["weather"][0]["description"]))
                self.t1.insert(END, "Cloud coverage: {0}%\n".format(item["clouds"]["all"]))
                self.t1.insert(END, "Temperature: {0} {1}\n".format(item["main"]["temp"], self.temp_unit))
                self.t1.insert(END, "Minimum temperature: {0} {1}\n".format(item["main"]['temp_min'], self.temp_unit))
                self.t1.insert(END, "Maximum temperature: {0} {1}\n".format(item["main"]['temp_max'], self.temp_unit))
                self.t1.insert(END, "Pressure: {0} hPa\n".format(item["main"]["pressure"]))
                self.t1.insert(END, "Humidity: {0}%\n".format(item["main"]["humidity"]))
                self.t1.insert(END, "Wind speed: {0} {1}\n".format(item["wind"]["speed"], self.speed_unit))
                self.t1.insert(END, "Wind direction: {0} deg\n".format(item["wind"]["deg"]))
                for name in ["rain", "snow"]:
                    try:
                        self.t1.insert(END,
                                       "{0} volume for last 3 hours: {1:.4} mm".format(name.title(), item[name]["3h"]))
                    except KeyError:
                        pass
                self.t1.insert(END, "\n\n")
            self.t1.config(state=DISABLED)
        # 16 days / daily report
        else:
            self.t1.config(state=NORMAL)
            self.t1.delete(1.0, END)
            self.t1.insert(END, ("Weather report for: {0}, {1}, lon: {2}, lat: {3}\n\n".
                                 format(self.w_d["city"]["name"], self.w_d["city"]["country"],
                                        self.w_d["city"]["coord"]["lon"], self.w_d["city"]["coord"]["lat"])))
            for item in self.w_d["list"]:
                self.t1.insert(END,
                               "Forecast on: {0}\n".format(self.time_conv(item["dt"]).strftime("%d/%m/%Y at %H:%M")))
                self.t1.insert(END, "Weather type: {0}, {1}\n".format(item["weather"][0]["main"].lower(),
                                                                      item["weather"][0]["description"]))
                self.t1.insert(END, "Cloud coverage: {0}%\n".format(item["clouds"]))
                self.t1.insert(END, "\nTemperatures during the day:\n")
                for name, temp_type in zip(["morning", "day", "evening", "night", "minimum", "maximum"],
                                           ["morn", "day", "eve", "night", "min", "max"]):
                    self.t1.insert(END, "\t{0} {1} {2}\n".format(name, item["temp"][temp_type], self.temp_unit))
                self.t1.insert(END, "\nPressure: {0} hPa\n".format(item["pressure"]))
                self.t1.insert(END, "Humidity: {0}%\n".format(item["humidity"]))
                self.t1.insert(END, "Wind speed: {0} {1}\n".format(item["speed"], self.speed_unit))
                self.t1.insert(END, "Wind direction: {0} deg\n".format(item["deg"]))
                for name in ["rain", "snow"]:
                    try:
                        self.t1.insert(END, "{0} volume for last 3 hours: {1:.4} mm".format(name.title(), item[name]))
                    except KeyError:
                        pass
                self.t1.insert(END, "\n\n")
            self.t1.config(state=DISABLED)

    def close_app(self):
        """Destroy the root window and end the application."""
        self.destroy()
# Guard the GUI startup so importing this module has no side effects.
if __name__ == "__main__":
    app = WeatherApp()
    app.mainloop()
| |
# !/usr/bin/env python
# coding=utf-8
"""
Calculates the proton dissociation constant (PKA) for the given free energy
data for a set of coordinates.
* Input is rad_PMF (use corr col (3))
** Plan for standard WHAM, too.
* Find local max (inc, then dec) (two back) (middle val is tgt);
in the algorithm below, I will call the coordinate points
r_i-1 r_i, and r_i+1 (middle point, r_i, is the tgt);
and for energy, corr_i-1, corr_i, and coor_i+1
* Up to local max, do math and add to sum
** Will need to calculate the spacing between coordinate values
(called delta_r); usually this will be a constant number, but
it is not in the case of your sample data because I deleted
points. You can certainly calculate this every step if you wish,
so we don't have to count on equal spacing; the calculation
can be delta_r = r_i+1 - r_i
** we will need pi
** a new constant we can call inv_C_0 (that's a zero) = 1660.0
(it's units are Angstrom ^ 3 / molecule )
** will will need kBT (you calculated this before, in wham_rad;
you can have the user enter the temp. The temp will be the
same as used in wham_rad and in making the wham input line
** sum_for_pka += 4.0 * pi * r_i ** 2 * math.exp( -corr_i / kBT ) * delta_r
** pKa = - math.log10 ( inv_C_0 / sum_for_pka )
* Result is PKA: out to stdout
* Debug out local max value
"""
from __future__ import print_function
import logging
import math
import argparse
import os
import sys
import numpy as np
from md_utils.md_common import (find_files_by_dir,
read_csv, write_csv, calc_kbt, create_out_fname, warning, allow_write)
from md_utils.wham import FREE_KEY, CORR_KEY, COORD_KEY
__author__ = 'mayes'


# Logging #
# logging.basicConfig(filename='fes_combo.log',level=logging.DEBUG)
# logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('calc_pka')

# Error Codes
# The good status code
GOOD_RET = 0
INPUT_ERROR = 1
IO_ERROR = 2

# Constants #

# Prefix prepended to the input file name when naming the output CSV.
OUT_PFX = 'pKa.'

# Inverse of the standard concentration in Angstrom ^ 3 / molecule
inv_C_0 = 1660.0

# Defaults #

# Glob pattern used to find input files in directory-search mode.
DEF_FILE_PAT = 'rad_PMF*'

# Keys/column names for the output CSV rows.
SRC_KEY = 'source_file'
PKA_KEY = 'pKa'
MAX_LOC = 'max_loc'
MAX_VAL = 'max_val'
OUT_KEY_SEQ = [SRC_KEY, PKA_KEY, MAX_LOC, MAX_VAL]

# Type conversions applied to columns read from the input CSV.
KEY_CONV = {FREE_KEY: float,
            CORR_KEY: float,
            COORD_KEY: float,
            MAX_LOC: float,
            MAX_VAL: float}

# Sentinel value written when no local maximum is found in the data.
NO_MAX_RET = np.nan
NO_MAX_ERR = "No local max found"
# Exceptions #
class NoMaxError(Exception):
    """Raised when the free energy data contains no local maximum (and no
    user-supplied TS coordinate was reached) before the data ran out."""
    pass
# Logic #
def write_result(result, src_file, overwrite=False, basedir=None):
    """Writes the result to a file named for the given source file.

    :param result: The result to write.
    :param src_file: The original source file name.
    :param overwrite: Whether to overwrite an existing file name.
    :param basedir: The base directory to target (uses the source file's base
        directory if not specified)
    """
    out_name = create_out_fname(src_file, prefix=OUT_PFX, base_dir=basedir)
    # Respect an existing file unless the caller asked to overwrite.
    if not allow_write(out_name, overwrite=overwrite):
        return
    write_csv(result, out_name, OUT_KEY_SEQ)
def calc_pka(file_data, kbt, coord_ts=None):
    """Calculates the proton dissociation constant (PKA) for the given free energy data.

    Accumulates 4*pi*r^2*exp(-corr/kBT)*delta_r over the coordinate up to
    (and including) the stopping point — either the first interior local
    maximum of the corrected free energy, or the first coordinate at or
    beyond the user-supplied TS coordinate — then converts the sum to a pKa
    via -log10(inv_C_0 / sum).

    :param file_data: The list of dicts to process.
    :param kbt: The experimental temperature multiplied by Boltzmann's Constant.
    :param coord_ts: specified user parameter; integrate to this coordinate value
    :return: tuple of (pKa, corr value at stop, coordinate at stop).
    :raises NoMaxError: if the end of the data is reached without finding a
        stopping point.
    """
    sum_for_pka = 0.0
    data_len = len(file_data)
    last_idx = data_len - 1
    for i in range(data_len):
        # The last point has no forward neighbor (no spacing, cannot be an
        # interior max); reaching it means no stopping point was found.
        if i == last_idx:
            raise NoMaxError(NO_MAX_ERR)
        cur_coord = file_data[i][COORD_KEY]
        cur_corr = file_data[i][CORR_KEY]
        # Skip points whose corrected free energy is infinite.
        if math.isinf(cur_corr):
            continue
        # Forward spacing; computed per step so non-uniform grids work.
        delta_r = file_data[i + 1][COORD_KEY] - cur_coord
        # Accumulate BEFORE testing the stop condition, so the stopping
        # point's own contribution is included in the integral.
        sum_for_pka += 4.0 * math.pi * cur_coord ** 2 * math.exp(-cur_corr / kbt) * delta_r
        # A local max needs a predecessor; the first point only contributes.
        if i == 0:
            continue
        if coord_ts is None:
            # Stop at the first interior local maximum of the corrected energy.
            if cur_corr > file_data[i - 1][CORR_KEY] and cur_corr > file_data[i + 1][CORR_KEY]:
                logger.info("Found local max '{:8.3f}' at coordinate '{:8.3f}'".format(cur_corr, cur_coord))
                return -math.log10(inv_C_0 / sum_for_pka), cur_corr, cur_coord
        else:
            # Stop once the user-supplied TS coordinate has been reached.
            if cur_coord >= coord_ts:
                logger.info("Integrating to input TS coordinate '{:8.3f}' with value '{:8.3f}'".format(cur_coord,
                                                                                                       cur_corr))
                return -math.log10(inv_C_0 / sum_for_pka), cur_corr, cur_coord
# CLI Processing #
def parse_cmdline(argv):
    """
    Returns the parsed argument list and return code.

    :param argv: is a list of arguments, or `None` for ``sys.argv[1:]``.
    """
    if argv is None:
        argv = sys.argv[1:]

    # Build the argument parser.
    arg_parser = argparse.ArgumentParser(description='Calculates the proton dissociation constant '
                                                     '(PKA) for the given radially-corrected free '
                                                     'energy data for a set of coordinates.')
    arg_parser.add_argument("-d", "--base_dir", help="The starting point for a file search "
                                                     "(defaults to current directory)",
                            default=os.getcwd())
    arg_parser.add_argument("-f", "--src_file", help="The single file to read from (takes precedence "
                                                     "over base_dir)")
    arg_parser.add_argument('-p', "--pattern", help="The file pattern to search for "
                                                    "(defaults to '{}')".format(DEF_FILE_PAT),
                            default=DEF_FILE_PAT)
    arg_parser.add_argument('-o', "--overwrite", help='Overwrite existing target file',
                            action='store_true')
    arg_parser.add_argument('-c', "--coord_ts", help='Manually entered coordinate of TS. '
                                                     'Used in place of first local maximum.',
                            type=float)
    arg_parser.add_argument("temp", help="The temperature in Kelvin for the simulation", type=float)

    try:
        return arg_parser.parse_args(argv), GOOD_RET
    except SystemExit as e:
        # argparse signals bad input via SystemExit; report and keep going.
        warning(e)
        arg_parser.print_help()
        return [], INPUT_ERROR
def main(argv=None):
    """ Runs the main program.

    :param argv: The command line arguments.
    :return: The return code for the program's termination.
    """
    args, ret = parse_cmdline(argv)
    if ret != GOOD_RET:
        return ret

    # Boltzmann's constant times the requested temperature; used in the
    # Boltzmann factor inside calc_pka.
    kbt = calc_kbt(args.temp)
    if args.coord_ts is not None:
        logger.info("Read TS coordinate value: '{:8.3f}'".format(args.coord_ts))
    try:
        if args.src_file is not None:
            # Single-file mode: process just the named file.
            file_data = read_csv(args.src_file, data_conv=KEY_CONV)
            f_base_name = os.path.basename(args.src_file)
            try:
                pka, cur_corr, cur_coord = calc_pka(file_data, kbt, args.coord_ts)
                result = [{SRC_KEY: f_base_name, PKA_KEY: pka, MAX_VAL: cur_corr, MAX_LOC: cur_coord}]
            except NoMaxError:
                # Record NaN sentinels rather than aborting the run.
                result = [{SRC_KEY: f_base_name, PKA_KEY: NO_MAX_RET, MAX_VAL: NO_MAX_RET, MAX_LOC: NO_MAX_RET}]
            write_result(result, args.src_file, args.overwrite)
        else:
            # Directory mode: search base_dir for files matching the pattern.
            found_files = find_files_by_dir(args.base_dir, args.pattern)
            logger.debug("Found '{}' dirs with files to process".format(len(found_files)))
            if len(found_files) == 0:
                raise IOError("No files found in specified directory '{}'".format(args.base_dir))
            for f_dir, files in found_files.items():
                results = []
                for pmf_path, fname in ([(os.path.join(f_dir, tgt), tgt) for tgt in sorted(files)]):
                    file_data = read_csv(pmf_path, data_conv=KEY_CONV)
                    try:
                        pka, cur_corr, cur_coord = calc_pka(file_data, kbt, args.coord_ts)
                        results.append({SRC_KEY: fname, PKA_KEY: pka, MAX_VAL: cur_corr, MAX_LOC: cur_coord})
                    except NoMaxError:
                        results.append({SRC_KEY: fname, PKA_KEY: NO_MAX_RET, MAX_VAL: NO_MAX_RET,
                                        MAX_LOC: NO_MAX_RET})
                # One combined output file per directory of inputs.
                write_result(results, os.path.basename(f_dir), args.overwrite,
                             basedir=os.path.dirname(f_dir))
    except IOError as e:
        warning(e)
        return IO_ERROR

    return GOOD_RET  # success
if __name__ == '__main__':
    # Propagate the program's return code to the shell.
    sys.exit(main())
| |
# Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import os
import paramiko
import time
from oslo_concurrency import processutils as putils
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume.drivers.hitachi import hnas_backend
# Canned HNAS CLI outputs returned by the mocked ssh_execute calls in the
# tests below.  The exact spacing/layout of these strings matters: the
# backend parses them column-wise.

# Sample 'evsfs list' style output mapping file systems to EVSes.
evsfs_list = "\n\
FS ID FS Label FS Permanent ID EVS ID EVS Label\n\
----- ----------- ------------------ ------ ---------\n\
1026 gold 0xaadee0e035cfc0b7 1 EVS-Manila\n\
1029 test_hdp 0xaadee09634acfcac 1 EVS-Manila\n\
1030 fs-cinder 0xaadfcf742fba644e 2 EVS-Cinder\n\
1031 cinder2 0xaadfcf7e0769a6bc 3 EVS-Test\n\
1024 fs02-husvm 0xaac8715e2e9406cd 3 EVS-Test\n\
\n"

cluster_getmac = "cluster MAC: 83-68-96-AA-DA-5D"

# Sample 'ver' output: model, software and hardware identification.
version = "\n\
Model: HNAS 4040 \n\n\
Software: 11.2.3319.14 (built 2013-09-19 12:34:24+01:00) \n\n\
Hardware: NAS Platform (M2SEKW1339109) \n\n\
board MMB1 \n\
mmb 11.2.3319.14 release (2013-09-19 12:34:24+01:00)\n\n\
board MFB1 \n\
mfb1hw MB v0883 WL v002F TD v002F FD v002F TC v0059 \
RY v0059 TY v0059 IC v0059 WF v00E2 FS v00E2 OS v00E2 \
WD v00E2 DI v001A FC v0002 \n\
Serial no B1339745 (Thu Jan 1 00:00:50 2009) \n\n\
board MCP \n\
Serial no B1339109 (Thu Jan 1 00:00:49 2009) \n\
\n"

# Sample EVS/admin IP address table.
evsipaddr = "\n\
EVS Type Label IP Address Mask Port \n\
---------- --------------- ------------------ --------------- ------\n\
admin hnas4040 192.0.2.2 255.255.255.0 eth1 \n\
admin hnas4040 172.24.44.15 255.255.255.0 eth0 \n\
evs 1 EVSTest1 172.24.44.20 255.255.255.0 ag1 \n\
evs 1 EVSTest1 10.0.0.20 255.255.255.0 ag1 \n\
evs 2 EVSTest2 172.24.44.21 255.255.255.0 ag1 \n\
\n"

# 'df -f' output variants: GB-sized, TB-sized, and single-EVS layouts.
df_f = "\n\
ID Label EVS Size Used Snapshots Deduped Avail \
Thin ThinSize ThinAvail FS Type\n\
---- ---------- --- ------ ------------ --------- ------- ------------ \
---- -------- --------- --------------------\n\
1025 fs-cinder 2 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \
No 32 KB,WFS-2,128 DSBs\n\
\n"

df_f_tb = "\n\
ID Label EVS Size Used Snapshots Deduped Avail \
Thin ThinSize ThinAvail FS Type\n\
---- ---------- --- ------ ------------ --------- ------- ------------ \
---- -------- --------- --------------------\n\
1025 fs-cinder 2 250 TB 21.4 TB (9%) 0 B (0%) NA 228 TB (91%) \
No 32 KB,WFS-2,128 DSBs\n\
\n"

# NFS export detail output (GB sizes).
nfs_export = "\n\
Export name: /export01-husvm \n\
Export path: /export01-husvm \n\
File system label: fs-cinder \n\
File system size: 250 GB \n\
File system free space: 228 GB \n\
File system state: \n\
formatted = Yes \n\
mounted = Yes \n\
failed = No \n\
thin provisioned = No \n\
Access snapshots: Yes \n\
Display snapshots: Yes \n\
Read Caching: Disabled \n\
Disaster recovery setting: \n\
Recovered = No \n\
Transfer setting = Use file system default \n\n\
Export configuration: \n\
127.0.0.1 \n\
\n"

# iSCSI target listing with a single target and two LUs.
iscsi_one_target = "\n\
Alias : cinder-default \n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default \n\
Comment : \n\
Secret : pxr6U37LZZJBoMc \n\
Authentication : Enabled \n\
Logical units : No logical units. \n\
\n\
LUN Logical Unit \n\
---- -------------------------------- \n\
0 cinder-lu \n\
1 volume-99da7ae7-1e7f-4d57-8bf... \n\
\n\
Access configuration: \n\
"

df_f_single_evs = "\n\
ID Label Size Used Snapshots Deduped Avail \
Thin ThinSize ThinAvail FS Type\n\
---- ---------- ------ ------------ --------- ------- ------------ \
---- -------- --------- --------------------\n\
1025 fs-cinder 250 GB 21.4 GB (9%) 0 B (0%) NA 228 GB (91%) \
No 32 KB,WFS-2,128 DSBs\n\
\n"

nfs_export_tb = "\n\
Export name: /export01-husvm \n\
Export path: /export01-husvm \n\
File system label: fs-cinder \n\
File system size: 250 TB \n\
File system free space: 228 TB \n\
\n"

# Export output for a file system that is not available.
nfs_export_not_available = "\n\
Export name: /export01-husvm \n\
Export path: /export01-husvm \n\
File system label: fs-cinder \n\
*** not available *** \n\
\n"

# 'evs list' output covering cluster, admin and service EVSes.
evs_list = "\n\
Node EVS ID Type Label Enabled Status IP Address Port \n\
---- ------ ------- --------------- ------- ------ ------------------- ---- \n\
1 Cluster hnas4040 Yes Online 192.0.2.200 eth1 \n\
1 0 Admin hnas4040 Yes Online 192.0.2.2 eth1 \n\
172.24.44.15 eth0 \n\
172.24.49.101 ag2 \n\
1 1 Service EVS-Manila Yes Online 172.24.49.32 ag2 \n\
172.24.48.32 ag4 \n\
1 2 Service EVS-Cinder Yes Online 172.24.49.21 ag2 \n\
1 3 Service EVS-Test Yes Online 192.168.100.100 ag2 \n\
\n"

# iSCSI logical-unit listings (GB- and TB-sized LUs).
iscsilu_list = "Name : cinder-lu \n\
Comment: \n\
Path : /.cinder/cinder-lu.iscsi \n\
Size : 2 GB \n\
File System : fs-cinder \n\
File System Mounted : YES \n\
Logical Unit Mounted: No"

iscsilu_list_tb = "Name : test-lu \n\
Comment: \n\
Path : /.cinder/test-lu.iscsi \n\
Size : 2 TB \n\
File System : fs-cinder \n\
File System Mounted : YES \n\
Logical Unit Mounted: No"

# Combined listing built from the two LU fixtures above.
hnas_fs_list = "%(l1)s\n\n%(l2)s\n\n " % {'l1': iscsilu_list,
                                          'l2': iscsilu_list_tb}

add_targetsecret = "Target created successfully."

# Listing with two iSCSI targets, one without a secret.
iscsi_target_list = "\n\
Alias : cinder-GoldIsh\n\
Globally unique name: iqn.2015-06.10.10.10.10:evstest1.cinder-goldish\n\
Comment :\n\
Secret : None\n\
Authentication : Enabled\n\
Logical units : No logical units.\n\
Access configuration :\n\
\n\
Alias : cinder-default\n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default\n\
Comment :\n\
Secret : pxr6U37LZZJBoMc\n\
Authentication : Enabled\n\
Logical units : Logical units :\n\
\n\
LUN Logical Unit\n\
---- --------------------------------\n\
0 cinder-lu\n\
1 volume-99da7ae7-1e7f-4d57-8bf...\n\
\n\
Access configuration :\n\
"

# Driver options used to construct the HNASSSHBackend under test.
backend_opts = {'mgmt_ip0': '0.0.0.0',
                'cluster_admin_ip0': None,
                'ssh_port': '22',
                'username': 'supervisor',
                'password': 'supervisor',
                'ssh_private_key': 'test_key'}

# Target listing with CHAP authentication disabled (empty secret).
target_chap_disable = "\n\
Alias : cinder-default \n\
Globally unique name: iqn.2014-12.10.10.10.10:evstest1.cinder-default \n\
Comment : \n\
Secret : \n\
Authentication : Disabled \n\
Logical units : No logical units. \n\
\n\
LUN Logical Unit \n\
---- -------------------------------- \n\
0 cinder-lu \n\
1 volume-99da7ae7-1e7f-4d57-8bf... \n\
\n\
Access configuration: \n\
"

# 'file-clone-stat' outputs: clone with snapshot-files, and the
# per-snapshot-file reference listings.
file_clone_stat = "Clone: /nfs_cinder/cinder-lu \n\
SnapshotFile: FileHandle[00000000004010000d20116826ffffffffffffff] \n\
\n\
SnapshotFile: FileHandle[00000000004029000d81f26826ffffffffffffff] \n\
"

file_clone_stat_snap_file1 = "\
FileHandle[00000000004010000d20116826ffffffffffffff] \n\n\
References: \n\
Clone: /nfs_cinder/cinder-lu \n\
Clone: /nfs_cinder/snapshot-lu-1 \n\
Clone: /nfs_cinder/snapshot-lu-2 \n\
"

file_clone_stat_snap_file2 = "\
FileHandle[00000000004010000d20116826ffffffffffffff] \n\n\
References: \n\
Clone: /nfs_cinder/volume-not-used \n\
Clone: /nfs_cinder/snapshot-1 \n\
Clone: /nfs_cinder/snapshot-2 \n\
"

# Error output produced when the queried file is not a clone.
not_a_clone = "\
file-clone-stat: failed to get predecessor snapshot-files: File is not a clone"
class HDSHNASBackendTest(test.TestCase):
    """Unit tests for the HNAS SSH backend helper.

    Each test mocks ``_run_cmd`` (or the SSH plumbing beneath it) with the
    canned SSC console output defined at module level, then checks both the
    parsed result and the exact console commands issued.
    """
    def __init__(self, *args, **kwargs):
        super(HDSHNASBackendTest, self).__init__(*args, **kwargs)
    def setUp(self):
        super(HDSHNASBackendTest, self).setUp()
        # Fresh backend per test, built from the module-level option dict.
        self.hnas_backend = hnas_backend.HNASSSHBackend(backend_opts)
    def test_run_cmd(self):
        """A successful SSH command returns the remote command's stdout."""
        self.mock_object(os.path, 'isfile',
                         mock.Mock(return_value=True))
        self.mock_object(utils, 'execute')
        self.mock_object(time, 'sleep')
        self.mock_object(paramiko, 'SSHClient')
        self.mock_object(paramiko.RSAKey, 'from_private_key_file')
        self.mock_object(putils, 'ssh_execute',
                         mock.Mock(return_value=(df_f, '')))
        out, err = self.hnas_backend._run_cmd('ssh', '0.0.0.0',
                                              'supervisor', 'supervisor',
                                              'df', '-a')
        self.assertIn('fs-cinder', out)
        self.assertIn('WFS-2,128 DSBs', out)
    def test_run_cmd_retry_exception(self):
        """Repeated connection failures end in HNASConnError after retries."""
        self.hnas_backend.cluster_admin_ip0 = '172.24.44.11'
        exceptions = [putils.ProcessExecutionError(stderr='Connection reset'),
                      putils.ProcessExecutionError(stderr='Failed to establish'
                                                          ' SSC connection'),
                      putils.ProcessExecutionError(stderr='Connection reset'),
                      putils.ProcessExecutionError(stderr='Connection reset'),
                      putils.ProcessExecutionError(stderr='Connection reset')]
        self.mock_object(os.path, 'isfile',
                         mock.Mock(return_value=True))
        self.mock_object(utils, 'execute')
        self.mock_object(time, 'sleep')
        self.mock_object(paramiko, 'SSHClient')
        self.mock_object(paramiko.RSAKey, 'from_private_key_file')
        self.mock_object(putils, 'ssh_execute',
                         mock.Mock(side_effect=exceptions))
        self.assertRaises(exception.HNASConnError, self.hnas_backend._run_cmd,
                          'ssh', '0.0.0.0', 'supervisor', 'supervisor', 'df',
                          '-a')
    def test_run_cmd_exception_without_retry(self):
        """Errors that are not connection-related propagate unretried."""
        self.mock_object(os.path, 'isfile',
                         mock.Mock(return_value=True))
        self.mock_object(utils, 'execute')
        self.mock_object(time, 'sleep')
        self.mock_object(paramiko, 'SSHClient')
        self.mock_object(paramiko.RSAKey, 'from_private_key_file')
        self.mock_object(putils, 'ssh_execute',
                         mock.Mock(side_effect=putils.ProcessExecutionError
                                   (stderr='Error')))
        self.assertRaises(putils.ProcessExecutionError,
                          self.hnas_backend._run_cmd, 'ssh', '0.0.0.0',
                          'supervisor', 'supervisor', 'df', '-a')
    def test_get_targets_empty_list(self):
        """'No targets' console output parses to an empty list."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=('No targets', '')))
        out = self.hnas_backend._get_targets('2')
        self.assertEqual([], out)
    def test_get_targets_not_found(self):
        """Filtering targets by an unmapped volume yields an empty list."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(iscsi_target_list, '')))
        out = self.hnas_backend._get_targets('2', 'fake-volume')
        self.assertEqual([], out)
    def test__get_unused_luid_number_0(self):
        """LU id 0 is free when the target only maps ids 1 and 2."""
        tgt_info = {
            'alias': 'cinder-default',
            'secret': 'pxr6U37LZZJBoMc',
            'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
            'lus': [
                {'id': '1',
                 'name': 'cinder-lu2'},
                {'id': '2',
                 'name': 'volume-test2'}
            ],
            'auth': 'Enabled'
        }
        out = self.hnas_backend._get_unused_luid(tgt_info)
        self.assertEqual(0, out)
    def test__get_unused_no_luns(self):
        """A target with no LUs hands out id 0 first."""
        tgt_info = {
            'alias': 'cinder-default',
            'secret': 'pxr6U37LZZJBoMc',
            'iqn': 'iqn.2014-12.10.10.10.10:evstest1.cinder-default',
            'lus': [],
            'auth': 'Enabled'
        }
        out = self.hnas_backend._get_unused_luid(tgt_info)
        self.assertEqual(0, out)
    def test_get_version(self):
        """Hardware/version info is parsed from two console replies."""
        expected_out = {
            'hardware': 'NAS Platform (M2SEKW1339109)',
            'mac': '83-68-96-AA-DA-5D',
            'version': '11.2.3319.14',
            'model': 'HNAS 4040',
            'serial': 'B1339745'
        }
        # Replies consumed in order: cluster-getmac output, then version.
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[
                             (cluster_getmac, ''),
                             (version, '')]))
        out = self.hnas_backend.get_version()
        self.assertEqual(expected_out, out)
    def test_get_evs(self):
        """The EVS id that owns a filesystem is read from 'evsfs list'."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsfs_list, '')))
        out = self.hnas_backend.get_evs('fs-cinder')
        self.assertEqual('2', out)
    def test_get_export_list(self):
        """Export entries carry fs name, size, free space and path."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(nfs_export, ''),
                                                (evsfs_list, ''),
                                                (evs_list, '')]))
        out = self.hnas_backend.get_export_list()
        self.assertEqual('fs-cinder', out[0]['fs'])
        self.assertEqual(250.0, out[0]['size'])
        self.assertEqual(228.0, out[0]['free'])
        self.assertEqual('/export01-husvm', out[0]['path'])
    def test_get_export_list_data_not_available(self):
        """Unknown export sizes are reported as -1."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(nfs_export_not_available, ''),
                                                (evsfs_list, ''),
                                                (evs_list, '')]))
        out = self.hnas_backend.get_export_list()
        self.assertEqual('fs-cinder', out[0]['fs'])
        self.assertEqual('/export01-husvm', out[0]['path'])
        self.assertEqual(-1, out[0]['size'])
        self.assertEqual(-1, out[0]['free'])
    def test_get_export_list_tb(self):
        """TB-sized exports are converted (x1024) before being reported."""
        size = float(250 * 1024)
        free = float(228 * 1024)
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(nfs_export_tb, ''),
                                                (evsfs_list, ''),
                                                (evs_list, '')]))
        out = self.hnas_backend.get_export_list()
        self.assertEqual('fs-cinder', out[0]['fs'])
        self.assertEqual(size, out[0]['size'])
        self.assertEqual(free, out[0]['free'])
        self.assertEqual('/export01-husvm', out[0]['path'])
    def test_file_clone(self):
        """file_clone issues file-clone-create on the fs's EVS context."""
        path1 = '/.cinder/path1'
        path2 = '/.cinder/path2'
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsfs_list, '')))
        self.hnas_backend.file_clone('fs-cinder', path1, path2)
        calls = [mock.call('evsfs', 'list'), mock.call('console-context',
                                                       '--evs', '2',
                                                       'file-clone-create',
                                                       '-f', 'fs-cinder',
                                                       path1, path2)]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_file_clone_wrong_fs(self):
        """Cloning on an unknown filesystem raises InvalidParameterValue."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsfs_list, '')))
        self.assertRaises(exception.InvalidParameterValue,
                          self.hnas_backend.file_clone, 'fs-fake', 'src',
                          'dst')
    def test_get_evs_info(self):
        """'evsipaddr -l' output maps each IP address to its EVS number."""
        expected_out = {'evs_number': '1'}
        expected_out2 = {'evs_number': '2'}
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsipaddr, '')))
        out = self.hnas_backend.get_evs_info()
        self.hnas_backend._run_cmd.assert_called_with('evsipaddr', '-l')
        self.assertEqual(expected_out, out['10.0.0.20'])
        self.assertEqual(expected_out, out['172.24.44.20'])
        self.assertEqual(expected_out2, out['172.24.44.21'])
    def test_get_fs_info(self):
        """Filesystem info combines df, evsfs list and iscsi-lu output."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(df_f, ''),
                                                (evsfs_list, ''),
                                                (hnas_fs_list, '')]))
        out = self.hnas_backend.get_fs_info('fs-cinder')
        self.assertEqual('2', out['evs_id'])
        self.assertEqual('fs-cinder', out['label'])
        self.assertEqual('228', out['available_size'])
        self.assertEqual('250', out['total_size'])
        self.assertEqual(2050.0, out['provisioned_capacity'])
    def test_get_fs_empty_return(self):
        """An unmounted filesystem yields an empty dict."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=('Not mounted', '')))
        out = self.hnas_backend.get_fs_info('fs-cinder')
        self.assertEqual({}, out)
    def test_get_fs_info_single_evs(self):
        """get_fs_info also parses df output from a single-EVS system."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(df_f_single_evs, ''),
                                                (evsfs_list, ''),
                                                (hnas_fs_list, '')]))
        out = self.hnas_backend.get_fs_info('fs-cinder')
        self.assertEqual('fs-cinder', out['label'])
        self.assertEqual('228', out['available_size'])
        self.assertEqual('250', out['total_size'])
        self.assertEqual(2050.0, out['provisioned_capacity'])
    def test_get_fs_tb(self):
        """TB-sized filesystem figures are converted before reporting."""
        available_size = float(228 * 1024 ** 2)
        total_size = float(250 * 1024 ** 2)
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(df_f_tb, ''),
                                                (evsfs_list, ''),
                                                (hnas_fs_list, '')]))
        out = self.hnas_backend.get_fs_info('fs-cinder')
        self.assertEqual('fs-cinder', out['label'])
        self.assertEqual(str(available_size), out['available_size'])
        self.assertEqual(str(total_size), out['total_size'])
        self.assertEqual(2050.0, out['provisioned_capacity'])
    def test_get_fs_single_evs_tb(self):
        """TB filesystem on a single EVS.

        NOTE(review): this uses the same df_f_tb fixture as test_get_fs_tb
        rather than a single-EVS one - possibly a copy/paste oversight.
        """
        available_size = float(228 * 1024 ** 2)
        total_size = float(250 * 1024 ** 2)
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(df_f_tb, ''),
                                                (evsfs_list, ''),
                                                (hnas_fs_list, '')]))
        out = self.hnas_backend.get_fs_info('fs-cinder')
        self.assertEqual('fs-cinder', out['label'])
        self.assertEqual(str(available_size), out['available_size'])
        self.assertEqual(str(total_size), out['total_size'])
        self.assertEqual(2050.0, out['provisioned_capacity'])
    def test_create_lu(self):
        """create_lu issues 'iscsi-lu add' with the .iscsi path and size."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsfs_list, '')))
        self.hnas_backend.create_lu('fs-cinder', '128', 'cinder-lu')
        calls = [mock.call('evsfs', 'list'), mock.call('console-context',
                                                       '--evs', '2',
                                                       'iscsi-lu', 'add',
                                                       '-e', 'cinder-lu',
                                                       'fs-cinder',
                                                       '/.cinder/cinder-lu.'
                                                       'iscsi', '128G')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_delete_lu(self):
        """delete_lu issues 'iscsi-lu del -d -f'."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsfs_list, '')))
        self.hnas_backend.delete_lu('fs-cinder', 'cinder-lu')
        calls = [mock.call('evsfs', 'list'), mock.call('console-context',
                                                       '--evs', '2',
                                                       'iscsi-lu', 'del', '-d',
                                                       '-f', 'cinder-lu')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_extend_lu(self):
        """extend_lu issues 'iscsi-lu expand' with the new size."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsfs_list, '')))
        self.hnas_backend.extend_lu('fs-cinder', '128', 'cinder-lu')
        calls = [mock.call('evsfs', 'list'), mock.call('console-context',
                                                       '--evs', '2',
                                                       'iscsi-lu', 'expand',
                                                       'cinder-lu', '128G')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_cloned_lu(self):
        """create_cloned_lu issues 'iscsi-lu clone' with the snap path."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsfs_list, '')))
        self.hnas_backend.create_cloned_lu('cinder-lu', 'fs-cinder', 'snap')
        calls = [mock.call('evsfs', 'list'), mock.call('console-context',
                                                       '--evs', '2',
                                                       'iscsi-lu', 'clone',
                                                       '-e', 'cinder-lu',
                                                       'snap',
                                                       '/.cinder/snap.iscsi')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_get_existing_lu_info(self):
        """Existing LU info reports name, filesystem and size."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (iscsilu_list, '')]))
        out = self.hnas_backend.get_existing_lu_info('cinder-lu', None, None)
        self.assertEqual('cinder-lu', out['name'])
        self.assertEqual('fs-cinder', out['filesystem'])
        self.assertEqual(2.0, out['size'])
    def test_get_existing_lu_info_tb(self):
        """TB LU sizes are converted (x1024) before being reported."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (iscsilu_list_tb, '')]))
        out = self.hnas_backend.get_existing_lu_info('test-lu', None, None)
        self.assertEqual('test-lu', out['name'])
        self.assertEqual('fs-cinder', out['filesystem'])
        self.assertEqual(2048.0, out['size'])
    def test_rename_existing_lu(self):
        """rename_existing_lu issues 'iscsi-lu mod -n' with a quoted name."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsfs_list, '')))
        self.hnas_backend.rename_existing_lu('fs-cinder', 'cinder-lu',
                                             'new-lu-name')
        calls = [mock.call('evsfs', 'list'), mock.call('console-context',
                                                       '--evs', '2',
                                                       'iscsi-lu', 'mod', '-n',
                                                       "'new-lu-name'",
                                                       'cinder-lu')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_check_lu(self):
        """A mapped LU is found with its owning target and secret."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (iscsi_target_list, '')]))
        out = self.hnas_backend.check_lu('cinder-lu', 'fs-cinder')
        self.assertEqual('cinder-lu', out['tgt']['lus'][0]['name'])
        self.assertEqual('pxr6U37LZZJBoMc', out['tgt']['secret'])
        self.assertTrue(out['mapped'])
        calls = [mock.call('evsfs', 'list'), mock.call('console-context',
                                                       '--evs', '2',
                                                       'iscsi-target', 'list')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_check_lu_not_found(self):
        """An unmapped volume reports mapped=False, id 0 and no target."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (iscsi_target_list, '')]))
        # passing a volume fake-volume not mapped
        out = self.hnas_backend.check_lu('fake-volume', 'fs-cinder')
        self.assertFalse(out['mapped'])
        self.assertEqual(0, out['id'])
        self.assertIsNone(out['tgt'])
    def test_add_iscsi_conn(self):
        """An already-mapped LU's connection info is returned as-is."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (iscsi_target_list, ''),
                                                (evsfs_list, '')]))
        out = self.hnas_backend.add_iscsi_conn('cinder-lu', 'fs-cinder', 3260,
                                               'cinder-default', 'initiator')
        self.assertEqual('cinder-lu', out['lu_name'])
        self.assertEqual('fs-cinder', out['fs'])
        self.assertEqual('0', out['lu_id'])
        self.assertEqual(3260, out['port'])
        calls = [mock.call('evsfs', 'list'),
                 mock.call('console-context', '--evs', '2', 'iscsi-target',
                           'list')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_add_iscsi_conn_not_mapped_volume(self):
        """An unmapped LU gets mapped to the next free id (2 here)."""
        not_mapped = {'mapped': False,
                      'id': 0,
                      'tgt': None}
        self.mock_object(self.hnas_backend, 'check_lu',
                         mock.Mock(return_value=not_mapped))
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (iscsi_target_list, ''),
                                                ('', '')]))
        out = self.hnas_backend.add_iscsi_conn('cinder-lu', 'fs-cinder', 3260,
                                               'cinder-default', 'initiator')
        self.assertEqual('cinder-lu', out['lu_name'])
        self.assertEqual('fs-cinder', out['fs'])
        self.assertEqual(2, out['lu_id'])
        self.assertEqual(3260, out['port'])
        calls = [mock.call('evsfs', 'list'),
                 mock.call('console-context', '--evs', '2', 'iscsi-target',
                           'list')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_del_iscsi_conn(self):
        """A mapped LU id is removed with 'iscsi-target dellu'."""
        iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-default'
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(iscsi_one_target, '')))
        self.hnas_backend.del_iscsi_conn('2', iqn, '0')
        calls = [mock.call('console-context', '--evs', '2', 'iscsi-target',
                           'list', iqn),
                 mock.call('console-context', '--evs', '2', 'iscsi-target',
                           'dellu', '-f', iqn, '0')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_del_iscsi_conn_volume_not_found(self):
        """Deleting an unmapped LU id stops after the target listing."""
        iqn = 'iqn.2014-12.10.10.10.10:evstest1.cinder-fake'
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(iscsi_one_target, '')))
        self.hnas_backend.del_iscsi_conn('2', iqn, '10')
        self.hnas_backend._run_cmd.assert_called_with('console-context',
                                                      '--evs', '2',
                                                      'iscsi-target', 'list',
                                                      iqn)
    def test_check_target(self):
        """An existing target is found with its alias, LUs and secret."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (iscsi_target_list, '')]))
        out = self.hnas_backend.check_target('fs-cinder', 'cinder-default')
        self.assertTrue(out['found'])
        self.assertEqual('cinder-lu', out['tgt']['lus'][0]['name'])
        self.assertEqual('cinder-default', out['tgt']['alias'])
        self.assertEqual('pxr6U37LZZJBoMc', out['tgt']['secret'])
    def test_check_target_not_found(self):
        """An unknown target alias reports found=False and no target."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (iscsi_target_list, '')]))
        out = self.hnas_backend.check_target('fs-cinder', 'cinder-fake')
        self.assertFalse(out['found'])
        self.assertIsNone(out['tgt'])
    def test_set_target_secret(self):
        """set_target_secret enables CHAP via 'iscsi-target mod'."""
        targetalias = 'cinder-default'
        secret = 'pxr6U37LZZJBoMc'
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsfs_list, '')))
        self.hnas_backend.set_target_secret(targetalias, 'fs-cinder', secret)
        calls = [mock.call('evsfs', 'list'),
                 mock.call('console-context', '--evs', '2', 'iscsi-target',
                           'mod', '-s', 'pxr6U37LZZJBoMc', '-a', 'enable',
                           'cinder-default')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_set_target_secret_empty_target_list(self):
        """The secret is still set when the target list lookup fails."""
        targetalias = 'cinder-default'
        secret = 'pxr6U37LZZJBoMc'
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                ('does not exist', ''),
                                                ('', '')]))
        self.hnas_backend.set_target_secret(targetalias, 'fs-cinder', secret)
        calls = [mock.call('console-context', '--evs', '2', 'iscsi-target',
                           'mod', '-s', 'pxr6U37LZZJBoMc', '-a', 'enable',
                           'cinder-default')]
        self.hnas_backend._run_cmd.assert_has_calls(calls, any_order=False)
    def test_get_target_secret(self):
        """The CHAP secret is parsed from the target listing."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (iscsi_one_target, '')]))
        out = self.hnas_backend.get_target_secret('cinder-default',
                                                  'fs-cinder')
        self.assertEqual('pxr6U37LZZJBoMc', out)
        self.hnas_backend._run_cmd.assert_called_with('console-context',
                                                      '--evs', '2',
                                                      'iscsi-target', 'list',
                                                      'cinder-default')
    def test_get_target_secret_chap_disabled(self):
        """A CHAP-disabled target yields an empty secret."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (target_chap_disable, '')]))
        out = self.hnas_backend.get_target_secret('cinder-default',
                                                  'fs-cinder')
        self.assertEqual('', out)
        self.hnas_backend._run_cmd.assert_called_with('console-context',
                                                      '--evs', '2',
                                                      'iscsi-target', 'list',
                                                      'cinder-default')
    def test_get_target_iqn(self):
        """The target's globally unique IQN is parsed from the listing."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (iscsi_one_target, ''),
                                                (add_targetsecret, '')]))
        out = self.hnas_backend.get_target_iqn('cinder-default', 'fs-cinder')
        self.assertEqual('iqn.2014-12.10.10.10.10:evstest1.cinder-default',
                         out)
    def test_create_target(self):
        """create_target runs without error against the mocked console."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(return_value=(evsfs_list, '')))
        self.hnas_backend.create_target('cinder-default', 'fs-cinder',
                                        'pxr6U37LZZJBoMc')
    def test_check_snapshot_parent_true(self):
        """The snapshot is found among the clone's snapshot-file refs."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(
                             side_effect=[(evsfs_list, ''),
                                          (file_clone_stat, ''),
                                          (file_clone_stat_snap_file1, ''),
                                          (file_clone_stat_snap_file2, '')]))
        out = self.hnas_backend.check_snapshot_parent('cinder-lu',
                                                      'snapshot-lu-1',
                                                      'fs-cinder')
        self.assertTrue(out)
        self.hnas_backend._run_cmd.assert_called_with('console-context',
                                                      '--evs', '2',
                                                      'file-clone-stat'
                                                      '-snapshot-file', '-f',
                                                      'fs-cinder',
                                                      '00000000004010000d2011'
                                                      '6826ffffffffffffff]')
    def test_check_snapshot_parent_false(self):
        """A snapshot absent from all refs makes the check return False."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(
                             side_effect=[(evsfs_list, ''),
                                          (file_clone_stat, ''),
                                          (file_clone_stat_snap_file1, ''),
                                          (file_clone_stat_snap_file2, '')]))
        out = self.hnas_backend.check_snapshot_parent('cinder-lu',
                                                      'snapshot-lu-3',
                                                      'fs-cinder')
        self.assertFalse(out)
        self.hnas_backend._run_cmd.assert_called_with('console-context',
                                                      '--evs', '2',
                                                      'file-clone-stat'
                                                      '-snapshot-file', '-f',
                                                      'fs-cinder',
                                                      '00000000004029000d81f26'
                                                      '826ffffffffffffff]')
    def test_check_a_not_cloned_file(self):
        """A non-clone file raises ManageExistingInvalidReference."""
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(
                             side_effect=[(evsfs_list, ''),
                                          (not_a_clone, '')]))
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.hnas_backend.check_snapshot_parent,
                          'cinder-lu', 'snapshot-name', 'fs-cinder')
    def test_get_export_path(self):
        """The export path is read back from 'nfs-export list'."""
        export_out = '/export01-husvm'
        self.mock_object(self.hnas_backend, '_run_cmd',
                         mock.Mock(side_effect=[(evsfs_list, ''),
                                                (nfs_export, '')]))
        out = self.hnas_backend.get_export_path(export_out, 'fs-cinder')
        self.assertEqual(export_out, out)
        self.hnas_backend._run_cmd.assert_called_with('console-context',
                                                      '--evs', '2',
                                                      'nfs-export', 'list',
                                                      export_out)
| |
"""Generic Z-Wave Entity Class."""
from __future__ import annotations
import logging
from zwave_js_server.client import Client as ZwaveClient
from zwave_js_server.const import NodeStatus
from zwave_js_server.model.value import Value as ZwaveValue, get_value_id
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .discovery import ZwaveDiscoveryInfo
from .helpers import get_device_id, get_unique_id
from .migrate import async_add_migration_entity_value
LOGGER = logging.getLogger(__name__)
# Event names emitted by the zwave_js_server node objects; ZWaveBaseEntity
# subscribes to these in async_added_to_hass.
EVENT_VALUE_UPDATED = "value updated"
EVENT_DEAD = "dead"
EVENT_ALIVE = "alive"
class ZWaveBaseEntity(Entity):
    """Generic Entity Class for a Z-Wave Device."""
    def __init__(
        self, config_entry: ConfigEntry, client: ZwaveClient, info: ZwaveDiscoveryInfo
    ) -> None:
        """Initialize a generic Z-Wave device entity."""
        self.config_entry = config_entry
        self.client = client
        self.info = info
        # entities requiring additional values, can add extra ids to this list
        self.watched_value_ids = {self.info.primary_value.value_id}
        if self.info.additional_value_ids_to_watch:
            self.watched_value_ids = self.watched_value_ids.union(
                self.info.additional_value_ids_to_watch
            )
        # Entity class attributes
        self._attr_name = self.generate_name()
        # Unique id is derived from the controller home id + primary value id.
        self._attr_unique_id = get_unique_id(
            self.client.driver.controller.home_id, self.info.primary_value.value_id
        )
        self._attr_entity_registry_enabled_default = (
            self.info.entity_registry_enabled_default
        )
        self._attr_assumed_state = self.info.assumed_state
        # device is precreated in main handler
        self._attr_device_info = {
            "identifiers": {get_device_id(self.client, self.info.node)},
        }
    @callback
    def on_value_update(self) -> None:
        """Call when one of the watched values change.
        To be overridden by platforms needing this event.
        """
    async def async_poll_value(self, refresh_all_values: bool) -> None:
        """Poll a value."""
        # Fast path: refresh only the primary value of this entity.
        if not refresh_all_values:
            self.hass.async_create_task(
                self.info.node.async_poll_value(self.info.primary_value)
            )
            LOGGER.info(
                (
                    "Refreshing primary value %s for %s, "
                    "state update may be delayed for devices on battery"
                ),
                self.info.primary_value,
                self.entity_id,
            )
            return
        # Otherwise poll every value id this entity watches.
        for value_id in self.watched_value_ids:
            self.hass.async_create_task(self.info.node.async_poll_value(value_id))
        LOGGER.info(
            (
                "Refreshing values %s for %s, state update may be delayed for "
                "devices on battery"
            ),
            ", ".join(self.watched_value_ids),
            self.entity_id,
        )
    async def async_added_to_hass(self) -> None:
        """Call when entity is added."""
        # Add value_changed callbacks.
        self.async_on_remove(
            self.info.node.on(EVENT_VALUE_UPDATED, self._value_changed)
        )
        # Re-render availability whenever the node goes alive or dead.
        for status_event in (EVENT_ALIVE, EVENT_DEAD):
            self.async_on_remove(
                self.info.node.on(status_event, self._node_status_alive_or_dead)
            )
        # Allow a dispatcher signal (e.g. from a service call) to trigger a poll.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                f"{DOMAIN}_{self.unique_id}_poll_value",
                self.async_poll_value,
            )
        )
        # Add legacy Z-Wave migration data.
        await async_add_migration_entity_value(
            self.hass, self.config_entry, self.entity_id, self.info
        )
    def generate_name(
        self,
        include_value_name: bool = False,
        alternate_value_name: str | None = None,
        additional_info: list[str] | None = None,
        name_suffix: str | None = None,
    ) -> str:
        """Generate entity name."""
        if additional_info is None:
            additional_info = []
        # Prefer the user-assigned node name, then the device description,
        # then a generic "Node <id>" fallback.
        name: str = (
            self.info.node.name
            or self.info.node.device_config.description
            or f"Node {self.info.node.node_id}"
        )
        if name_suffix:
            name = f"{name} {name_suffix}"
        if include_value_name:
            value_name = (
                alternate_value_name
                or self.info.primary_value.metadata.label
                or self.info.primary_value.property_key_name
                or self.info.primary_value.property_name
            )
            name = f"{name}: {value_name}"
        for item in additional_info:
            if item:
                name += f" - {item}"
        # append endpoint if > 1
        if self.info.primary_value.endpoint > 1:
            name += f" ({self.info.primary_value.endpoint})"
        return name
    @property
    def available(self) -> bool:
        """Return entity availability."""
        # Available only while connected and the node is ready and not dead.
        return (
            self.client.connected
            and bool(self.info.node.ready)
            and self.info.node.status != NodeStatus.DEAD
        )
    @callback
    def _node_status_alive_or_dead(self, event_data: dict) -> None:
        """
        Call when node status changes to alive or dead.
        Should not be overridden by subclasses.
        """
        self.async_write_ha_state()
    @callback
    def _value_changed(self, event_data: dict) -> None:
        """Call when (one of) our watched values changes.
        Should not be overridden by subclasses.
        """
        value_id = event_data["value"].value_id
        # Ignore updates for values this entity does not watch.
        if value_id not in self.watched_value_ids:
            return
        value = self.info.node.values[value_id]
        LOGGER.debug(
            "[%s] Value %s/%s changed to: %s",
            self.entity_id,
            value.property_,
            value.property_key_name,
            value.value,
        )
        self.on_value_update()
        self.async_write_ha_state()
    @callback
    def get_zwave_value(
        self,
        value_property: str | int,
        command_class: int | None = None,
        endpoint: int | None = None,
        value_property_key: int | None = None,
        add_to_watched_value_ids: bool = True,
        check_all_endpoints: bool = False,
    ) -> ZwaveValue | None:
        """Return specific ZwaveValue on this ZwaveNode."""
        # use commandclass and endpoint from primary value if omitted
        return_value = None
        if command_class is None:
            command_class = self.info.primary_value.command_class
        if endpoint is None:
            endpoint = self.info.primary_value.endpoint
        # lookup value by value_id
        value_id = get_value_id(
            self.info.node,
            command_class,
            value_property,
            endpoint=endpoint,
            property_key=value_property_key,
        )
        return_value = self.info.node.values.get(value_id)
        # If we haven't found a value and check_all_endpoints is True, we should
        # return the first value we can find on any other endpoint
        if return_value is None and check_all_endpoints:
            for endpoint_idx in self.info.node.endpoints:
                if endpoint_idx != self.info.primary_value.endpoint:
                    value_id = get_value_id(
                        self.info.node,
                        command_class,
                        value_property,
                        endpoint=endpoint_idx,
                        property_key=value_property_key,
                    )
                    return_value = self.info.node.values.get(value_id)
                    if return_value:
                        break
        # add to watched_ids list so we will be triggered when the value updates
        if (
            return_value
            and return_value.value_id not in self.watched_value_ids
            and add_to_watched_value_ids
        ):
            self.watched_value_ids.add(return_value.value_id)
        return return_value
    @property
    def should_poll(self) -> bool:
        """No polling needed."""
        return False
| |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 22 06:28:16 2015
@author: fergal
$Id$
$URL$
"""
__version__ = "$Id$"
__URL__ = "$URL$"
import numpy as np
import dave.pipeline.clipboard as clipboard
import dave.pipeline.pipeline as pl
import dave.pipeline.task as task
import dave.fileio.nca as nca
import dave.lpp.calcLPPoctave as lpp
import os
def runOne(k2id, config):
    """Run the configured pipeline task list on a single K2 target.

    Inputs:
    k2id    EPIC id of the target to process.
    config  (dict) Pipeline configuration; must contain 'taskList', a list
            of task names resolvable in this module's namespace.

    Returns:
    The clipboard produced by the final task.
    """
    taskNames = config['taskList']

    clip = clipboard.Clipboard()
    clip['config'] = config
    clip['value'] = k2id

    # Fail fast: make sure every task name resolves before doing any work.
    for name in taskNames:
        eval(name)

    # Execute the pipeline, threading the clipboard through each task.
    for name in taskNames:
        clip = eval(name)(clip)

    return clip
def loadMultiEventConfig():
    """Return the default configuration dict for a multi-event search."""
    home = os.environ['HOME']

    cfg = {
        'debug': True,
        'campaign': 3,
        'timeout_sec': 120,
        'nPointsForMedianSmooth': 2 * 48,
        'maxNumEvents': 10,
        'blsMinPeriod': 0.5,
        'blsMaxPeriod': 30,

        # Vetting parameters
        'minSnrForDetection': 10.,
        'maxLppForTransit': 0.03,
        'minCentroidSignifForFp': 3.0,
    }

    # The LPP mapping file is used by LPP to define the regions of param
    # space where the transits cluster.
    cfg['lppMapFilePath'] = os.path.join(
        lpp.getLppDir(), "octave/maps/mapQ1Q17DR24-DVMed6084.mat")
    cfg['modshiftBasename'] = os.path.join(home, "daveOutput", "modshift")
    cfg['onepageBasename'] = os.path.join(home, "daveOutput", "onepage")

    # Location of the place all the light curves and TPF files are stored
    cfg['dataStorePath'] = os.path.join(home, ".mastio/k2")
    # Location of the model PRF fits files.
    cfg['prfPath'] = os.path.join(home, ".mastio/keplerprf")

    # My front end
    cfg['taskList'] = [
        "pl.checkDirExistTask", "pl.serveTask", "pl.extractLightcurveTask",
        "pl.computeCentroidsTask", "pl.rollPhaseTask", "pl.cotrendDataTask",
        "pl.detrendDataTask", "singleEventSearchTask", "pl.saveOnError",
    ]
    cfg['singleEventTaskList'] = ["blsTask", "trapezoidFitTask", "vetTask"]

    cfg['clipSavePath'] = "./clips"
    cfg['keysToIgnoreWhenSaving'] = ["serve"]
    return cfg
@task.task
def multiEventSearchTask(clip):
    """Search for up to config.maxNumEvents events, one at a time.

    Each event found is appended to clip['eventList'].  The loop stops
    early if a sub-search raises an exception or finds no significant
    event; otherwise it halts at the iteration limit.
    """
    clip['eventList'] = []
    for _ in range(clip['config.maxNumEvents']):
        subClip = searchForEvent(clip)
        clip['eventList'].append(subClip)

        if 'exception' in subClip:
            # Propagate the failure details and stop searching.
            clip['exception'] = subClip['exception']
            clip['backtrace'] = subClip['backtrace']
            break

        # Deliberately compare == False (matches False or 0, unlike `not`).
        if subClip['disposition.isSignificantEvent'] == False:
            break

    return clip
@task.task
def singleEventSearchTask(clip):
    """Search for exactly one event and record it on the clipboard."""
    # eventList must exist before searchForEvent runs: that function
    # inspects clip.eventList to pick up flags from a previous event.
    clip['eventList'] = []
    event = searchForEvent(clip)

    if 'exception' in event.keys():
        # Propagate failure details from the sub-search.
        clip['exception'] = event['exception']
        clip['backtrace'] = event['backtrace']

    clip['eventList'].append(event)
    return clip
import dave.fileio.kplrfits as kplrfits
def searchForEvent(clip):
    """Run the per-event tasks (bls, trapfit, modshift, centroids,
    disposition) on a shallow copy of *clip*.

    Returns a new clipboard containing only the keys created by this
    search, plus a 'flags' array with the detected transit cadences marked
    bad so a subsequent search skips them.
    """
    subClip = clip.shallowCopy()
    originalKeyList = subClip.keys()
    #Set the flags attribute of the new subclip
    #Problem with this code is it closely tied to the behaviour
    #of multiEventSearchTask
    try:
        # Continue from the flags of the most recently found event.
        tmp = clip.eventList[-1]
        flags = tmp['flags']
    except (IndexError, KeyError):
        # No previous event: start from the detrend flags.
        flags = clip['detrend.flags']
    subClip['flags'] = flags
    #@TODO List of tasks to run should be config param
    subClip = pl.placeholderBls(subClip)
    subClip = trapezoidFitTask(subClip)
    subClip = modshiftTask(subClip)
    subClip = measureDiffImgCentroidsTask(subClip)
    subClip = dispositionTask(subClip)
    # Keep only the keys the tasks above created, in a fresh clipboard.
    newKeys = list(set(subClip.keys()) - set(originalKeyList))
    out = clipboard.Clipboard(__meta__=subClip['__meta__'])
    for k in newKeys:
        out[k] = subClip[k]
    #Mark all locations for this event as data not to be used.
    time = subClip['serve.time']
    period_days = subClip['trapFit.period_days']
    epoch_bkjd = subClip['trapFit.epoch_bkjd']
    duration_days = subClip['trapFit.duration_hrs'] / 24.
    # assert(np.all(np.isfinite(time[~flags])))
    # assert(np.any(flags))
    idx = kplrfits.markTransitCadences(time, period_days, epoch_bkjd, \
        duration_days, numberOfDurations=2, flags=flags)
    out['flags'] = flags | idx
    return out
import dave.trapezoidFit.estimateSnr as tf
@task.task
def trapezoidFitTask(clip):
    """Fit a trapezoid transit model to the detrended lightcurve.

    Seeds the fit with the BLS period/epoch/duration/depth and stores the
    result under clip['trapFit'].  NOTE: flagged cadences are zeroed in
    flux_norm *in place* and given huge uncertainties so they carry no
    weight in the fit.
    """
    time_days = clip['serve.time']
    flux_norm = clip['detrend.flux_frac']
    flags = clip['flags']
    period_days = clip['bls.period']
    duration_hrs = clip['bls.duration_hrs']
    phase_bkjd = clip['bls.epoch']  #Check this what BLS returns
    depth_frac = clip['bls.depth']
    #We don't know these values; de-weight flagged cadences instead.
    unc = np.ones_like(flux_norm)
    unc[flags] = 1e99
    flux_norm[flags] = 0
    assert(np.all(np.isfinite(time_days[~flags])))
    assert(np.all(np.isfinite(flux_norm[~flags])))
    out = tf.getSnrOfTransit(time_days, flux_norm,\
        unc, flags, \
        period_days, phase_bkjd, duration_hrs, depth_frac)
    assert(len(time_days) == len(out['bestFitModel']))
    clip['trapFit'] = out
    # Enforce contract: raise KeyError now if any expected output is missing.
    clip['trapFit.period_days']
    clip['trapFit.epoch_bkjd']
    clip['trapFit.duration_hrs']
    clip['trapFit.ingress_hrs']
    clip['trapFit.depth_frac']
    clip['trapFit.bestFitModel']
    clip['trapFit.snr']
    return clip
import dave.trapezoidFit.trapfit as trapFit
import dave.vetting.ModShift as ModShift
@task.task
def modshiftTask(clip):
    """Run the model-shift (modshift) flux vetting test on the event.

    Builds a trapezoid model from the trapFit parameters and passes it,
    together with the detrended flux, to the external ModShift tool.
    Results are stored under clip['modshift'].
    """
    time = clip['serve.time']
    flux = clip['detrend.flux_frac']
    fl = clip['flags']
    epic = clip['value']
    basename = clip['config.modshiftBasename'] + "%010i" %(epic)
    period_days = clip['trapFit.period_days']
    epoch_bkjd = clip['trapFit.epoch_bkjd']
    dur_hrs = clip['trapFit.duration_hrs']
    ingress_hrs = clip['trapFit.ingress_hrs']
    depth_ppm = 1e6*clip['trapFit.depth_frac']
    # Number of model samples per cadence.
    subSampleN= 15
    ioBlock = trapFit.trapezoid_model_onemodel(time[~fl], period_days, \
        epoch_bkjd, depth_ppm, dur_hrs, \
        ingress_hrs, subSampleN)
    model = ioBlock.modellc -1   #Want mean of zero
    # model *= -1  #Invert for testing
    # NOTE(review): epic is appended to basename twice (here and above),
    # producing <base><epic>-<epic>; confirm this is intended.
    basename = "%s-%010i" %(basename, epic)
    modplotint=1  # Change to 0 or anything besides 1 to not have modshift produce plot
    out = ModShift.runModShift(time[~fl], flux[~fl], model, basename, \
        "OBJECTNAME", period_days, epoch_bkjd, modplotint)
    clip['modshift'] = out
    #I don't know which values are important, so I can't enforce contract yet
    return clip
import dave.diffimg.centroid as cent
import dave.diffimg.prf as prf
@task.task
def measureDiffImgCentroidsTask(clip):
    """Measure difference-image centroids for each transit of the event.

    Uses the Kepler PRF model to fit in- and out-of-transit images and
    stores a per-transit centroid table under clip['diffImg'].
    """
    #Measuring centroids requires a lot of input params
    period_days = clip['trapFit.period_days']
    epoch_bkjd = clip['trapFit.epoch_bkjd']  #Check this what BLS returns
    duration_hrs = clip['trapFit.duration_hrs']
    cube = clip['serve.cube']
    # Zero out non-finite pixels (mutates the cube in place).
    cube[ ~np.isfinite(cube) ] = 0
    tpfHeader0 = clip['serve.tpfHeader0']
    tpfHeader = clip['serve.tpfHeader']
    ccdMod = tpfHeader0['MODULE']
    ccdOut = tpfHeader0['OUTPUT']
    bbox = cent.getBoundingBoxForImage(cube[0], tpfHeader)
    rollPhase = clip['rollPhase.rollPhase']
    prfPath = clip['config.prfPath']
    prfObj = prf.KeplerPrf(prfPath)
    time_days = clip['serve.time']
    flags = clip['flags']
    # import pdb; pdb.set_trace()
    out,log = cent.measureDiffOffset(period_days, epoch_bkjd, duration_hrs, \
        time_days, prfObj, ccdMod, ccdOut, cube, bbox, rollPhase, flags)
    #Set column names
    out = nca.Nca(out)
    out.setLookup(1, "rin intr_col intr_row diff_col diff_row".split())
    clip['diffImg'] = {'centroid_timeseries':out, 'log':log}
    # Enforce contract: raise KeyError now if the output key is missing.
    clip['diffImg.centroid_timeseries']
    return clip
import dave.vetting.RoboVet as RoboVet
@task.task
def dispositionTask(clip):
    """Decide whether an event is a planet candidate or not.

    Applies, in order, an SNR cut, the modshift flux vetter, and a
    difference-image centroid-offset test.  The verdict is stored as a
    sub-clipboard under clip['disposition'] with keys isSignificantEvent,
    isCandidate and reasonForFail.

    BUG FIX: the failure branches previously returned the bare disposition
    object instead of the clipboard, while the success branch attached it
    to the clip and returned the clip.  Callers (searchForEvent,
    multiEventSearchTask) index the return value with keys such as
    'serve.time' and 'disposition.isSignificantEvent', so the function now
    always attaches the disposition and returns the clip.
    """
    clip['disposition'] = _evaluateDisposition(clip)

    #Enforce contract
    clip['disposition.isSignificantEvent']
    clip['disposition.isCandidate']
    clip['disposition.reasonForFail']
    return clip


def _evaluateDisposition(clip):
    """Apply the vetting tests in order and return the disposition.

    Returns a Clipboard with isSignificantEvent, isCandidate and
    reasonForFail; later tests are skipped once a test fails.
    """
    #Thresholds
    snrThreshold = clip['config.minSnrForDetection']
    offsetThreshold_sigma = clip['config.minCentroidSignifForFp']

    #Data on which to make a decision
    snr = clip['trapFit.snr']
    modshiftDict = clip['modshift']
    centroidArray = clip['diffImg.centroid_timeseries']

    out = clipboard.Clipboard(isSignificantEvent=True, isCandidate=True,
                              reasonForFail="None")

    # Test 1: the trapezoid-fit SNR must clear the detection threshold.
    if snr < snrThreshold:
        out['isSignificantEvent'] = False
        out['isCandidate'] = False
        out['reasonForFail'] = "SNR (%.1f) below threshold %.1f" \
            %(snr, snrThreshold)
        return out

    # Test 2: parse modshift results through the flux vetter.
    fluxVetDict = RoboVet.roboVet(modshiftDict)
    out['fluxVet'] = fluxVetDict
    assert(fluxVetDict['disp'] in ["candidate", "false positive"])
    if fluxVetDict['disp'] == "false positive":
        out['isCandidate'] = False
        out['reasonForFail'] = fluxVetDict['comments']
        return out

    # Test 3: compute centroid offset and its significance.
    result = cent.measureOffsetInTimeseries(centroidArray)
    out['centroidVet'] = result
    signif = result['signif']
    offset = result['offset']
    if signif > offsetThreshold_sigma:
        out['isCandidate'] = False
        out['reasonForFail'] = "Centroid offset of %.2f (%.1f sigma) detected" \
            %( offset, signif)

    return out
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
import theano.tensor as T
from ..layers.core import Layer, Merge
from ..utils.theano_utils import ndim_tensor
from six.moves import range
class Sequential(Layer):
    '''
    Simple linear stack of layers.

    inherited from Layer:
        - get_params
        - get_output_mask
        - supports_masked_input
    '''
    def __init__(self, layers=None):
        # BUGFIX: the default used to be a mutable list literal (``layers=[]``),
        # which is shared across calls; use a None sentinel instead.
        self.layers = []
        self.params = []
        self.regularizers = []
        self.constraints = []
        self.updates = []
        for layer in (layers or []):
            self.add(layer)

    def set_previous(self, layer):
        # Feed an upstream layer into the head of the stack.
        self.layers[0].previous = layer

    def add(self, layer):
        """Append `layer` and collect its params/regularizers/constraints."""
        self.layers.append(layer)
        if len(self.layers) > 1:
            self.layers[-1].set_previous(self.layers[-2])
            # The head layer may only learn its input ndim once a later
            # layer declares one; (re)derive the symbolic input if missing.
            if not hasattr(self.layers[0], 'input'):
                self.set_input()
        layer.init_updates()
        params, regularizers, constraints, updates = layer.get_params()
        self.params += params
        self.regularizers += regularizers
        self.constraints += constraints
        self.updates += updates

    def get_output(self, train=False):
        # The model output is the output of the last layer.
        return self.layers[-1].get_output(train)

    def set_input(self):
        # Build a fresh symbolic tensor for the head of the stack, using
        # the ndim of the first layer that declares an input.
        for l in self.layers:
            if hasattr(l, 'input'):
                ndim = l.input.ndim
                self.layers[0].input = ndim_tensor(ndim)
                break

    def get_input(self, train=False):
        if not hasattr(self.layers[0], 'input'):
            self.set_input()
        return self.layers[0].get_input(train)

    @property
    def input(self):
        return self.get_input()

    def get_weights(self):
        # Flat list of all layers' weights, in stack order.
        weights = []
        for layer in self.layers:
            weights += layer.get_weights()
        return weights

    def set_weights(self, weights):
        # Consume the flat weight list layer by layer.
        for i in range(len(self.layers)):
            nb_param = len(self.layers[i].params)
            self.layers[i].set_weights(weights[:nb_param])
            weights = weights[nb_param:]

    def get_config(self):
        return {"name": self.__class__.__name__,
                "layers": [layer.get_config() for layer in self.layers]}
class Graph(Layer):
    '''
    Implement a NN graph with arbitrary layer connections,
    arbitrary number of inputs and arbitrary number of outputs.

    Note: Graph can only be used as a layer
    (connect, input, get_input, get_output)
    when it has exactly one input and one output.

    inherited from Layer:
        - get_params
        - get_output_mask
        - supports_masked_input
        - get_weights
        - set_weights
    '''
    def __init__(self):
        self.namespace = set()   # strings
        self.nodes = {}          # layer-like
        self.inputs = {}         # layer-like
        self.input_order = []    # strings
        self.outputs = {}        # layer-like
        self.output_order = []   # strings
        self.input_config = []   # dicts
        self.output_config = []  # dicts
        self.node_config = []    # dicts

        self.params = []
        self.regularizers = []
        self.constraints = []
        self.updates = []

    def set_previous(self, layer):
        if len(self.inputs) != 1 or len(self.outputs) != 1:
            raise Exception('The Graph container can only be used as a layer \
                when it has exactly one input and one output.')
        self.inputs[self.input_order[0]].set_previous(layer)

    def get_input(self, train=False):
        if len(self.inputs) != 1 or len(self.outputs) != 1:
            raise Exception('The Graph container can only be used as a layer \
                when it has exactly one input and one output.')
        return self.inputs[self.input_order[0]].get_input(train)

    @property
    def input(self):
        return self.get_input()

    def get_output(self, train=False):
        if len(self.inputs) != 1 or len(self.outputs) != 1:
            raise Exception('The Graph container can only be used as a layer \
                when it has exactly one input and one output.')
        return self.outputs[self.output_order[0]].get_output(train)

    def add_input(self, name, ndim=2, dtype='float'):
        """Register a named model input with the given ndim and dtype."""
        if name in self.namespace:
            raise Exception('Duplicate node identifier: ' + name)
        self.namespace.add(name)
        self.input_order.append(name)
        layer = Layer()  # empty layer
        if dtype == 'float':
            layer.input = ndim_tensor(ndim)
        else:
            # Integer inputs are only supported as 2D index matrices
            # (i.e. for Embedding layers).
            if ndim == 2:
                layer.input = T.imatrix()
            else:
                raise Exception('Type "int" can only be used with ndim==2 (Embedding).')
        layer.input.name = name
        self.inputs[name] = layer
        self.input_config.append({'name': name, 'ndim': ndim, 'dtype': dtype})

    def add_node(self, layer, name, input=None, inputs=None,
                 merge_mode='concat', create_output=False):
        """Add a named node fed by `input` (single) or `inputs` (merged).

        ``inputs`` defaults to None instead of a shared mutable list.
        """
        inputs = inputs or []
        if hasattr(layer, 'set_name'):
            layer.set_name(name)
        if name in self.namespace:
            raise Exception('Duplicate node identifier: ' + name)
        if input:
            if input not in self.namespace:
                raise Exception('Unknown node/input identifier: ' + input)
            if input in self.nodes:
                layer.set_previous(self.nodes[input])
            elif input in self.inputs:
                layer.set_previous(self.inputs[input])
        if inputs:
            # Merge several upstream nodes/inputs into one feed.
            to_merge = []
            for n in inputs:
                if n in self.nodes:
                    to_merge.append(self.nodes[n])
                elif n in self.inputs:
                    to_merge.append(self.inputs[n])
                else:
                    raise Exception('Unknown identifier: ' + n)
            merge = Merge(to_merge, mode=merge_mode)
            layer.set_previous(merge)

        self.namespace.add(name)
        self.nodes[name] = layer
        self.node_config.append({'name': name,
                                 'input': input,
                                 'inputs': inputs,
                                 'merge_mode': merge_mode})
        layer.init_updates()
        params, regularizers, constraints, updates = layer.get_params()
        self.params += params
        self.regularizers += regularizers
        self.constraints += constraints
        self.updates += updates

        if create_output:
            self.add_output(name, input=name)

    def add_output(self, name, input=None, inputs=None, merge_mode='concat'):
        """Expose an existing node (or a merge of nodes) as a model output."""
        inputs = inputs or []
        if name in self.output_order:
            raise Exception('Duplicate output identifier: ' + name)
        if input:
            if input not in self.namespace:
                raise Exception('Unknown node/input identifier: ' + input)
            if input in self.nodes:
                self.outputs[name] = self.nodes[input]
            elif input in self.inputs:
                # BUGFIX: was ``self.ouputs`` (typo), which raised
                # AttributeError whenever an output was bound directly to
                # a model input.
                self.outputs[name] = self.inputs[input]
        if inputs:
            # Outputs may only merge real nodes, not raw inputs.
            to_merge = []
            for n in inputs:
                if n not in self.nodes:
                    raise Exception('Unknown identifier: ' + n)
                to_merge.append(self.nodes[n])
            merge = Merge(to_merge, mode=merge_mode)
            self.outputs[name] = merge

        self.output_order.append(name)
        self.output_config.append({'name': name,
                                   'input': input,
                                   'inputs': inputs,
                                   'merge_mode': merge_mode})

    def get_config(self):
        return {"name": self.__class__.__name__,
                "input_config": self.input_config,
                "node_config": self.node_config,
                "output_config": self.output_config,
                "input_order": self.input_order,
                "output_order": self.output_order,
                "nodes": dict([(c["name"], self.nodes[c["name"]].get_config()) for c in self.node_config])}
| |
# coding=utf-8
import os
import re
import yaml
from fabric.context_managers import settings
from fabric.decorators import task
from fabric.state import env
from fabric.utils import indent, abort
from blues.application.deploy import maybe_install_requirements
from refabric.utils import info
from refabric.contrib import blueprints
from .. import git
blueprint = blueprints.get('blues.app')
__all__ = []
def get_providers(*args, **kw):
    """Late-bound proxy for ``.providers.get_providers``.

    The import happens at call time, presumably to avoid a circular
    import at module load — TODO confirm.
    """
    from .providers import get_providers as _get_providers
    return _get_providers(*args, **kw)
@task
def setup():
    """
    Install project user, structure, env, source, dependencies and providers
    """
    # Local imports: deploy/project pull in fabric state at import time,
    # so they are deferred to task execution.
    from .deploy import install_project, install_virtualenv, \
        install_requirements, install_providers
    from .project import requirements_txt, use_virtualenv
    install_project()
    # Virtualenv (and pip requirements) are optional per project
    if use_virtualenv():
        install_virtualenv()
        install_requirements(requirements_txt(), update_pip=True)
    install_providers()
    configure_providers()
@task
def configure():
    """
    Deploy and configure providers

    Providers are force-reloaded only when deploy() reports changed code.
    """
    configure_providers(force_reload=deploy(auto_reload=False))
@task
def deploy(revision=None, auto_reload=True, force=False, update_pip=False):
    """
    Reset source to configured branch and install requirements, if needed

    :param str revision: Optional git revision to reset the source to
    :param bool auto_reload: Reload application providers if source has changed
    :param bool force: Force install of requirements
    :param bool update_pip: Also upgrade pip when installing requirements
    :return: (previous_commit, current_commit) when the source changed,
             otherwise False — callers rely on its truthiness only
    """
    from .deploy import update_source
    from .project import use_virtualenv, project_home, project_name
    from ..debian import chmod
    from refabric.context_managers import silent
    # Reset git repo
    previous_commit, current_commit = update_source(revision)
    code_changed = current_commit is not None and previous_commit != current_commit
    if code_changed:
        info('Updated git repository from: {} to: {}', previous_commit, current_commit)
    else:
        info('Reset git repository to: {}', current_commit)
    # Add Google Service User Credentials if present
    gcloudAccountKey = blueprint.get('gcloud_service_account_key')
    if gcloudAccountKey:
        context = {
            'service_account_key': gcloudAccountKey,
        }
        blueprint.upload('gcloud/gcloud-service-account.json',
                         os.path.join(project_home(), 'gcloud-service-account.json'),
                         context,
                         user=project_name())
        # Credentials must be readable by the project user only
        with silent():
            chmod(
                os.path.join(project_home(),'gcloud-service-account.json'),
                mode=600,
                owner=project_name(),
                group=project_name())
    if code_changed or force:
        # Install python dependencies
        if use_virtualenv():
            maybe_install_requirements(previous_commit, current_commit, force,
                                       update_pip=update_pip)
        # Reload providers
        if auto_reload:
            reload()
    return (previous_commit, current_commit) if code_changed else False
@task
def install_requirements():
    """
    Install requirements within a virtualenv
    """
    # NOTE: the local import deliberately shadows this task's own name
    # with the underlying deploy helper.
    from .deploy import install_requirements
    from .project import use_virtualenv
    if use_virtualenv():
        install_requirements()
    else:
        abort('Cannot install requirements without virtualenv')
@task
def deployed():
    """
    Show deployed and last origin commit

    :return tuple: (head_commit, origin_commit)
    """
    from .project import sudo_project, git_repository_path
    msg = ''
    params = []
    with sudo_project():
        repository_path = git_repository_path()
        git.fetch(repository_path)
        head_tag, head_tag_delta = git.current_tag(repository_path)
        # A positive delta means HEAD is some commits past the latest tag
        if head_tag_delta > 0:
            msg += 'Latest tag: {} distance: {}'
            params += [head_tag, head_tag_delta]
        else:
            msg += 'Deployed tag: {}'
            params += [head_tag]
        head_commit, head_message = git.log(repository_path)[0]
        msg += '\nRevision: {} comment: {}'
        params += [head_commit, head_message]
        origin = git.get_origin(repository_path)
        origin_commit, origin_message = git.log(repository_path, refspec=origin)[0]
        # Only mention the remote when it differs from the deployed revision
        if head_commit != origin_commit:
            msg += '\nRemote: {} revision: {} comment: {}'
            params += [origin, origin_commit, origin_message]
        info(msg, *params)
        return head_commit, origin_commit
@task
def incoming(revision=None):
    """
    Show changes since the deployed revision

    :param str revision: Revision to compare against; defaults to origin HEAD
    :return: list of git log rows, or None when there is nothing to show
    """
    from .project import sudo_project, git_repository_path
    with sudo_project():
        repository_path = git_repository_path()
        git.fetch(repository_path)
        current_revision, head_message = git.log(repository_path)[0]
        if not revision:
            # Default target: the latest commit on origin
            origin = git.get_origin(repository_path)
            revision, _ = git.log(repository_path, refspec=origin)[0]
        if current_revision == revision:
            info("No changes detected")
            return None
        refspec = '{0}..{1}'.format(current_revision, revision)
        git_log = git.log(repository_path, refspec=refspec, count=False, author=True)
        if not git_log:
            info("Unable to get changelog (possibly different branches)")
            return None
        # (Re)fabric isn't always unicode safe
        summary = u'\n'.join([u' :: '.join(row) for row in git_log])
        info('Changes since deploy:\n{}', summary.encode('utf-8'))
        return git_log
@task
def start():
    """
    Start all application providers on current host
    """
    # De-duplicate: one provider instance may serve several roles
    for provider in set(get_providers(env.host_string).values()):
        provider.start()
@task
def stop():
    """
    Stop all application providers on current host
    """
    # De-duplicate: one provider instance may serve several roles
    for provider in set(get_providers(env.host_string).values()):
        provider.stop()
@task
def reload():
    """
    Reload all application providers on current host
    """
    # De-duplicate: one provider instance may serve several roles
    for provider in set(get_providers(env.host_string).values()):
        provider.reload()
@task
def status():
    """
    get status from all application providers on current host
    """
    project = blueprint.get('project')
    # De-duplicate: one provider instance may serve several roles
    for provider in set(get_providers(env.host_string).values()):
        provider.status(project)
@task
def configure_providers(force_reload=False):
    """
    Render, upload and reload web & worker config

    :param bool force_reload: Force reload of providers, even if not updated
    :return dict: Application providers for current host
    """
    from .project import sudo_project
    with sudo_project():
        providers = get_providers(env.host_string)
        if 'web' in providers:
            providers['web'].configure_web()
        if 'worker' in providers:
            providers['worker'].configure_worker()
        # This may become a real provider in the future.
        configure_environment()
    # Reload only providers whose uploaded config changed, unless forced
    for provider in set(providers.values()):
        if provider.updates or force_reload:
            provider.reload()
    return providers
@task
def configure_environment():
    """Upload dotenv files for the project user.

    Renders ``dotenv/dotenv`` into the project home, hooks it into the app
    user's shell profile, and — when a ``config`` blueprint setting exists —
    also renders ``dotenv/dotconf`` into the git checkout.
    """
    # NOTE: removed unused ``sudo_project`` from the import; callers
    # (configure_providers) already run this inside sudo_project().
    from .project import project_home, project_name, git_repository_path
    from ..shell import configure_profile
    context = {"project_name": project_name()}
    blueprint.upload('dotenv/dotenv',
                     os.path.join(project_home(), '.env'),
                     context=context,
                     user=project_name())
    # Exports dotenv to the app user's interactive sessions
    configure_profile(project_home(), dotenv=True)
    config = blueprint.get('config', None)
    if config:
        context.update(config=config)
        blueprint.upload('dotenv/dotconf',
                         os.path.join(git_repository_path(), '.env'),
                         context=context,
                         user=project_name())
@task
def configure_beat_schedule():
    """Upload the beat schedule file, when one is configured."""
    from .project import project_home, project_name
    schedule = blueprint.get('schedule', None)
    if not schedule:
        return
    blueprint.upload('beat/schedule',
                     os.path.join(project_home(), '.schedule'),
                     context={'schedule': yaml.dump(schedule)},
                     user=project_name())
@task
def generate_nginx_conf(role='www'):
    """
    Generate nginx site config for web daemon

    Renders the configured template into
    ``templates/<role>/nginx/sites-available/<project>.conf`` next to the
    fabfile.

    :param str role: Name of role (directory) to generate config to
    """
    name = blueprint.get('project')
    socket = blueprint.get('web.socket', default='0.0.0.0:3030')
    host, _, port = socket.partition(':')
    if port:
        if len(env.hosts) > 1:
            # Multiple hosts -> Bind upstream to each host:port
            # NOTE(review): the comprehension variable shadows the outer
            # ``host`` (it leaks under Python 2); harmless since ``host``
            # is not used afterwards.
            sockets = ['{}:{}'.format(host, port) for host in env.hosts]
        else:
            # Single host -> Bind upstream to unique configured socket
            sockets = [socket]
    else:
        # No port in the value -> treat it as a unix socket path
        sockets = ['unix:{}'.format(socket)]
    context = {
        'name': name,
        'sockets': sockets,
        'domain': blueprint.get('web.domain', default='_'),
        'ssl': blueprint.get('web.ssl', False),
        'ip_hash': blueprint.get('web.ip_hash', False)
    }
    # Template precedence: default site.conf, then explicit web.nginx_conf,
    # and a uwsgi provider overrides both.
    template = blueprint.get('web.nginx_conf')
    if template is None:
        template = 'nginx/site.conf'
    else:
        template = 'nginx/{}.conf'.format(template)
    web_provider = blueprint.get('web.provider')
    if web_provider and web_provider == 'uwsgi':
        template = 'nginx/uwsgi_site.conf'
    with settings(template_dirs=['templates']):
        conf = blueprint.render_template(template, context)
    conf_dir = os.path.join(
        os.path.dirname(env['real_fabfile']),
        'templates',
        role,
        'nginx',
        'sites-available')
    conf_path = os.path.join(conf_dir, '{}.conf'.format(name))
    if not os.path.exists(conf_dir):
        os.makedirs(conf_dir)
    with open(conf_path, 'w+') as f:
        f.write(conf)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class RNNCellTest(tf.test.TestCase):
  def testCoupledInputForgetGateLSTMCell(self):
    """Smoke test: cell output/state match previously recorded values."""
    with self.test_session() as sess:
      num_units = 2
      state_size = num_units * 2
      batch_size = 3
      input_size = 4
      # Golden values recorded from an earlier run, not derived analytically.
      expected_output = np.array(
          [[0.121753, 0.121753],
           [0.103349, 0.103349],
           [0.100178, 0.100178]],
          dtype=np.float32)
      expected_state = np.array(
          [[0.137523, 0.137523, 0.121753, 0.121753],
           [0.105450, 0.105450, 0.103349, 0.103349],
           [0.100742, 0.100742, 0.100178, 0.100178]],
          dtype=np.float32)
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([batch_size, input_size])
        m = tf.zeros([batch_size, state_size])
        output, state = tf.contrib.rnn.CoupledInputForgetGateLSTMCell(
            num_units=num_units, forget_bias=1.0)(x, m)
        sess.run([tf.global_variables_initializer()])
        res = sess.run([output, state],
                       {x.name: np.array([[1., 1., 1., 1.],
                                          [2., 2., 2., 2.],
                                          [3., 3., 3., 3.]]),
                        m.name: 0.1 * np.ones((batch_size, state_size))})
        # This is a smoke test: Only making sure expected values didn't change.
        self.assertEqual(len(res), 2)
        self.assertAllClose(res[0], expected_output)
        self.assertAllClose(res[1], expected_state)
def testTimeFreqLSTMCell(self):
with self.test_session() as sess:
num_units = 8
state_size = num_units * 2
batch_size = 3
input_size = 4
feature_size = 2
frequency_skip = 1
num_shifts = (input_size - feature_size) / frequency_skip + 1
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([batch_size, input_size])
m = tf.zeros([batch_size, state_size*num_shifts])
output, state = tf.contrib.rnn.TimeFreqLSTMCell(
num_units=num_units, feature_size=feature_size,
frequency_skip=frequency_skip, forget_bias=1.0)(x, m)
sess.run([tf.global_variables_initializer()])
res = sess.run([output, state],
{x.name: np.array([[1., 1., 1., 1.],
[2., 2., 2., 2.],
[3., 3., 3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size*(
num_shifts)))})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_units*num_shifts))
self.assertEqual(res[1].shape, (batch_size, state_size*num_shifts))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
  def testGridLSTMCell(self):
    """Smoke test: GridLSTMCell output/state shapes and input sensitivity."""
    with self.test_session() as sess:
      num_units = 8
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      num_shifts = int((input_size - feature_size) / frequency_skip + 1)
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        cell = tf.contrib.rnn.GridLSTMCell(
            num_units=num_units, feature_size=feature_size,
            frequency_skip=frequency_skip, forget_bias=1.0,
            num_frequency_blocks=[num_shifts],
            couple_input_forget_gates=True,
            state_is_tuple=True)
        inputs = tf.constant(np.array([[1., 1., 1., 1.],
                                       [2., 2., 2., 2.],
                                       [3., 3., 3., 3.]],
                                      dtype=np.float32), dtype=tf.float32)
        state_value = tf.constant(
            0.1 * np.ones((batch_size, num_units), dtype=np.float32),
            dtype=tf.float32)
        # One (c, m) state pair per frequency shift.
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * num_shifts))
        output, state = cell(inputs, init_state)
        sess.run([tf.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_units * num_shifts * 2))
        for ss in res[1]:
          self.assertEqual(ss.shape, (batch_size, num_units))
        # Different inputs so different outputs and states
        for i in range(1, batch_size):
          self.assertTrue(
              float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
          self.assertTrue(float(np.linalg.norm(
              (res[1].state_f00_b00_c[0, :] - res[1].state_f00_b00_c[i, :])))
                          > 1e-6)
  def testGridLSTMCellWithFrequencyBlocks(self):
    """Smoke test: GridLSTMCell with explicit frequency block ranges."""
    with self.test_session() as sess:
      num_units = 8
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      # Two single-shift blocks covering feature indices [0,2) and [2,4).
      num_frequency_blocks = [1, 1]
      total_blocks = num_frequency_blocks[0] + num_frequency_blocks[1]
      start_freqindex_list = [0, 2]
      end_freqindex_list = [2, 4]
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        cell = tf.contrib.rnn.GridLSTMCell(
            num_units=num_units, feature_size=feature_size,
            frequency_skip=frequency_skip, forget_bias=1.0,
            num_frequency_blocks=num_frequency_blocks,
            start_freqindex_list=start_freqindex_list,
            end_freqindex_list=end_freqindex_list,
            couple_input_forget_gates=True,
            state_is_tuple=True)
        inputs = tf.constant(np.array([[1., 1., 1., 1.],
                                       [2., 2., 2., 2.],
                                       [3., 3., 3., 3.]],
                                      dtype=np.float32), dtype=tf.float32)
        state_value = tf.constant(
            0.1 * np.ones((batch_size, num_units), dtype=np.float32),
            dtype=tf.float32)
        # One (c, m) state pair per block.
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * total_blocks))
        output, state = cell(inputs, init_state)
        sess.run([tf.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape,
                         (batch_size, num_units * total_blocks * 2))
        for ss in res[1]:
          self.assertEqual(ss.shape, (batch_size, num_units))
        # Different inputs so different outputs and states
        for i in range(1, batch_size):
          self.assertTrue(
              float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
          self.assertTrue(float(np.linalg.norm(
              (res[1].state_f00_b00_c[0, :] - res[1].state_f00_b00_c[i, :])))
                          > 1e-6)
  def testGridLstmCellWithCoupledInputForgetGates(self):
    """Golden-value test for GridLSTMCell, tuple and non-tuple state forms."""
    num_units = 2
    batch_size = 3
    input_size = 4
    feature_size = 2
    frequency_skip = 1
    num_shifts = int((input_size - feature_size) / frequency_skip + 1)
    # Golden values recorded from an earlier run, not derived analytically.
    expected_output = np.array(
        [[0.416383, 0.416383, 0.403238, 0.403238, 0.524020, 0.524020,
          0.565425, 0.565425, 0.557865, 0.557865, 0.609699, 0.609699],
         [0.627331, 0.627331, 0.622393, 0.622393, 0.688342, 0.688342,
          0.708078, 0.708078, 0.694245, 0.694245, 0.715171, 0.715171],
         [0.711050, 0.711050, 0.709197, 0.709197, 0.736533, 0.736533,
          0.744264, 0.744264, 0.737390, 0.737390, 0.745250, 0.745250]],
        dtype=np.float32)
    expected_state = np.array(
        [[0.625556, 0.625556, 0.416383, 0.416383, 0.759134, 0.759134,
          0.524020, 0.524020, 0.798795, 0.798795, 0.557865, 0.557865],
         [0.875488, 0.875488, 0.627331, 0.627331, 0.936432, 0.936432,
          0.688342, 0.688342, 0.941961, 0.941961, 0.694245, 0.694245],
         [0.957327, 0.957327, 0.711050, 0.711050, 0.979522, 0.979522,
          0.736533, 0.736533, 0.980245, 0.980245, 0.737390, 0.737390]],
        dtype=np.float32)
    # Both state layouts must produce the same numerical results.
    for state_is_tuple in [False, True]:
      with self.test_session() as sess:
        with tf.variable_scope("state_is_tuple" + str(state_is_tuple),
                               initializer=tf.constant_initializer(0.5)):
          cell = tf.contrib.rnn.GridLSTMCell(
              num_units=num_units, feature_size=feature_size,
              frequency_skip=frequency_skip, forget_bias=1.0,
              num_frequency_blocks=[num_shifts],
              couple_input_forget_gates=True,
              state_is_tuple=state_is_tuple)
          inputs = tf.constant(np.array([[1., 1., 1., 1.],
                                         [2., 2., 2., 2.],
                                         [3., 3., 3., 3.]],
                                        dtype=np.float32), dtype=tf.float32)
          if state_is_tuple:
            state_value = tf.constant(
                0.1 * np.ones((batch_size, num_units), dtype=np.float32),
                dtype=tf.float32)
            init_state = cell.state_tuple_type(
                *([state_value, state_value] * num_shifts))
          else:
            # Concatenated layout: all (c, m) pairs in one tensor.
            init_state = tf.constant(
                0.1 * np.ones((batch_size, num_units * num_shifts * 2),
                              dtype=np.float32),
                dtype=tf.float32)
          output, state = cell(inputs, init_state)
          sess.run([tf.global_variables_initializer()])
          res = sess.run([output, state])
          # This is a smoke test: Only making sure expected values not change.
          self.assertEqual(len(res), 2)
          self.assertAllClose(res[0], expected_output)
          if not state_is_tuple:
            self.assertAllClose(res[1], expected_state)
          else:
            # There should be num_shifts * 2 states in the tuple.
            self.assertEqual(len(res[1]), num_shifts * 2)
            # Checking the shape of each state to be batch_size * num_units
            for ss in res[1]:
              self.assertEqual(ss.shape[0], batch_size)
              self.assertEqual(ss.shape[1], num_units)
            self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
  def testBidirectionGridLSTMCell(self):
    """Golden-value test for BidirectionalGridLSTMCell with shared weights."""
    with self.test_session() as sess:
      num_units = 2
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      num_shifts = int((input_size - feature_size) / frequency_skip + 1)
      # Golden values recorded from an earlier run; forward and backward
      # halves are concatenated along the feature axis.
      expected_output = np.array(
          [[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
            0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
            0.520789, 0.520789, 0.476968, 0.476968, 0.604341, 0.604341,
            0.760207, 0.760207, 0.635773, 0.635773, 0.850218, 0.850218],
           [0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
            0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
            0.692621, 0.692621, 0.652363, 0.652363, 0.737517, 0.737517,
            0.899558, 0.899558, 0.745984, 0.745984, 0.946840, 0.946840],
           [0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
            0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
            0.759940, 0.759940, 0.720652, 0.720652, 0.778552, 0.778552,
            0.941606, 0.941606, 0.781035, 0.781035, 0.977731, 0.977731]],
          dtype=np.float32)
      expected_state = np.array(
          [[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
            0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
            0.785405, 0.785405, 0.520789, 0.520789, 0.890836, 0.890836,
            0.604341, 0.604341, 0.928512, 0.928512, 0.635773, 0.635773],
           [0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
            0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
            0.993088, 0.993088, 0.692621, 0.692621, 1.040288, 1.040288,
            0.737517, 0.737517, 1.048773, 1.048773, 0.745984, 0.745984],
           [1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
            0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
            1.062455, 1.062455, 0.759940, 0.759940, 1.080101, 1.080101,
            0.778552, 0.778552, 1.082402, 1.082402, 0.781035, 0.781035]],
          dtype=np.float32)
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        cell = tf.contrib.rnn.BidirectionalGridLSTMCell(
            num_units=num_units, feature_size=feature_size,
            share_time_frequency_weights=True,
            frequency_skip=frequency_skip, forget_bias=1.0,
            num_frequency_blocks=[num_shifts])
        inputs = tf.constant(np.array([[1.0, 1.1, 1.2, 1.3],
                                       [2.0, 2.1, 2.2, 2.3],
                                       [3.0, 3.1, 3.2, 3.3]],
                                      dtype=np.float32), dtype=tf.float32)
        state_value = tf.constant(
            0.1 * np.ones((batch_size, num_units), dtype=np.float32),
            dtype=tf.float32)
        # (c, m) pairs per shift, for both directions.
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * num_shifts * 2))
        output, state = cell(inputs, init_state)
        sess.run([tf.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_units*num_shifts*4))
        self.assertAllClose(res[0], expected_output)
        # There should be num_shifts * 4 states in the tuple.
        self.assertEqual(len(res[1]), num_shifts * 4)
        # Checking the shape of each state to be batch_size * num_units
        for ss in res[1]:
          self.assertEqual(ss.shape[0], batch_size)
          self.assertEqual(ss.shape[1], num_units)
        self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
  def testBidirectionGridLSTMCellWithSliceOffset(self):
    """Golden-value test: backward direction uses a slice offset of 1."""
    with self.test_session() as sess:
      num_units = 2
      batch_size = 3
      input_size = 4
      feature_size = 2
      frequency_skip = 1
      num_shifts = int((input_size - feature_size) / frequency_skip + 1)
      # Golden values recorded from an earlier run; the backward half
      # differs from testBidirectionGridLSTMCell due to the offset.
      expected_output = np.array(
          [[0.464130, 0.464130, 0.419165, 0.419165, 0.593283, 0.593283,
            0.738350, 0.738350, 0.661638, 0.661638, 0.866774, 0.866774,
            0.322645, 0.322645, 0.276068, 0.276068, 0.584654, 0.584654,
            0.690292, 0.690292, 0.640446, 0.640446, 0.840071, 0.840071],
           [0.669636, 0.669636, 0.628966, 0.628966, 0.736057, 0.736057,
            0.895927, 0.895927, 0.755559, 0.755559, 0.954359, 0.954359,
            0.493625, 0.493625, 0.449236, 0.449236, 0.730828, 0.730828,
            0.865996, 0.865996, 0.749429, 0.749429, 0.944958, 0.944958],
           [0.751109, 0.751109, 0.711716, 0.711716, 0.778357, 0.778357,
            0.940779, 0.940779, 0.784530, 0.784530, 0.980604, 0.980604,
            0.608587, 0.608587, 0.566683, 0.566683, 0.777345, 0.777345,
            0.925820, 0.925820, 0.782597, 0.782597, 0.976858, 0.976858]],
          dtype=np.float32)
      expected_state = np.array(
          [[0.710660, 0.710660, 0.464130, 0.464130, 0.877293, 0.877293,
            0.593283, 0.593283, 0.958505, 0.958505, 0.661638, 0.661638,
            0.516575, 0.516575, 0.322645, 0.322645, 0.866628, 0.866628,
            0.584654, 0.584654, 0.934002, 0.934002, 0.640446, 0.640446],
           [0.967579, 0.967579, 0.669636, 0.669636, 1.038811, 1.038811,
            0.736057, 0.736057, 1.058201, 1.058201, 0.755559, 0.755559,
            0.749836, 0.749836, 0.493625, 0.493625, 1.033488, 1.033488,
            0.730828, 0.730828, 1.052186, 1.052186, 0.749429, 0.749429],
           [1.053842, 1.053842, 0.751109, 0.751109, 1.079919, 1.079919,
            0.778357, 0.778357, 1.085620, 1.085620, 0.784530, 0.784530,
            0.895999, 0.895999, 0.608587, 0.608587, 1.078978, 1.078978,
            0.777345, 0.777345, 1.083843, 1.083843, 0.782597, 0.782597]],
          dtype=np.float32)
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        cell = tf.contrib.rnn.BidirectionalGridLSTMCell(
            num_units=num_units, feature_size=feature_size,
            share_time_frequency_weights=True,
            frequency_skip=frequency_skip, forget_bias=1.0,
            num_frequency_blocks=[num_shifts],
            backward_slice_offset=1)
        inputs = tf.constant(np.array([[1.0, 1.1, 1.2, 1.3],
                                       [2.0, 2.1, 2.2, 2.3],
                                       [3.0, 3.1, 3.2, 3.3]],
                                      dtype=np.float32), dtype=tf.float32)
        state_value = tf.constant(
            0.1 * np.ones((batch_size, num_units), dtype=np.float32),
            dtype=tf.float32)
        # (c, m) pairs per shift, for both directions.
        init_state = cell.state_tuple_type(
            *([state_value, state_value] * num_shifts * 2))
        output, state = cell(inputs, init_state)
        sess.run([tf.global_variables_initializer()])
        res = sess.run([output, state])
        self.assertEqual(len(res), 2)
        # The numbers in results were not calculated, this is mostly just a
        # smoke test.
        self.assertEqual(res[0].shape, (batch_size, num_units*num_shifts*4))
        self.assertAllClose(res[0], expected_output)
        # There should be num_shifts * 4 states in the tuple.
        self.assertEqual(len(res[1]), num_shifts * 4)
        # Checking the shape of each state to be batch_size * num_units
        for ss in res[1]:
          self.assertEqual(ss.shape[0], batch_size)
          self.assertEqual(ss.shape[1], num_units)
        self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
  def testAttentionCellWrapperFailures(self):
    """AttentionCellWrapper argument validation raises the expected errors."""
    with self.assertRaisesRegexp(
        TypeError, "The parameter cell is not RNNCell."):
      tf.contrib.rnn.AttentionCellWrapper(None, 0)
    num_units = 8
    for state_is_tuple in [False, True]:
      with tf.Graph().as_default():
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(
            num_units, state_is_tuple=state_is_tuple)
        # attn_length must be strictly positive
        with self.assertRaisesRegexp(
            ValueError, "attn_length should be greater than zero, got 0"):
          tf.contrib.rnn.AttentionCellWrapper(lstm_cell, 0,
                                              state_is_tuple=state_is_tuple)
        with self.assertRaisesRegexp(
            ValueError, "attn_length should be greater than zero, got -1"):
          tf.contrib.rnn.AttentionCellWrapper(lstm_cell, -1,
                                              state_is_tuple=state_is_tuple)
      with tf.Graph().as_default():
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(
            num_units, state_is_tuple=True)
        # Wrapping a tuple-state cell without state_is_tuple must fail
        with self.assertRaisesRegexp(
            ValueError, "Cell returns tuple of states, but the flag "
            "state_is_tuple is not set. State size is: *"):
          tf.contrib.rnn.AttentionCellWrapper(
              lstm_cell, 4, state_is_tuple=False)
  def testAttentionCellWrapperZeros(self):
    """Checks shapes, and that zero inputs/state yield (numerically) zero
    outputs and states, for both the tuple and concatenated state layouts.
    """
    num_units = 8
    attn_length = 16
    batch_size = 3
    input_size = 4
    for state_is_tuple in [False, True]:
      with tf.Graph().as_default():
        with self.test_session() as sess:
          with tf.variable_scope("state_is_tuple_" + str(state_is_tuple)):
            lstm_cell = tf.contrib.rnn.BasicLSTMCell(
                num_units, state_is_tuple=state_is_tuple)
            cell = tf.contrib.rnn.AttentionCellWrapper(
                lstm_cell, attn_length, state_is_tuple=state_is_tuple)
            if state_is_tuple:
              # Tuple layout: ((c, h), attention, attention_state).
              zeros = tf.zeros(
                  [batch_size, num_units], dtype=np.float32)
              attn_state_zeros = tf.zeros(
                  [batch_size, attn_length * num_units], dtype=np.float32)
              zero_state = ((zeros, zeros), zeros, attn_state_zeros)
            else:
              # Concatenated layout: one flat [batch, total_size] tensor.
              zero_state = tf.zeros(
                  [batch_size, num_units * 2 + attn_length
                   * num_units + num_units], dtype=np.float32)
            inputs = tf.zeros([batch_size, input_size], dtype=tf.float32)
            output, state = cell(inputs, zero_state)
            self.assertEquals(output.get_shape(), [batch_size, num_units])
            if state_is_tuple:
              self.assertEquals(len(state), 3)
              self.assertEquals(len(state[0]), 2)
              self.assertEquals(state[0][0].get_shape(),
                                [batch_size, num_units])
              self.assertEquals(state[0][1].get_shape(),
                                [batch_size, num_units])
              self.assertEquals(state[1].get_shape(), [batch_size, num_units])
              self.assertEquals(state[2].get_shape(),
                                [batch_size, attn_length * num_units])
              tensors = [output] + list(state)
            else:
              self.assertEquals(
                  state.get_shape(), [batch_size, num_units * 2 + num_units
                                      + attn_length * num_units])
              tensors = [output, state]
            # The summed absolute value of every output/state tensor must
            # stay (numerically) zero for all-zero inputs and state.
            zero_result = sum([tf.reduce_sum(tf.abs(x)) for x in tensors])
            sess.run(tf.global_variables_initializer())
            self.assertTrue(sess.run(zero_result) < 1e-6)
  def testAttentionCellWrapperValues(self):
    """Checks that distinct batch rows produce distinct outputs/states.

    Feeds three different input rows through the wrapped cell (for both
    state layouts) and asserts the resulting rows differ from row 0.
    """
    num_units = 8
    attn_length = 16
    batch_size = 3
    for state_is_tuple in [False, True]:
      with tf.Graph().as_default():
        with self.test_session() as sess:
          with tf.variable_scope("state_is_tuple_" + str(state_is_tuple)):
            lstm_cell = tf.contrib.rnn.BasicLSTMCell(
                num_units, state_is_tuple=state_is_tuple)
            cell = tf.contrib.rnn.AttentionCellWrapper(
                lstm_cell, attn_length, state_is_tuple=state_is_tuple)
            if state_is_tuple:
              # Constant non-zero initial state in tuple layout:
              # ((c, h), attention, attention_state).
              zeros = tf.constant(
                  0.1 * np.ones([batch_size, num_units],
                                dtype=np.float32), dtype=tf.float32)
              attn_state_zeros = tf.constant(
                  0.1 * np.ones([batch_size, attn_length * num_units],
                                dtype=np.float32), dtype=tf.float32)
              zero_state = ((zeros, zeros), zeros, attn_state_zeros)
            else:
              # Same initial state, concatenated into one flat tensor.
              zero_state = tf.constant(
                  0.1 * np.ones([batch_size, num_units * 2 + num_units
                                 + attn_length * num_units],
                                dtype=np.float32), dtype=tf.float32)
            inputs = tf.constant(np.array([[1., 1., 1., 1.],
                                           [2., 2., 2., 2.],
                                           [3., 3., 3., 3.]],
                                          dtype=np.float32), dtype=tf.float32)
            output, state = cell(inputs, zero_state)
            if state_is_tuple:
              # Flatten the tuple state so both layouts can be compared
              # with the same row-wise norm check below.
              concat_state = tf.concat_v2(
                  [state[0][0], state[0][1], state[1], state[2]], 1)
            else:
              concat_state = state
            sess.run(tf.global_variables_initializer())
            output, state = sess.run([output, concat_state])
            # Different inputs so different outputs and states
            for i in range(1, batch_size):
              self.assertTrue(
                  float(np.linalg.norm((output[0, :] - output[i, :]))) > 1e-6)
              self.assertTrue(
                  float(np.linalg.norm((state[0, :] - state[i, :]))) > 1e-6)
  def testAttentionCellWrapperCorrectResult(self):
    """Regression test pinning exact outputs of AttentionCellWrapper.

    Runs one step from a seeded random initial state and compares against
    precomputed golden values, for both state layouts.

    NOTE: the golden values depend on the exact order in which the seeded
    random_uniform ops below are created — do not reorder these statements.
    """
    num_units = 4
    attn_length = 6
    batch_size = 2
    # Golden values precomputed for seed 12345.
    expected_output = np.array(
        [[0.955392, 0.408507, -0.60122, 0.270718],
         [0.903681, 0.331165, -0.500238, 0.224052]],
        dtype=np.float32)
    expected_state = np.array(
        [[
            0.81331915, 0.32036272, 0.28079176, 1.08888793, 0.41264394,
            0.1062041, 0.10444493, 0.32050529, 0.64655536, 0.70794445,
            0.51896095, 0.31809306, 0.58086717, 0.49446869, 0.7641536,
            0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063,
            0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228,
            0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432,
            0.99211812, 0.12295902, 1.01412082, 0.33123279, -0.71114945,
            0.40583119
        ], [
            0.59962207, 0.42597458, -0.22491696, 0.98063421, 0.32548007,
            0.11623692, -0.10100613, 0.27708149, 0.76956916, 0.6360054,
            0.51719815, 0.50458527, 0.73000264, 0.66986895, 0.73576689,
            0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957,
            0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421,
            0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971,
            0.36127412, 0.12125921, 0.99780077, 0.31886846, -0.67595094,
            0.56531656
        ]],
        dtype=np.float32)
    seed = 12345
    tf.set_random_seed(seed)
    for state_is_tuple in [False, True]:
      # Second pass (state_is_tuple=True) reuses the variables created in
      # the first pass, since reuse=state_is_tuple.
      with tf.Session() as sess:
        with tf.variable_scope("state_is_tuple", reuse=state_is_tuple):
          lstm_cell = tf.contrib.rnn.BasicLSTMCell(
              num_units, state_is_tuple=state_is_tuple)
          cell = tf.contrib.rnn.AttentionCellWrapper(
              lstm_cell, attn_length, state_is_tuple=state_is_tuple)
          # Seeded (hence reproducible) random initial LSTM/attention state.
          zeros1 = tf.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 1)
          zeros2 = tf.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 2)
          zeros3 = tf.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 3)
          attn_state_zeros = tf.random_uniform(
              (batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
          zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
          if not state_is_tuple:
            # Concatenated layout expects one flat state tensor.
            zero_state = tf.concat_v2([
                zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
            ], 1)
          inputs = tf.random_uniform(
              (batch_size, num_units), 0.0, 1.0, seed=seed + 5)
          output, state = cell(inputs, zero_state)
          if state_is_tuple:
            # Flatten tuple state so both layouts compare against the
            # same golden matrix.
            state = tf.concat_v2([state[0][0], state[0][1], state[1], state[2]],
                                 1)
          sess.run(tf.global_variables_initializer())
          self.assertAllClose(sess.run(output), expected_output)
          self.assertAllClose(sess.run(state), expected_state)
class LayerNormBasicLSTMCellTest(tf.test.TestCase):
  """Tests for LayerNormBasicLSTMCell.

  NOTE: all the expected values in these test cases have been calculated.
  """

  def testBasicLSTMCell(self):
    """One step through a 2-layer MultiRNNCell of LayerNormBasicLSTMCells,
    then one step through a single cell with input_size != num_units."""
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([1, 2])
        c0 = tf.zeros([1, 2])
        h0 = tf.zeros([1, 2])
        state0 = tf.contrib.rnn.LSTMStateTuple(c0, h0)
        c1 = tf.zeros([1, 2])
        h1 = tf.zeros([1, 2])
        state1 = tf.contrib.rnn.LSTMStateTuple(c1, h1)
        state = (state0, state1)
        cell = tf.contrib.rnn.LayerNormBasicLSTMCell(2)
        cell = tf.contrib.rnn.MultiRNNCell([cell] * 2)
        g, out_m = cell(x, state)
        sess.run([tf.global_variables_initializer()])
        # Feed concrete values through the placeholders-by-name trick.
        res = sess.run([g, out_m],
                       {
                           x.name: np.array([[1., 1.]]),
                           c0.name: 0.1 * np.asarray([[0, 1]]),
                           h0.name: 0.1 * np.asarray([[2, 3]]),
                           c1.name: 0.1 * np.asarray([[4, 5]]),
                           h1.name: 0.1 * np.asarray([[6, 7]]),
                       })

        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_state0_c = np.array([[-1.0, 1.0]])
        expected_state0_h = np.array([[-0.38079708, 0.38079708]])
        expected_state1_c = np.array([[-1.0, 1.0]])
        expected_state1_h = np.array([[-0.38079708, 0.38079708]])

        actual_h = res[0]
        actual_state0_c = res[1][0].c
        actual_state0_h = res[1][0].h
        actual_state1_c = res[1][1].c
        actual_state1_h = res[1][1].h

        self.assertAllClose(actual_h, expected_h, 1e-5)
        self.assertAllClose(expected_state0_c, actual_state0_c, 1e-5)
        self.assertAllClose(expected_state0_h, actual_state0_h, 1e-5)
        self.assertAllClose(expected_state1_c, actual_state1_c, 1e-5)
        self.assertAllClose(expected_state1_h, actual_state1_h, 1e-5)

      with tf.variable_scope("other", initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([1, 3])  # Test BasicLSTMCell with input_size != num_units.
        c = tf.zeros([1, 2])
        h = tf.zeros([1, 2])
        state = tf.contrib.rnn.LSTMStateTuple(c, h)
        cell = tf.contrib.rnn.LayerNormBasicLSTMCell(2)
        g, out_m = cell(x, state)
        sess.run([tf.global_variables_initializer()])
        res = sess.run([g, out_m],
                       {
                           x.name: np.array([[1., 1., 1.]]),
                           c.name: 0.1 * np.asarray([[0, 1]]),
                           h.name: 0.1 * np.asarray([[2, 3]]),
                       })

        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_c = np.array([[-1.0, 1.0]])
        self.assertEqual(len(res), 2)
        self.assertAllClose(res[0], expected_h, 1e-5)
        self.assertAllClose(res[1].c, expected_c, 1e-5)
        self.assertAllClose(res[1].h, expected_h, 1e-5)

  def testBasicLSTMCellWithStateTuple(self):
    """Same 2-layer setup as above, but unpacking the state tuple directly."""
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
        x = tf.zeros([1, 2])
        c0 = tf.zeros([1, 2])
        h0 = tf.zeros([1, 2])
        state0 = tf.contrib.rnn.LSTMStateTuple(c0, h0)
        c1 = tf.zeros([1, 2])
        h1 = tf.zeros([1, 2])
        state1 = tf.contrib.rnn.LSTMStateTuple(c1, h1)
        cell = tf.contrib.rnn.LayerNormBasicLSTMCell(2)
        cell = tf.contrib.rnn.MultiRNNCell([cell] * 2)
        h, (s0, s1) = cell(x, (state0, state1))
        sess.run([tf.global_variables_initializer()])
        res = sess.run([h, s0, s1],
                       {
                           x.name: np.array([[1., 1.]]),
                           c0.name: 0.1 * np.asarray([[0, 1]]),
                           h0.name: 0.1 * np.asarray([[2, 3]]),
                           c1.name: 0.1 * np.asarray([[4, 5]]),
                           h1.name: 0.1 * np.asarray([[6, 7]]),
                       })

        expected_h = np.array([[-0.38079708, 0.38079708]])
        expected_h0 = np.array([[-0.38079708, 0.38079708]])
        expected_c0 = np.array([[-1.0, 1.0]])
        expected_h1 = np.array([[-0.38079708, 0.38079708]])
        expected_c1 = np.array([[-1.0, 1.0]])

        self.assertEqual(len(res), 3)
        self.assertAllClose(res[0], expected_h, 1e-5)
        self.assertAllClose(res[1].c, expected_c0, 1e-5)
        self.assertAllClose(res[1].h, expected_h0, 1e-5)
        self.assertAllClose(res[2].c, expected_c1, 1e-5)
        self.assertAllClose(res[2].h, expected_h1, 1e-5)

  def testBasicLSTMCellWithDropout(self):
    """Checks recurrent dropout: each unit's c is either the kept value
    (c_high) or the dropped value (c_low), with a matching h."""

    def _is_close(x, y, digits=4):
      # BUGFIX: compare the *magnitude* of the difference against the
      # tolerance. The original compared the signed difference (x - y),
      # so any x far below y was wrongly reported as "close", which made
      # the _is_close_in membership assertion below vacuous.
      delta = x - y
      return abs(delta) < 10 ** (-digits)

    def _is_close_in(x, items, digits=4):
      # True when x is close to at least one value in `items`.
      for i in items:
        if _is_close(x, i, digits):
          return True
      return False

    keep_prob = 0.5
    c_high = 2.9998924946
    c_low = 0.999983298578
    h_low = 0.761552567265
    h_high = 0.995008519604
    num_units = 5
    allowed_low = [2, 3]

    with self.test_session() as sess:
      with tf.variable_scope("other", initializer=tf.constant_initializer(1)):
        x = tf.zeros([1, 5])
        c = tf.zeros([1, 5])
        h = tf.zeros([1, 5])
        state = tf.contrib.rnn.LSTMStateTuple(c, h)
        cell = tf.contrib.rnn.LayerNormBasicLSTMCell(
            num_units, layer_norm=False, dropout_keep_prob=keep_prob)
        g, s = cell(x, state)
        sess.run([tf.global_variables_initializer()])
        res = sess.run([g, s],
                       {
                           x.name: np.ones([1, 5]),
                           c.name: np.ones([1, 5]),
                           h.name: np.ones([1, 5]),
                       })

        # Since the returned tensors are of size [1,n]
        # get the first component right now.
        actual_h = res[0][0]
        actual_state_c = res[1].c[0]
        actual_state_h = res[1].h[0]

        # For each item in `c` (the cell inner state) check that
        # it is equal to one of the allowed values `c_high` (not
        # dropped out) or `c_low` (dropped out) and verify that the
        # corresponding item in `h` (the cell activation) is coherent.
        # Count the dropped activations and check that their number is
        # coherent with the dropout probability.
        dropped_count = 0
        self.assertTrue((actual_h == actual_state_h).all())
        for citem, hitem in zip(actual_state_c, actual_state_h):
          self.assertTrue(_is_close_in(citem, [c_low, c_high]))
          if _is_close(citem, c_low):
            self.assertTrue(_is_close(hitem, h_low))
            dropped_count += 1
          elif _is_close(citem, c_high):
            self.assertTrue(_is_close(hitem, h_high))
        self.assertIn(dropped_count, allowed_low)
# Allow running this test file directly: dispatch to the TensorFlow test runner.
if __name__ == "__main__":
  tf.test.main()
| |
'''
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from django.core import serializers
from django.contrib import messages
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response, redirect, render
from django.template.context import RequestContext
import logging
import os
import pickle
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse,
JsonResponse,
HttpResponseBadRequest,
HttpResponseRedirect
)
from docfish.settings import (
PAGINATION_SIZE,
SNACK_PRICE
)
from docfish.apps.snacks.models import (
Snack,
SnackBox
)
from docfish.apps.users.utils import (
count_annotations_bydate,
get_user
)
from docfish.apps.snacks.utils import (
get_snack,
get_snacks,
get_user_snacks,
get_snacks_tags,
request_snack,
paginate_results
)
from django.db.models import Q
import hashlib
##################################################################################
# SNACK VIEWS ####################################################################
##################################################################################
def snacks_home(request):
    '''Landing page introducing the user to docfishSnacks.

    Anonymous visitors get the plain page; authenticated users also get
    their snack collection in the template context.
    '''
    context = {}
    if request.user.is_authenticated():
        context["snacks"] = get_user_snacks(request.user)
    return render(request, "snacks/snacks_home.html", context)
@login_required
def snack_details(request, sid):
    '''Show the detail page for a single snack.

    :param sid: primary key of the snack to display
    '''
    context = {
        "snack": get_snack(sid),
        "snackbox": get_user_snacks(request.user, return_snackbox=True),
    }
    return render(request, "snacks/snack_details.html", context)
@login_required
def redeem_snacks(request):
    '''Allow a user to request (redeem) an earned snack.

    On POST, if the user's snackbox has earned a snack, submit the request
    (with an optional message) and show a confirmation flash message. The
    context flag "worthy" tells the template whether another snack can
    still be earned afterwards.
    '''
    snackbox = get_user_snacks(request.user, return_snackbox=True)
    context = {"snackbox": snackbox}

    # If it's a post, they are asking to redeem a snack
    if request.method == "POST":
        message = request.POST.get('message', None)
        if snackbox.earned_snack:  # idiomatic truthiness (was "== True")
            snackbox = request_snack(snackbox, message=message)
            messages.info(request, "Snack request submit!")

    # Can they earn another snack?
    if snackbox.earned_snack:
        context['worthy'] = "yes"

    return render(request, "snacks/redeem_snacks.html", context)
@login_required
def snack_category(request, cid, page=None):
    '''Show the snacks belonging to a single category (tag).

    :param cid: tag id used to filter the snack set
    :param page: optional page number, forwarded to all_snacks
    '''
    filtered = get_snacks(tag_ids=cid)
    return all_snacks(request, page=page, snacks=filtered)
@login_required
def all_snacks(request, page=None, snacks=None, tags=None):
    '''Show the user available snacks for selection, paginated.

    :param page: if defined, return a specific page of snacks (default 1)
    :param snacks: if defined, don't retrieve the entire set
    :param tags: if defined, show a custom set of tags
    '''
    if page is None:
        page = 1
    if snacks is None:
        snacks = get_snacks()
    if tags is None:
        tags = get_snacks_tags(snacks)

    if len(snacks) >= PAGINATION_SIZE:
        # BUGFIX: the page count is the number of snacks divided by the
        # page size, rounded up — the original had the division inverted
        # (int(PAGINATION_SIZE / len(snacks))), which always yielded 0 or 1.
        total_pages = (len(snacks) + PAGINATION_SIZE - 1) // PAGINATION_SIZE
        page_list = list(range(1, total_pages + 1))

        # Break results into pagination size
        snacks = paginate_results(snacks,
                                  size=PAGINATION_SIZE,
                                  page=page)
    else:
        total_pages = 1
        page_list = [1]

    # Get user snacks for snackbox updating
    usersnacks = get_user_snacks(request.user,
                                 return_ids=True)

    if len(snacks) == 0:
        messages.info(request, "Uhoh, we didn't find any snacks! Why not try a search?")
        return search_view(request)

    context = {"snacks": snacks,
               "nosidebar": 'pickledawg',
               "tags": tags,
               "page": page,
               "last_page": total_pages,
               "pages": page_list,
               "usersnacks": usersnacks}
    return render(request, "snacks/all_snacks.html", context)
def view_snacks(request, user=None):
    '''Display a user's snacks grouped by tag, plus snackbox statistics.

    The view is public: when no user is given, show the requesting user's
    snacks; anonymous visitors without an explicit target are redirected.
    '''
    if user is None:
        user = request.user
    # View is not login only, so non authenticated without user need redirect
    elif request.user.is_anonymous():
        messages.info(request, "You must be logged in to see your snacks!")
        return redirect('collections')

    context = {
        "categories": get_user_snacks(user, by_tag=True),
        "user": user,
        "dates": count_annotations_bydate(user),
        "snackbox": get_user_snacks(user, return_snackbox=True),  # TODO: look into why count is off
        "snack_price": SNACK_PRICE,
    }
    return render(request, "snacks/my_snacks.html", context)
def user_snacks(request, uid):
    '''Public view of another user's snackbox.

    :param uid: id of the user whose snacks to show
    '''
    return view_snacks(request, user=get_user(uid))
@login_required
def disable_snack(request, sid):
    '''Mark a snack as inactive and report the result as JSON.

    NOTE(review): this mutates state on a GET request while add/remove
    require POST — consider requiring POST here too; confirm callers first.
    '''
    snack = get_snack(sid)
    if not request.user.is_authenticated():
        return JsonResponse({"message": "Error disabling snack!"})
    snack.active = False
    snack.save()
    return JsonResponse({"message": "Snack disabled"})
@login_required
def enable_snack(request, sid):
    '''Mark a snack as active and report the result as JSON.

    NOTE(review): mutates state on GET, mirroring disable_snack; consider
    requiring POST — confirm callers first.
    '''
    snack = get_snack(sid)
    if not request.user.is_authenticated():
        return JsonResponse({"message": "Error enabling snack!"})
    snack.active = True
    snack.save()
    return JsonResponse({"message": "Snack enabled"})
##################################################################################
# USER SNACK VIEWS ###############################################################
##################################################################################
@login_required
def add_snack(request, sid):
    '''Add a snack to the requesting user's snackbox (POST only).'''
    snack = get_snack(sid)
    if request.method != "POST":
        return JsonResponse({"message": "Error adding snack!"})
    snackbox = get_user_snacks(request.user, return_snackbox=True)
    snackbox.snacks.add(snack)
    snackbox.save()
    return JsonResponse({"message": "Snack added successfully"})
@login_required
def remove_snack(request, sid):
    '''Remove a snack from the requesting user's snackbox (POST only).'''
    snack = get_snack(sid)
    if request.method != "POST":
        return JsonResponse({"message": "Error removing snack!"})
    snackbox = get_user_snacks(request.user, return_snackbox=True)
    snackbox.snacks.remove(snack)
    snackbox.save()
    return JsonResponse({"message": "Snack removed successfully"})
##################################################################################
# SNACK SEARCH VIEWS #############################################################
##################################################################################
def search_view(request):
    '''Render the snack search page with the full tag list.'''
    return render(request, 'search/search.html', {'tags': get_snacks_tags()})
def snack_search(request):
    '''Show results for a snack search.

    Serves both the ajax search box (GET with "q") and the full search
    page (POST with "q"); requests without a query are redirected back
    to the search page.
    '''
    query = None
    if request.is_ajax():
        query = request.GET.get('q')
    if request.method == "POST":
        query = request.POST.get('q')

    if query is None:
        return redirect('search')

    # Match on tag name, snack name, id, or url.
    results = Snack.objects.filter(
        Q(tags__name__contains=query) |
        Q(name__contains=query) |
        Q(id__contains=query) |
        Q(url__contains=query)).order_by('name').distinct()

    context = {"results": results,
               "submit_result": "anything",
               "tags": get_snacks_tags()}

    template = 'search/result.html' if request.is_ajax() else 'search/search_full.html'
    return render(request, template, context)
| |
# Roulette.py was created by Redjumpman for Redbot
# On first load this creates a data folder and a russian.json settings file
import os
import random
import asyncio
from time import gmtime, strftime
from discord.ext import commands
from .utils.dataIO import dataIO
from .utils import checks
from __main__ import send_cmd_help
class Russianroulette:
    """Allows 6 players to play Russian Roulette"""

    def __init__(self, bot):
        self.bot = bot
        self.file_path = "data/JumperCogs/roulette/russian.json"
        self.system = dataIO.load_json(self.file_path)
        # Death flavor messages. Format placeholders: {0} = the player who
        # died, {1} = a random surviving player, {2} = time of death.
        # BUGFIX: three adjacent entries were missing separating commas, so
        # Python silently concatenated them into single garbled messages
        # and the list was shorter than intended.
        self.kill_message = ["I was really pulling for {0} too. Oh well!",
                             "I guess {0} really wasn't a pea-brain!",
                             "Ahhh now that {0} is gone we can quit playing! No? Ok fine!",
                             ("All things considered, I think we can all agree that {0} was a "
                              "straight shooter."),
                             "Noooooooo. Not {0}!", "I call dibs on {0}\'s stuff. Too soon?",
                             "Well I guess {0} and I won't be doing that thing anymore...",
                             "Here lies {0}. A loser.", "RIP {0}.", "I kinda hated {0} anyway.",
                             "Hey {0}! I'm back with your snacks! Oh...",
                             "{0}, you\'re like modern art now!", "Called it!",
                             "Really guys? {0}\'s dead? Well this server officially blows now.",
                             "Does this mean I don't have to return the book {0} lent me?",
                             "Oh come on! Now {0}\'s blood is all over my server!",
                             "I\'ll always remember {0}...", "Well at least {0} stopped crying.",
                             "Don\'t look at me. You guys are cleaning up {0}.",
                             "What I'm not crying. *sniff*", "I TOLD YOU, YOU COULD DO IT!",
                             "Well I'm sure someone will miss you, {0}.", "Never forget. {0}.",
                             "Yeah. Real smart guys. Just kill off all the fun people.",
                             "I think I got some splatter on me. Gross",
                             "I told you it would blow your mind!", "Well this is fun...",
                             "I go to get popcorn and you all start without me. Rude.",
                             "Oh God. Just before {0} pulled the trigger they shit their pants.",
                             "I guess I\'ll dig this hole a little bigger...",
                             "10/10 would watch {0} blow their brains out again.",
                             "Well I hope {0} has life insurance...",
                             "See you in the next life, {0}", "AND THEIR OFF! Oh... wrong game.",
                             "I don\'t know how, but I think {1} cheated.",
                             "{0} always said they wanted to go out with a bang.",
                             "So don\'t sing *another one bites the dust* ?",
                             "I can\'t tell if the smile on {1}\'s face is relief or insanity.",
                             "Oh stop crying {1}. {0} knew what they were getting into.",
                             "So that\'s what a human looks like on the inside!",
                             "My condolences {1}. I know you were *so* close to {0}.",
                             "GOD NO. PLEASE NO. PLEASE GOD NO. NOOOOOOOOOOOOOOOOOOOOOOO!",
                             "Time of death {2}. Cause: Stupidity.", "BOOM HEADSHOT! Sorry...",
                             "Don\'t act like you didn\'t enjoy that, {1}!",
                             "Is it weird that I wish {1} was dead instead?",
                             "Oh real great. {0} dies and I\'m still stuck with {1}. Real. Great.",
                             "Are you eating cheetos? Have some respect {1}! {0} just died!"]
        self.version = "2.1"

    @commands.group(pass_context=True, no_pm=True)
    async def setrussian(self, ctx):
        """Russian Roulette Settings"""
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)

    @commands.command(name="rrversion", pass_context=True)
    @checks.admin_or_permissions(manage_server=True)
    async def _version_setrussian(self, ctx):
        """Shows the version of Russian Roulette"""
        # BUGFIX: pass_context=True makes discord.py pass a Context as the
        # first argument; the original signature was missing `ctx`, so
        # invoking this command raised a TypeError.
        await self.bot.say("You are currently running Russian Roulette version "
                           "{}".format(self.version))

    @setrussian.command(name="minbet", pass_context=True)
    @checks.admin_or_permissions(manage_server=True)
    async def _minbet_setrussian(self, ctx, bet: int):
        """Set the minimum starting bet for Russian Roulette games"""
        server = ctx.message.server
        settings = self.check_server_settings(server)
        if bet > 0:
            settings["System"]["Min Bet"] = bet
            dataIO.save_json(self.file_path, self.system)
            msg = "The initial bet to play russian roulette is set to {}".format(bet)
        else:
            msg = "I need a number higher than 0."
        await self.bot.say(msg)

    @commands.command(pass_context=True, no_pm=True)
    @checks.admin_or_permissions(manage_server=True)
    async def resetrr(self, ctx):
        """Reset command if game is stuck."""
        server = ctx.message.server
        settings = self.check_server_settings(server)
        self.reset_game(settings)
        await self.bot.say("Russian Roulette system has been reset.")

    @commands.command(pass_context=True, no_pm=True, aliases=["rr"])
    async def russian(self, ctx, bet: int):
        """Join (or start) a game of Russian Roulette with the given bet."""
        user = ctx.message.author
        server = ctx.message.server
        settings = self.check_server_settings(server)
        if await self.logic_checks(settings, user, bet):
            if settings["System"]["Roulette Initial"]:
                # A game is being gathered: try to join it.
                if user.id in settings["Players"]:
                    msg = "You are already in the circle. Don\'t be so eager to die."
                elif len(settings["Players"].keys()) >= 6:
                    msg = "Sorry. The max amount of players is 6."
                else:
                    if bet == settings["System"]["Start Bet"]:
                        self.player_add(settings, user, bet)
                        self.subtract_credits(settings, user, bet)
                        msg = "{} has joined the roulette circle".format(user.name)
                    else:
                        start_bet = settings["System"]["Start Bet"]
                        msg = "Your bet must be equal to {}.".format(start_bet)
                await self.bot.say(msg)
            else:
                # First player: open the table and wait for others.
                self.initial_set(settings, bet)
                self.player_add(settings, user, bet)
                self.subtract_credits(settings, user, bet)
                await self.bot.say("{} has started a game of roulette with a starting bet of "
                                   "{}\nThe game will start in 30 seconds or when 5 more "
                                   "players join.".format(user.name, bet))
                await asyncio.sleep(30)
                if len(settings["Players"].keys()) == 1:
                    await self.bot.say("Sorry I can't let you play by yourself, that's just "
                                       "suicide.\nTry again when you find some 'friends'.")
                    self.reset_game(settings)
                else:
                    await self.bot.say("Gather around! The game of russian roulette is starting.\n"
                                       "I'm going to load a round into this six shot revovler, "
                                       "give it a good spin, and pass it off to someone at random."
                                       "If everyone is lucky enough to have a turn, I\'ll start "
                                       "all over. Good luck!")
                    await asyncio.sleep(5)
                    await self.roulette_game(settings, server)
                    self.reset_game(settings)

    async def logic_checks(self, settings, user, bet):
        """Return True when the user may join/start a game, else explain why not."""
        if settings["System"]["Active"]:
            await self.bot.say("A game of roulette is already active. Wait for it to end.")
            return False
        elif bet < settings["System"]["Min Bet"]:
            min_bet = settings["System"]["Min Bet"]
            await self.bot.say("Your bet must be greater than or equal to {}.".format(min_bet))
            return False
        elif len(settings["Players"].keys()) >= 6:
            await self.bot.say("There are too many players playing at the moment")
            return False
        elif not self.enough_credits(user, bet):
            await self.bot.say("You do not have enough credits or may need to register a bank "
                               "account")
            return False
        else:
            return True

    async def roulette_game(self, settings, server):
        """Run rounds until a single player remains, then pay out the pot."""
        pot = settings["System"]["Pot"]
        turn = 0
        count = len(settings["Players"].keys())
        while count > 0:
            players = [server.get_member(x) for x in list(settings["Players"].keys())]
            if count > 1:
                count -= 1
                turn += 1
                await self.roulette_round(settings, server, players, turn)
            else:
                winner = players[0]
                await self.bot.say("Congratulations {}, you're the only person alive. Enjoy your "
                                   "blood money...\n{} credits were deposited into {}\'s "
                                   "account".format(winner.mention, pot, winner.name))
                bank = self.bot.get_cog("Economy").bank
                bank.deposit_credits(winner, pot)
                break

    async def roulette_round(self, settings, server, players, turn):
        """Play one round: pass the revolver until someone is eliminated."""
        roulette_circle = players[:]
        chamber = 6
        await self.bot.say("*{} put one round into the six shot revolver and gave it a good spin. "
                           "With a flick of the wrist, it locks in place."
                           "*".format(self.bot.user.name))
        await asyncio.sleep(4)
        await self.bot.say("Let's begin round {}.".format(turn))
        while chamber >= 1:
            if not roulette_circle:
                roulette_circle = players[:]  # Restart the circle when list is exhausted
            # The kill odds increase as chambers are used up (1-in-chamber).
            chance = random.randint(1, chamber)
            player = random.choice(roulette_circle)
            await self.bot.say("{} presses the revolver to their temple and slowly squeezes the "
                               "trigger...".format(player.name))
            if chance == 1:
                await asyncio.sleep(4)
                msg = "**BOOM**\n```{} died and was removed from the group.```".format(player.name)
                await self.bot.say(msg)
                msg2 = random.choice(self.kill_message)
                settings["Players"].pop(player.id)
                remaining = [server.get_member(x) for x in list(settings["Players"].keys())]
                player2 = random.choice(remaining)
                death_time = strftime("%H:%M:%S", gmtime())
                await asyncio.sleep(5)
                await self.bot.say(msg2.format(player.name, player2.name, death_time))
                await asyncio.sleep(5)
                break
            else:
                await asyncio.sleep(4)
                await self.bot.say("**CLICK**\n```{} survived and passed the "
                                   "revolver.```".format(player.name))
                await asyncio.sleep(3)
                roulette_circle.remove(player)
                chamber -= 1

    def reset_game(self, settings):
        """Clear the pot, flags, and player roster for this server."""
        settings["System"]["Pot"] = 0
        settings["System"]["Active"] = False
        settings["System"]["Start Bet"] = 0
        settings["System"]["Roulette Initial"] = False
        settings["Players"] = {}

    def player_add(self, settings, user, bet):
        """Register a player and add their bet to the pot."""
        settings["System"]["Pot"] += bet
        settings["Players"][user.id] = {"Name": user.name,
                                        "Mention": user.mention,
                                        "Bet": bet}

    def initial_set(self, settings, bet):
        """Mark a game as being gathered and record its entry bet."""
        settings["System"]["Start Bet"] = bet
        settings["System"]["Roulette Initial"] = True

    def subtract_credits(self, settings, user, bet):
        """Withdraw the bet from the user's Economy bank account."""
        bank = self.bot.get_cog('Economy').bank
        bank.withdraw_credits(user, bet)

    def enough_credits(self, user, amount):
        """True when the user has a bank account that can cover `amount`."""
        bank = self.bot.get_cog('Economy').bank
        if bank.account_exists(user):
            if bank.can_spend(user, amount):
                return True
            else:
                return False
        else:
            return False

    def check_server_settings(self, server):
        """Return this server's settings dict, creating defaults on first use."""
        if server.id not in self.system["Servers"]:
            default = {"System": {"Pot": 0,
                                  "Active": False,
                                  "Start Bet": 0,
                                  "Roulette Initial": False,
                                  "Min Bet": 50},
                       "Players": {}
                       }
            self.system["Servers"][server.id] = default
            dataIO.save_json(self.file_path, self.system)
            print("Creating default russian roulette settings for Server: {}".format(server.name))
            path = self.system["Servers"][server.id]
            return path
        else:
            path = self.system["Servers"][server.id]
            return path
def check_folders():
    '''Ensure the roulette data folder exists, creating it on first load.'''
    if os.path.exists("data/JumperCogs/roulette"):
        return
    print("Creating data/JumperCogs/roulette folder...")
    os.makedirs("data/JumperCogs/roulette")
def check_files():
    '''Create a default russian.json settings file when missing or invalid.'''
    path = "data/JumperCogs/roulette/russian.json"
    if not dataIO.is_valid_json(path):
        print("Creating default russian.json...")
        dataIO.save_json(path, {"Servers": {}})
def setup(bot):
    # Cog entry point called by Red on load: make sure the data folder and
    # settings file exist before registering the cog.
    check_folders()
    check_files()
    bot.add_cog(Russianroulette(bot))
| |
# -*- coding: utf-8 -*-
import datetime
from django.template.defaultfilters import slugify
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from django.utils.translation import ugettext_lazy as _, override
from django.utils.timezone import now
from cms.utils.i18n import get_current_language
from cms.models.fields import PlaceholderField
from cms.models.pluginmodel import CMSPlugin
from djangocms_text_ckeditor.fields import HTMLField
from filer.fields.image import FilerImageField
from taggit.models import (GenericTaggedItemBase as TaggitGenericTaggedItemBase,
ItemBase as TaggitItemBase)
from taggit.managers import TaggableManager
from hvad.models import TranslatableModel, TranslatedFields
from hvad.utils import get_translation
from unidecode import unidecode
from .managers import (
CategoryManager,
RelatedManager,
PublishedManager,
TagManager,
)
def get_slug_in_language(record, language):
    """Return `record`'s slug in `language`, or None when unavailable.

    Uses the hvad translation cache when it already holds the requested
    language; otherwise falls back to a database lookup.
    """
    if not record:
        return None
    # possibly no need to hit db, try cache
    if hasattr(record, record._meta.translations_cache) and language == record.language_code:
        return record.lazy_translation_getter('slug')
    # hit db
    try:
        translation = get_translation(record, language_code=language)
    except models.ObjectDoesNotExist:
        return None
    return translation.slug
def get_page_url(name, language):
    """Reverse the named page URL.

    Raises ImproperlyConfigured with a helpful message when the URL cannot
    be reversed (i.e. no page translation exists for `language`).
    """
    try:
        return reverse(name)
    except NoReverseMatch:
        error = _("There is no page translation for the language: %(lang)s"
                  % {'lang': language})
        raise ImproperlyConfigured(error)
class Category(TranslatableModel):
    """News category with hvad-translated name and slug.

    Slugs are unique per language and are used to build category URLs.
    """
    translations = TranslatedFields(
        name=models.CharField(_('Name'), max_length=255),
        slug=models.SlugField(_('Slug'), max_length=255, blank=True,
            help_text=_('Auto-generated. Clean it to have it re-created. '
                        'WARNING! Used in the URL. If changed, the URL will change. ')),
        meta={'unique_together': [['slug', 'language_code']]}
    )
    # Manual sort position for category listings (see Meta.ordering).
    ordering = models.IntegerField(_('Ordering'), default=0)

    objects = CategoryManager()

    class Meta:
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')
        ordering = ['ordering']

    def __unicode__(self):
        # Fall back to the pk when no translated name is available.
        return self.lazy_translation_getter('name', str(self.pk))

    def get_absolute_url(self, language=None):
        """Return the category URL in `language` (default: current language).

        Falls back to the latest-news page — or '/' when even that page is
        not configured — if the category has no translation in `language`.
        """
        language = language or get_current_language()
        slug = get_slug_in_language(self, language)
        with override(language):
            if not slug:  # category not translated in given language
                try:
                    return get_page_url('latest-news', language)
                except ImproperlyConfigured:
                    return '/'
            kwargs = {'category_slug': slug}
            return reverse('news-category', kwargs=kwargs)
class Tag(TranslatableModel):
    """News tag with hvad-translated name and per-language slug."""
    translations = TranslatedFields(
        name=models.CharField(_('Name'), max_length=255),
        slug=models.SlugField(verbose_name=_('Slug'), max_length=100),
        meta={'unique_together': [['slug', 'language_code']]}
    )

    objects = TagManager()

    def __unicode__(self):
        return self.name

    @classmethod
    def save_translations(cls, instance, **kwargs):
        """Regenerate the slug from the (transliterated) translated name
        before the cached translation is saved."""
        opts = cls._meta
        if hasattr(instance, opts.translations_cache):
            trans = getattr(instance, opts.translations_cache)
            if not trans.master_id:
                trans.master = instance
            # unidecode transliterates non-ASCII names so slugify keeps them.
            trans.slug = slugify(unidecode(trans.name))
            trans.save()
class TaggedItemBase(TaggitItemBase):
    """Abstract through-model base linking ``Tag`` to arbitrary content objects."""

    tag = models.ForeignKey(Tag, related_name="%(app_label)s_%(class)s_items")

    class Meta:
        abstract = True

    @classmethod
    def tags_for(cls, model, instance=None):
        """Return the tags attached to ``instance``, or every tag in use.

        NOTE(review): ``model`` is unused here; the lookup goes through the
        tag relation name only -- confirm against taggit's expected signature.
        """
        if instance is not None:
            return cls.tag_model().objects.filter(**{
                '%s__content_object' % cls.tag_relname(): instance
            })
        # No instance: all tags attached to at least one object.
        return cls.tag_model().objects.filter(**{
            '%s__content_object__isnull' % cls.tag_relname(): False
        }).distinct()
class TaggedItem(TaggitGenericTaggedItemBase, TaggedItemBase):
    """Concrete generic (content-type based) through-model for tagging."""

    class Meta:
        verbose_name = _("Tagged Item")
        verbose_name_plural = _("Tagged Items")
class News(TranslatableModel):
    """Translatable news entry with a publication window, category and tags."""

    # Thumbnail geometry for displays of the key visual; project-overridable.
    THUMBNAIL_SIZE = getattr(settings, 'ALDRYN_NEWS_ITEM_THUMBNAIL_SIZE', '100x100')

    translations = TranslatedFields(
        title=models.CharField(_('Title'), max_length=255),
        slug=models.CharField(_('Slug'), max_length=255, blank=True,
                              help_text=_('Auto-generated. Clean it to have it re-created. '
                                          'WARNING! Used in the URL. If changed, the URL will change. ')),
        lead_in=HTMLField(_('Lead-in'),
                          help_text=_('Will be displayed in lists, and at the start of the detail page')),
        # slug must be unique per language, not globally
        meta={'unique_together': [['slug', 'language_code']]}
    )
    key_visual = FilerImageField(verbose_name=_('Key Visual'), blank=True, null=True)
    content = PlaceholderField('blog_post_content')
    publication_start = models.DateTimeField(_('Published Since'), default=datetime.datetime.now,
                                             help_text=_('Used in the URL. If changed, the URL will change.'))
    publication_end = models.DateTimeField(_('Published Until'), null=True, blank=True)
    category = models.ForeignKey(Category, verbose_name=_('Category'), blank=True, null=True,
                                 help_text=_('WARNING! Used in the URL. If changed, the URL will change.'))

    objects = RelatedManager()
    # Manager restricted to currently published entries.
    published = PublishedManager()
    tags = TaggableManager(blank=True, through=TaggedItem)

    class Meta:
        verbose_name = _('News')
        verbose_name_plural = _('News')
        ordering = ['-publication_start']

    def __unicode__(self):
        # Fall back to the pk when no translation is available.
        return self.lazy_translation_getter('title', str(self.pk))

    def get_absolute_url(self, language=None):
        """Return the detail URL for ``language`` (default: current language).

        When the entry has no translation in the requested language, falls
        back to the category page, then the 'latest-news' page, then '/'.
        """
        language = language or get_current_language()
        slug = get_slug_in_language(self, language)
        with override(language):
            if not slug:  # news not translated in given language
                if self.category:
                    return self.category.get_absolute_url(language=language)
                try:
                    return get_page_url('latest-news', language)
                except ImproperlyConfigured:
                    return '/'
            # Date components are part of the URL (see publication_start
            # help_text above).
            kwargs = {
                'year': self.publication_start.year,
                'month': self.publication_start.month,
                'day': self.publication_start.day,
                'slug': slug
            }
            category_slug = get_slug_in_language(self.category, language)
            if category_slug:
                kwargs['category_slug'] = category_slug
            return reverse('news-detail', kwargs=kwargs)
class LatestNewsPlugin(CMSPlugin):
    """CMS plugin config: show the N latest published news, optionally
    restricted to chosen tags."""

    FULL = 'full'
    SIMPLE = 'simple'
    TYPES = (
        (FULL, _("Full list")),
        (SIMPLE, _("Simple list")),
    )
    latest_entries = models.PositiveSmallIntegerField(default=5, help_text=_('The number of latests entries to be displayed.'))
    type_list = models.CharField(verbose_name=_("Type of list"), choices=TYPES, default=FULL, max_length=255)
    tags = models.ManyToManyField('taggit.Tag', blank=True, help_text=_('Show only the news tagged with chosen tags.'))

    def __unicode__(self):
        return str(self.latest_entries)

    def copy_relations(self, oldinstance):
        # M2M selections must be carried over when the plugin is copied
        # (e.g. on page publish).
        self.tags = oldinstance.tags.all()

    def get_news(self):
        """Return up to ``latest_entries`` published news, restricted to
        the selected tags when any are chosen."""
        news = News.published.language(self.language).select_related('category')
        tags = list(self.tags.all())
        if tags:
            tagged_news = News.objects.filter(tags__in=tags)
            # id__in subquery keeps the `published` filtering while also
            # restricting by tag.
            news = news.filter(id__in=tagged_news)
        return news[:self.latest_entries]
class NewsLinksPlugin(CMSPlugin):
    """CMS plugin config: render a hand-picked selection of news entries."""

    news = models.ManyToManyField(News, verbose_name=_("News"))

    def copy_relations(self, oldinstance):
        # Keep the selection when the plugin is copied on publish.
        self.news = oldinstance.news.all()

    def get_news(self):
        return self.news.all()
| |
'''
Base netlink socket and marshal
===============================
All the netlink providers are derived from the socket
class, so they provide normal socket API, including
`getsockopt()`, `setsockopt()`, they can be used in
poll/select I/O loops etc.
asynchronous I/O
----------------
To run async reader thread, one should call
`NetlinkSocket.bind(async=True)`. In that case a
background thread will be launched. The thread will
automatically collect all the messages and store
into a userspace buffer.
.. note::
There is no need to turn on async I/O, if you
don't plan to receive broadcast messages.
ENOBUF and async I/O
--------------------
When Netlink messages arrive faster than a program
reads them from the socket, the messages overflow
the socket buffer and one gets ENOBUF on `recv()`::
... self.recv(bufsize)
error: [Errno 105] No buffer space available
One way to avoid ENOBUF, is to use async I/O. Then the
library not only reads and buffers all the messages, but
also re-prioritizes threads. Suppressing the parser
activity, the library increases the response delay, but
spares CPU to read and enqueue arriving messages as
fast, as it is possible.
With logging level DEBUG you can notice messages, that
the library started to calm down the parser thread::
DEBUG:root:Packet burst: the reader thread priority
is increased, beware of delays on netlink calls
Counters: delta=25 qsize=25 delay=0.1
This state requires no immediate action, but just some
more attention. When the delay between messages on the
parser thread exceeds 1 second, DEBUG messages become
WARNING ones::
WARNING:root:Packet burst: the reader thread priority
is increased, beware of delays on netlink calls
Counters: delta=2525 qsize=213536 delay=3
This state means, that almost all the CPU resources are
dedicated to the reader thread. It doesn't mean, that
the reader thread consumes 100% CPU -- it means, that the
CPU is reserved for the case of more intensive bursts. The
library will return to the normal state only when the
broadcast storm will be over, and then the CPU will be
100% loaded with the parser for some time, when it will
process all the messages queued so far.
when async I/O doesn't help
---------------------------
Sometimes, even turning async I/O doesn't fix ENOBUF.
Mostly it means, that in this particular case the Python
performance is not enough even to read and store the raw
data from the socket. There is no workaround for such
cases, except by using something *not* Python-based.
One can still play around with SO_RCVBUF socket option,
but it doesn't help much. So keep it in mind, and if you
expect massive broadcast Netlink storms, perform stress
testing prior to deploy a solution in the production.
classes
-------
'''
import os
import time
import struct
import logging
import traceback
import threading
from socket import AF_NETLINK
from socket import SOCK_DGRAM
from socket import MSG_PEEK
from socket import SOL_SOCKET
from socket import SO_RCVBUF
from socket import SO_SNDBUF
from socket import error as SocketError
from pyroute2.config import SocketBase
from pyroute2.common import AddrPool
from pyroute2.common import DEFAULT_RCVBUF
from pyroute2.netlink import nlmsg
from pyroute2.netlink import mtypes
from pyroute2.netlink import NetlinkError
from pyroute2.netlink import NetlinkDecodeError
from pyroute2.netlink import NetlinkHeaderDecodeError
from pyroute2.netlink import NLMSG_ERROR
from pyroute2.netlink import NLMSG_DONE
from pyroute2.netlink import NETLINK_GENERIC
from pyroute2.netlink import NLM_F_DUMP
from pyroute2.netlink import NLM_F_MULTI
from pyroute2.netlink import NLM_F_REQUEST
try:
from Queue import Queue
except ImportError:
from queue import Queue
class Marshal(object):
    '''
    Generic marshalling class

    Turns a raw byte buffer into a list of decoded netlink messages,
    choosing the message class per message type from `msg_map`.
    '''

    # netlink message type -> nlmsg subclass used for decoding
    msg_map = {}
    debug = False

    def __init__(self):
        self.lock = threading.Lock()
        # one marshal instance can be used to parse one
        # message at once
        self.msg_map = self.msg_map or {}
        self.defragmentation = {}

    def parse(self, data):
        '''
        Parse string data.

        At this moment all transport, except of the native
        Netlink is deprecated in this library, so we should
        not support any defragmentation on that level
        '''
        offset = 0
        result = []
        while offset < len(data):
            # pick type and length from the message header
            (length, msg_type) = struct.unpack('IH', data[offset:offset+6])
            error = None
            if msg_type == NLMSG_ERROR:
                # nlmsgerr carries a negative errno right after the header
                code = abs(struct.unpack('i', data[offset+16:offset+20])[0])
                if code > 0:
                    error = NetlinkError(code)
            # unknown types fall back to the generic nlmsg class
            msg_class = self.msg_map.get(msg_type, nlmsg)
            msg = msg_class(data[offset:offset+length], debug=self.debug)
            try:
                msg.decode()
                msg['header']['error'] = error
                # try to decode encapsulated error message
                if error is not None:
                    enc_type = struct.unpack('H', msg.raw[24:26])[0]
                    enc_class = self.msg_map.get(enc_type, nlmsg)
                    enc = enc_class(msg.raw[20:])
                    enc.decode()
                    msg['header']['errmsg'] = enc
            except NetlinkHeaderDecodeError as e:
                # in the case of header decoding error,
                # create an empty message
                msg = nlmsg()
                msg['header']['error'] = e
            except NetlinkDecodeError as e:
                msg['header']['error'] = e
            mtype = msg['header'].get('type', None)
            if mtype in (1, 2, 3, 4):
                # tag core message types with a symbolic event name
                msg['event'] = mtypes.get(mtype, 'none')
            self.fix_message(msg)
            offset += msg.length
            result.append(msg)
        return result

    def fix_message(self, msg):
        # hook for subclasses to post-process decoded messages
        pass
# 8<-----------------------------------------------------------
# Singleton, containing possible modifiers to the NetlinkSocket
# bind() call.
#
# Normally, you can open only one netlink connection for one
# process, but there is a hack. Current PID_MAX_LIMIT is 2^22,
# so we can use the rest to modify the pid field.
#
# See also libnl library, lib/socket.c:generate_local_port()
sockets = AddrPool(minaddr=0x0,
maxaddr=0x3ff,
reverse=True)
# 8<-----------------------------------------------------------
class LockProxy(object):
    """Reference-counted wrapper around a lock allocated via ``factory``.

    When the last holder of a non-zero key releases it, the proxy
    unregisters itself from ``factory.locks`` (key 0 is permanent).
    """

    def __init__(self, factory, key):
        self.factory = factory
        self.refcount = 0
        self.key = key
        self.internal = threading.Lock()
        self.lock = factory.klass()

    def acquire(self, *argv, **kwarg):
        with self.internal:
            self.refcount += 1
            return self.lock.acquire()

    def release(self):
        with self.internal:
            self.refcount -= 1
            if self.refcount == 0 and self.key != 0:
                # last holder gone: drop the proxy from the registry;
                # it may already have been removed
                try:
                    del self.factory.locks[self.key]
                except KeyError:
                    pass
            return self.lock.release()

    def __enter__(self):
        self.acquire()

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
class LockFactory(object):
    """Registry of LockProxy objects keyed by arbitrary hashables.

    Key 0 is the default lock and always exists; using the factory
    itself as a context manager acquires/releases that default lock.
    """

    def __init__(self, klass=threading.RLock):
        self.klass = klass
        self.locks = {0: LockProxy(self, 0)}

    def __enter__(self):
        self.locks[0].acquire()

    def __exit__(self, exc_type, exc_value, traceback):
        self.locks[0].release()

    def __getitem__(self, key):
        # None is an alias for the default lock
        index = 0 if key is None else key
        try:
            return self.locks[index]
        except KeyError:
            proxy = LockProxy(self, index)
            self.locks[index] = proxy
            return proxy

    def __delitem__(self, key):
        del self.locks[key]
class NetlinkMixin(object):
'''
Generic netlink socket
'''
    def __init__(self, family=NETLINK_GENERIC, port=None, pid=None):
        """Prepare all socket state; the actual bind happens in `bind()`.

        pid semantics (kept here only for compatibility):
        * None  -- os.getpid() masked to 22 bits, port taken from `port`
        * 0     -- plain os.getpid()
        * other -- used verbatim
        """
        super(NetlinkMixin, self).__init__(AF_NETLINK, SOCK_DGRAM, family)
        global sockets
        # recv_plugin is the indirection point for recv(); see
        # recv_plugin_init() / recv_plugin_queue()
        self.recv_plugin = self.recv_plugin_init
        # 8<-----------------------------------------
        # PID init is here only for compatibility,
        # later it will be completely moved to bind()
        self.addr_pool = AddrPool(minaddr=0xff)  # msg_seq allocator
        self.epid = None
        self.port = 0
        self.fixed = True
        self.backlog = {0: []}  # {msg_seq: [msg, ...]}; 0 is the bcast queue
        self.monitor = False
        self.callbacks = []  # [(predicate, callback, args), ...]
        self.clean_cbs = {}  # {msg_seq: [callback, ...], ...}
        self.pthread = None  # async reader thread, started by bind()
        self.backlog_lock = threading.Lock()
        self.read_lock = threading.Lock()
        self.change_master = threading.Event()
        self.lock = LockFactory()
        self.buffer_queue = Queue()
        self.qsize = 0
        self.log = []
        self.get_timeout = 3
        self.get_timeout_exception = None
        if pid is None:
            # PID_MAX_LIMIT is 2^22, so the top bits can encode the port
            self.pid = os.getpid() & 0x3fffff
            self.port = port
            self.fixed = self.port is not None
        elif pid == 0:
            self.pid = os.getpid()
        else:
            self.pid = pid
        # 8<-----------------------------------------
        self.groups = 0
        self.marshal = Marshal()
        # 8<-----------------------------------------
        # Set default sockopts
        self.setsockopt(SOL_SOCKET, SO_SNDBUF, 32768)
        self.setsockopt(SOL_SOCKET, SO_RCVBUF, 1024 * 1024)
    def __enter__(self):
        # context manager support: the socket itself is the resource
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # always close the socket on context exit, even on exceptions
        self.close()
    def release(self):
        """Deprecated alias for `close()`."""
        logging.warning("The `release()` call is deprecated")
        logging.warning("Use `close()` instead")
        self.close()
    def register_callback(self, callback,
                          predicate=lambda x: True, args=None):
        '''
        Register a callback to run on a message arrival.

        Callback is the function that will be called with the
        message as the first argument. Predicate is the optional
        callable object, that returns True or False. Upon True,
        the callback will be called. Upon False it will not.
        Args is a list or tuple of arguments.

        Simplest example, assume ipr is the IPRoute() instance::

            # create a simplest callback that will print messages
            def cb(msg):
                print(msg)

            # register callback for any message:
            ipr.register_callback(cb)

        More complex example, with filtering::

            # Set object's attribute after the message key
            def cb(msg, obj):
                obj.some_attr = msg["some key"]

            # Register the callback only for the loopback device, index 1:
            ipr.register_callback(cb,
                                  lambda x: x.get('index', None) == 1,
                                  (self, ))

        Please note: you do **not** need to register the default 0 queue
        to invoke callbacks on broadcast messages. Callbacks are
        iterated **before** messages get enqueued.
        '''
        if args is None:
            # fresh list per call -- avoids the mutable-default pitfall
            args = []
        self.callbacks.append((predicate, callback, args))
def unregister_callback(self, callback):
'''
Remove the first reference to the function from the callback
register
'''
cb = tuple(self.callbacks)
for cr in cb:
if cr[1] == callback:
self.callbacks.pop(cb.index(cr))
return
def register_policy(self, policy, msg_class=None):
'''
Register netlink encoding/decoding policy. Can
be specified in two ways:
`nlsocket.register_policy(MSG_ID, msg_class)`
to register one particular rule, or
`nlsocket.register_policy({MSG_ID1: msg_class})`
to register several rules at once.
E.g.::
policy = {RTM_NEWLINK: ifinfmsg,
RTM_DELLINK: ifinfmsg,
RTM_NEWADDR: ifaddrmsg,
RTM_DELADDR: ifaddrmsg}
nlsocket.register_policy(policy)
One can call `register_policy()` as many times,
as one want to -- it will just extend the current
policy scheme, not replace it.
'''
if isinstance(policy, int) and msg_class is not None:
policy = {policy: msg_class}
assert isinstance(policy, dict)
for key in policy:
self.marshal.msg_map[key] = policy[key]
return self.marshal.msg_map
def unregister_policy(self, policy):
'''
Unregister policy. Policy can be:
* int -- then it will just remove one policy
* list or tuple of ints -- remove all given
* dict -- remove policies by keys from dict
In the last case the routine will ignore dict values,
it is implemented so just to make it compatible with
`get_policy_map()` return value.
'''
if isinstance(policy, int):
policy = [policy]
elif isinstance(policy, dict):
policy = list(policy)
assert isinstance(policy, (tuple, list, set))
for key in policy:
del self.marshal.msg_map[key]
return self.marshal.msg_map
def get_policy_map(self, policy=None):
'''
Return policy for a given message type or for all
message types. Policy parameter can be either int,
or a list of ints. Always return dictionary.
'''
if policy is None:
return self.marshal.msg_map
if isinstance(policy, int):
policy = [policy]
assert isinstance(policy, (list, tuple, set))
ret = {}
for key in policy:
ret[key] = self.marshal.msg_map[key]
return ret
    def bind(self, groups=0, pid=None, async=False):
        '''
        Bind the socket to given multicast groups, using
        given pid.

        * If pid is None, use automatic port allocation
        * If pid == 0, use process' pid
        * If pid == <int>, use the value instead of pid

        With `async=True` a daemon reader thread is started that drains
        the socket into `buffer_queue`.
        '''
        # NOTE(review): `async` became a reserved word in Python 3.7, so
        # this parameter name prevents importing the module there --
        # consider a backward-compatible rename (e.g. via **kwargs).
        if pid is not None:
            self.port = 0
            self.fixed = True
            self.pid = pid or os.getpid()
        self.groups = groups
        # if we have pre-defined port, use it strictly
        if self.fixed:
            # the netlink address is pid + (port << 22)
            self.epid = self.pid + (self.port << 22)
            super(NetlinkMixin, self).bind((self.epid, self.groups))
        else:
            # if we have no pre-defined port, scan all the
            # range till the first available port
            for i in range(1024):
                try:
                    self.port = sockets.alloc()
                    self.epid = self.pid + (self.port << 22)
                    super(NetlinkMixin, self).bind((self.epid, self.groups))
                    # if we're here, bind() done successfully, just exit
                    break
                except SocketError as e:
                    # pass occupied sockets, raise other exceptions
                    if e.errno != 98:
                        raise
            else:
                # raise "address in use" -- to be compatible
                raise SocketError(98, 'Address already in use')
        # all is OK till now, so start async recv, if we need
        if async:
            self._stop = False
            # switch recv() consumers over to the queue-backed plugin
            self.recv_plugin = self.recv_plugin_queue
            self.pthread = threading.Thread(target=self.async_recv)
            self.pthread.setDaemon(True)
            self.pthread.start()
    def recv_plugin_init(self, *argv, **kwarg):
        '''
        One-shot method: on first use it substitutes itself with the
        current recv() pointer and delegates to it.
        '''
        #
        # It is required since child classes can
        # initialize recv() in the init()
        #
        self.recv_plugin = self.recv
        return self.recv(*argv, **kwarg)
def recv_plugin_queue(self, *argv, **kwarg):
data = self.buffer_queue.get()
if isinstance(data, Exception):
raise data
else:
return data
    def async_recv(self):
        '''
        Reader thread target: pump raw packets from the socket into
        `buffer_queue` until `close()` sets the stop flag.
        '''
        while not self._stop:
            try:
                self.buffer_queue.put(self.recv(1024 * 1024))
            except Exception as e:
                # hand the exception over to the consumer thread
                self.buffer_queue.put(e)
def put(self, msg, msg_type,
msg_flags=NLM_F_REQUEST,
addr=(0, 0),
msg_seq=0,
msg_pid=None):
'''
Construct a message from a dictionary and send it to
the socket. Parameters:
* msg -- the message in the dictionary format
* msg_type -- the message type
* msg_flags -- the message flags to use in the request
* addr -- `sendto()` addr, default `(0, 0)`
* msg_seq -- sequence number to use
* msg_pid -- pid to use, if `None` -- use os.getpid()
Example::
s = IPRSocket()
s.bind()
s.put({'index': 1}, RTM_GETLINK)
s.get()
s.close()
Please notice, that the return value of `s.get()` can be
not the result of `s.put()`, but any broadcast message.
To fix that, use `msg_seq` -- the response must contain the
same `msg['header']['sequence_number']` value.
'''
if msg_seq != 0:
self.lock[msg_seq].acquire()
try:
if msg_seq not in self.backlog:
self.backlog[msg_seq] = []
if not isinstance(msg, nlmsg):
msg_class = self.marshal.msg_map[msg_type]
msg = msg_class(msg)
if msg_pid is None:
msg_pid = os.getpid()
msg['header']['type'] = msg_type
msg['header']['flags'] = msg_flags
msg['header']['sequence_number'] = msg_seq
msg['header']['pid'] = msg_pid
msg.encode()
if msg_seq not in self.clean_cbs:
self.clean_cbs[msg_seq] = []
self.clean_cbs[msg_seq].extend(msg.clean_cbs)
self.sendto(msg.buf.getvalue(), addr)
except:
raise
finally:
if msg_seq != 0:
self.lock[msg_seq].release()
    def get(self, bufsize=DEFAULT_RCVBUF, msg_seq=0, terminate=None):
        '''
        Get parsed messages list. If `msg_seq` is given, return
        only messages with that `msg['header']['sequence_number']`,
        saving all other messages into `self.backlog`.

        The routine is thread-safe.

        The `bufsize` parameter can be:

        * -1: bufsize will be calculated from the first 4 bytes of
          the network data
        * 0: bufsize will be calculated from SO_RCVBUF sockopt
        * int >= 0: just a bufsize

        May raise the error carried by an NLMSG_ERROR response, or
        `get_timeout_exception` (when configured) on timeout.
        '''
        ctime = time.time()
        with self.lock[msg_seq]:
            if bufsize == -1:
                # get bufsize from the network data
                bufsize = struct.unpack("I", self.recv(4, MSG_PEEK))[0]
            elif bufsize == 0:
                # get bufsize from SO_RCVBUF
                bufsize = self.getsockopt(SOL_SOCKET, SO_RCVBUF) // 2
            ret = []
            enough = False
            while not enough:
                # 8<-----------------------------------------------------------
                #
                # This stage changes the backlog, so use mutex to
                # prevent side changes
                self.backlog_lock.acquire()
                ##
                # Stage 1. BEGIN
                #
                # 8<-----------------------------------------------------------
                #
                # Check backlog and return already collected
                # messages.
                #
                if msg_seq == 0 and self.backlog[0]:
                    # Zero queue.
                    #
                    # Load the backlog, if there is valid
                    # content in it
                    ret.extend(self.backlog[0])
                    self.backlog[0] = []
                    # And just exit
                    self.backlog_lock.release()
                    break
                elif self.backlog.get(msg_seq, None):
                    # Any other msg_seq.
                    #
                    # Collect messages up to the terminator.
                    # Terminator conditions:
                    #  * NLMSG_ERROR != 0
                    #  * NLMSG_DONE
                    #  * terminate() function (if defined)
                    #  * not NLM_F_MULTI
                    #
                    # Please note, that if the terminator has not occurred,
                    # more `recv()` rounds CAN be required.
                    for msg in tuple(self.backlog[msg_seq]):
                        # Drop the message from the backlog, if any
                        self.backlog[msg_seq].remove(msg)
                        # If there is an error, raise exception
                        if msg['header'].get('error', None) is not None:
                            # requeue the remainder into the zero queue first
                            self.backlog[0].extend(self.backlog[msg_seq])
                            del self.backlog[msg_seq]
                            # The loop is done
                            self.backlog_lock.release()
                            raise msg['header']['error']
                        # If it is the terminator message, say "enough"
                        # and requeue all the rest into Zero queue
                        if (msg['header']['type'] == NLMSG_DONE) or \
                                (terminate is not None and terminate(msg)):
                            # The loop is done
                            enough = True
                        # If it is just a normal message, append it to
                        # the response
                        if not enough:
                            ret.append(msg)
                            # But finish the loop on single messages
                            if not msg['header']['flags'] & NLM_F_MULTI:
                                # but not multi -- so end the loop
                                enough = True
                        # Enough is enough, requeue the rest and delete
                        # our backlog
                        if enough:
                            self.backlog[0].extend(self.backlog[msg_seq])
                            del self.backlog[msg_seq]
                            break
                    # Next iteration
                    self.backlog_lock.release()
                else:
                    # Stage 1. END
                    #
                    # 8<-------------------------------------------------------
                    #
                    # Stage 2. BEGIN
                    #
                    # 8<-------------------------------------------------------
                    #
                    # Receive the data from the socket and put the messages
                    # into the backlog
                    #
                    self.backlog_lock.release()
                    ##
                    #
                    # Control the timeout. We should not be within the
                    # function more than TIMEOUT seconds. All the locks
                    # MUST be released here.
                    #
                    if time.time() - ctime > self.get_timeout:
                        if self.get_timeout_exception:
                            raise self.get_timeout_exception()
                        else:
                            return ret
                    #
                    if self.read_lock.acquire(False):
                        self.change_master.clear()
                        # If the socket is free to read from, occupy
                        # it and wait for the data
                        #
                        # This is a time consuming process, so all the
                        # locks, except the read lock must be released
                        data = self.recv_plugin(bufsize)
                        # Parse data
                        msgs = self.marshal.parse(data)
                        # Reset ctime -- timeout should be measured
                        # for every turn separately
                        ctime = time.time()
                        #
                        # NOTE(review): the queue-pressure heuristic below
                        # only matters in async mode, when buffer_queue is
                        # actually fed by the reader thread.
                        current = self.buffer_queue.qsize()
                        delta = current - self.qsize
                        if delta > 10:
                            delay = min(3, max(0.1, float(current) / 60000))
                            message = ("Packet burst: the reader thread "
                                       "priority is increased, beware of "
                                       "delays on netlink calls\n\tCounters: "
                                       "delta=%s qsize=%s delay=%s "
                                       % (delta, current, delay))
                            if delay < 1:
                                logging.debug(message)
                            else:
                                logging.warning(message)
                            time.sleep(delay)
                        self.qsize = current
                        # We've got the data, lock the backlog again
                        self.backlog_lock.acquire()
                        for msg in msgs:
                            seq = msg['header']['sequence_number']
                            if seq in self.clean_cbs:
                                # run cleanup callbacks registered by put()
                                for cb in self.clean_cbs[seq]:
                                    try:
                                        cb()
                                    except:
                                        logging.warning("Cleanup callback"
                                                        "fail: %s" % (cb))
                                        logging.warning(traceback.format_exc())
                                del self.clean_cbs[seq]
                            if seq not in self.backlog:
                                if msg['header']['type'] == NLMSG_ERROR:
                                    # Drop orphaned NLMSG_ERROR messages
                                    continue
                                # unknown sequence: route to the zero queue
                                seq = 0
                            # 8<-----------------------------------------------
                            # Callbacks section
                            for cr in self.callbacks:
                                try:
                                    if cr[0](msg):
                                        cr[1](msg, *cr[2])
                                except:
                                    logging.warning("Callback fail: %s" % (cr))
                                    logging.warning(traceback.format_exc())
                            # 8<-----------------------------------------------
                            self.backlog[seq].append(msg)
                            # Monitor mode: duplicate into the zero queue
                            if self.monitor and seq != 0:
                                self.backlog[0].append(msg)
                        # We finished with the backlog, so release the lock
                        self.backlog_lock.release()
                        # Now wake up other threads
                        self.change_master.set()
                        # Finally, release the read lock: all data processed
                        self.read_lock.release()
                    else:
                        # If the socket is occupied and there is still no
                        # data for us, wait for the next master change or
                        # for a timeout
                        self.change_master.wait(1)
                # 8<-------------------------------------------------------
                #
                # Stage 2. END
                #
                # 8<-------------------------------------------------------
            return ret
def nlm_request(self, msg, msg_type,
msg_flags=NLM_F_REQUEST | NLM_F_DUMP,
terminate=None):
msg_seq = self.addr_pool.alloc()
with self.lock[msg_seq]:
try:
self.put(msg, msg_type, msg_flags, msg_seq=msg_seq)
ret = self.get(msg_seq=msg_seq, terminate=terminate)
return ret
except:
raise
finally:
# Ban this msg_seq for 0xff rounds
#
# It's a long story. Modern kernels for RTM_SET.* operations
# always return NLMSG_ERROR(0) == success, even not setting
# NLM_F_MULTY flag on other response messages and thus w/o
# any NLMSG_DONE. So, how to detect the response end? One
# can not rely on NLMSG_ERROR on old kernels, but we have to
# support them too. Ty, we just ban msg_seq for several rounds,
# and NLMSG_ERROR, being received, will become orphaned and
# just dropped.
#
# Hack, but true.
self.addr_pool.free(msg_seq, ban=0xff)
    def close(self):
        '''
        Correctly close the socket and free all resources.
        '''
        global sockets
        if self.pthread is not None:
            # signal the async reader thread to exit
            self._stop = True
        if self.epid is not None:
            assert self.port is not None
            if not self.fixed:
                # return the auto-allocated port to the shared pool
                sockets.free(self.port)
            self.epid = None
        super(NetlinkMixin, self).close()
class NetlinkSocket(NetlinkMixin, SocketBase):
    '''
    Ready-to-use netlink socket: NetlinkMixin protocol logic on top of
    the SocketBase implementation from pyroute2.config.
    '''
    pass
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from builtins import object, str
from textwrap import dedent
from pants.engine.build_files import create_graph_rules
from pants.engine.fs import create_fs_rules
from pants.engine.mapper import AddressMapper
from pants.engine.rules import RootRule, RuleIndex, SingletonRule, TaskRule
from pants.engine.selectors import Get, Select
from pants.util.objects import Exactly
from pants_test.engine.examples.parsers import JsonParser
from pants_test.engine.examples.planners import Goal
from pants_test.engine.util import TargetTable, assert_equal_with_printing, create_scheduler
class AGoal(Goal):
  """Example goal whose only product type is A."""

  @classmethod
  def products(cls):
    return [A]
class A(object):
  """Stand-in product type used as a fixture in rule-graph tests."""

  def __repr__(self):
    return 'A()'
class B(object):
  """Stand-in product type used as a fixture in rule-graph tests."""

  def __repr__(self):
    return 'B()'
class C(object):
  """Stand-in product type used as a fixture in rule-graph tests."""

  def __repr__(self):
    return 'C()'
class D(object):
  """Stand-in product type used as a fixture in rule-graph tests."""

  def __repr__(self):
    return 'D()'
def noop(*args):
  """Rule-function stub: accepts any arguments and produces nothing."""
  return None
class SubA(A):
  """Subclass of A, used as a distinct root subject type in tests."""

  def __repr__(self):
    return 'SubA()'
_suba_root_rules = [RootRule(SubA)]
class RuleIndexTest(unittest.TestCase):
  """RuleIndex.create must reject objects that are not rules."""

  def test_creation_fails_with_bad_declaration_type(self):
    with self.assertRaises(TypeError) as cm:
      RuleIndex.create([A()])
    # assertEqual: `assertEquals` is a deprecated alias, removed in
    # modern Python versions.
    self.assertEqual("Unexpected rule type: <class 'pants_test.engine.test_rules.A'>."
                     " Rules either extend Rule, or are static functions decorated with @rule.",
                     str(cm.exception))
class RulesetValidatorTest(unittest.TestCase):
  """Scheduler construction must reject rule sets with unsatisfiable selects."""

  def test_ruleset_with_missing_product_type(self):
    rules = _suba_root_rules + [TaskRule(A, [Select(B)], noop)]
    with self.assertRaises(Exception) as cm:
      create_scheduler(rules)
    self.assert_equal_with_printing(dedent("""
                     Rules with errors: 1
                       (A, (Select(B),), noop):
                         no rule was available to compute B for subject type SubA
                     """).strip(),
        str(cm.exception))

  def test_ruleset_with_rule_with_two_missing_selects(self):
    # Both missing selects must be reported, not just the first one.
    rules = _suba_root_rules + [TaskRule(A, [Select(B), Select(C)], noop)]
    with self.assertRaises(Exception) as cm:
      create_scheduler(rules)
    self.assert_equal_with_printing(dedent("""
                     Rules with errors: 1
                       (A, (Select(B), Select(C)), noop):
                         no rule was available to compute B for subject type SubA
                         no rule was available to compute C for subject type SubA
                     """).strip(),
        str(cm.exception))

  def test_ruleset_with_selector_only_provided_as_root_subject(self):
    # B being available only as a root subject is sufficient.
    rules = [RootRule(B), TaskRule(A, [Select(B)], noop)]
    create_scheduler(rules)

  def test_ruleset_with_superclass_of_selected_type_produced_fails(self):
    rules = [
      RootRule(C),
      TaskRule(A, [Select(B)], noop),
      TaskRule(B, [Select(SubA)], noop)
    ]
    with self.assertRaises(Exception) as cm:
      create_scheduler(rules)
    self.assert_equal_with_printing(dedent("""
                      Rules with errors: 2
                        (A, (Select(B),), noop):
                          no rule was available to compute B for subject type C
                        (B, (Select(SubA),), noop):
                          no rule was available to compute SubA for subject type C
                      """).strip(),
        str(cm.exception))

  def test_ruleset_with_explicit_type_constraint(self):
    rules = _suba_root_rules + [
      TaskRule(Exactly(A), [Select(B)], noop),
      TaskRule(B, [Select(A)], noop)
    ]
    create_scheduler(rules)

  def test_ruleset_with_failure_due_to_incompatible_subject_for_singleton(self):
    rules = [
      RootRule(A),
      TaskRule(D, [Select(C)], noop),
      SingletonRule(B, B()),
    ]
    with self.assertRaises(Exception) as cm:
      create_scheduler(rules)
    # This error message could note near matches like the singleton.
    self.assert_equal_with_printing(dedent("""
                      Rules with errors: 1
                        (D, (Select(C),), noop):
                          no rule was available to compute C for subject type A
                      """).strip(),
        str(cm.exception))

  def test_not_fulfillable_duplicated_dependency(self):
    # If a rule depends on another rule+subject in two ways, and one of them is unfulfillable
    # Only the unfulfillable one should be in the errors.
    rules = _suba_root_rules + [
      TaskRule(B, [Select(D)], noop),
      TaskRule(D, [Select(A), Select(SubA)], noop, input_gets=[Get(A, C)]),
      TaskRule(A, [Select(SubA)], noop)
    ]
    with self.assertRaises(Exception) as cm:
      create_scheduler(rules)
    self.assert_equal_with_printing(dedent("""
                      Rules with errors: 2
                        (B, (Select(D),), noop):
                          no rule was available to compute D for subject type SubA
                        (D, (Select(A), Select(SubA)), [Get(A, C)], noop):
                          no rule was available to compute A for subject type C
                      """).strip(),
        str(cm.exception))

  # Borrowed helper: pretty-printing assertEqual for multi-line strings.
  assert_equal_with_printing = assert_equal_with_printing
class RuleGraphMakerTest(unittest.TestCase):
# TODO something with variants
# TODO HasProducts?
  def test_smallest_full_test(self):
    # Minimal graph: one task rule fed directly by the root subject.
    rules = _suba_root_rules + [
      RootRule(SubA),
      TaskRule(Exactly(A), [Select(SubA)], noop)
    ]
    fullgraph = self.create_full_graph(rules)
    self.assert_equal_with_printing(dedent("""
                     digraph {
                       // root subject types: SubA
                       // root entries
                         "Select(A) for SubA" [color=blue]
                         "Select(A) for SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
                       // internal entries
                         "(A, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
                     }""").strip(), fullgraph)
  def test_full_graph_for_planner_example(self):
    # Smoke test on a realistic rule set: only the rough size of the graph
    # is asserted, not its exact content.
    symbol_table = TargetTable()
    address_mapper = AddressMapper(JsonParser(symbol_table), '*.BUILD.json')
    rules = create_graph_rules(address_mapper, symbol_table) + create_fs_rules()
    fullgraph_str = self.create_full_graph(rules)
    print('---diagnostic------')
    print(fullgraph_str)
    print('/---diagnostic------')
    # Split the dot output into the root-entry and internal-entry sections.
    in_root_rules = False
    in_all_rules = False
    all_rules = []
    root_rule_lines = []
    for line in fullgraph_str.splitlines():
      if line.startswith('  // root subject types:'):
        pass
      elif line.startswith('  // root entries'):
        in_root_rules = True
      elif line.startswith('  // internal entries'):
        in_all_rules = True
      elif in_all_rules:
        all_rules.append(line)
      elif in_root_rules:
        root_rule_lines.append(line)
      else:
        pass
    self.assertTrue(6 < len(all_rules))
    self.assertTrue(12 < len(root_rule_lines))  # 2 lines per entry
  def test_smallest_full_test_multiple_root_subject_types(self):
    # With two root subject types, each product gets a root entry per subject.
    rules = [
      RootRule(SubA),
      RootRule(A),
      TaskRule(A, [Select(SubA)], noop),
      TaskRule(B, [Select(A)], noop)
    ]
    fullgraph = self.create_full_graph(rules)
    self.assert_equal_with_printing(dedent("""
                     digraph {
                       // root subject types: A, SubA
                       // root entries
                         "Select(A) for A" [color=blue]
                         "Select(A) for A" -> {"SubjectIsProduct(A)"}
                         "Select(A) for SubA" [color=blue]
                         "Select(A) for SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
                         "Select(B) for A" [color=blue]
                         "Select(B) for A" -> {"(B, (Select(A),), noop) of A"}
                         "Select(B) for SubA" [color=blue]
                         "Select(B) for SubA" -> {"(B, (Select(A),), noop) of SubA"}
                       // internal entries
                         "(A, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
                         "(B, (Select(A),), noop) of A" -> {"SubjectIsProduct(A)"}
                         "(B, (Select(A),), noop) of SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
                     }""").strip(),
      fullgraph)
  def test_single_rule_depending_on_subject_selection(self):
    # Subgraph rooted at SubA: the task consumes the subject directly.
    rules = [
      TaskRule(Exactly(A), [Select(SubA)], noop)
    ]
    subgraph = self.create_subgraph(A, rules, SubA())
    self.assert_equal_with_printing(dedent("""
                     digraph {
                       // root subject types: SubA
                       // root entries
                         "Select(A) for SubA" [color=blue]
                         "Select(A) for SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
                       // internal entries
                         "(A, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
                     }""").strip(),
      subgraph)
def test_multiple_selects(self):
    """A rule with two selects depends on both resolved entries."""
    rules = [
        TaskRule(Exactly(A), [Select(SubA), Select(B)], noop),
        TaskRule(B, [], noop)
    ]
    subgraph = self.create_subgraph(A, rules, SubA())
    self.assert_equal_with_printing(dedent("""
        digraph {
          // root subject types: SubA
          // root entries
          "Select(A) for SubA" [color=blue]
          "Select(A) for SubA" -> {"(A, (Select(SubA), Select(B)), noop) of SubA"}
          // internal entries
          "(A, (Select(SubA), Select(B)), noop) of SubA" -> {"SubjectIsProduct(SubA)" "(B, (,), noop) of SubA"}
          "(B, (,), noop) of SubA" -> {}
        }""").strip(),
        subgraph)
def test_one_level_of_recursion(self):
    """A rule whose select is satisfied by another rule chains through it."""
    rules = [
        TaskRule(Exactly(A), [Select(B)], noop),
        TaskRule(B, [Select(SubA)], noop)
    ]
    subgraph = self.create_subgraph(A, rules, SubA())
    self.assert_equal_with_printing(dedent("""
        digraph {
          // root subject types: SubA
          // root entries
          "Select(A) for SubA" [color=blue]
          "Select(A) for SubA" -> {"(A, (Select(B),), noop) of SubA"}
          // internal entries
          "(A, (Select(B),), noop) of SubA" -> {"(B, (Select(SubA),), noop) of SubA"}
          "(B, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
        }""").strip(),
        subgraph)
def test_noop_removal_in_subgraph(self):
    """An unsatisfiable rule (Select(C) has no provider) is pruned from the subgraph."""
    rules = [
        TaskRule(Exactly(A), [Select(C)], noop),
        TaskRule(Exactly(A), [], noop),
        SingletonRule(B, B()),
    ]
    # validate=False because the graph intentionally contains a dead rule.
    subgraph = self.create_subgraph(A, rules, SubA(), validate=False)
    self.assert_equal_with_printing(dedent("""
        digraph {
          // root subject types: SubA
          // root entries
          "Select(A) for SubA" [color=blue]
          "Select(A) for SubA" -> {"(A, (,), noop) of SubA"}
          // internal entries
          "(A, (,), noop) of SubA" -> {}
        }""").strip(),
        subgraph)
def test_noop_removal_full_single_subject_type(self):
    """Unsatisfiable rules are pruned from the full graph as well as subgraphs."""
    rules = _suba_root_rules + [
        TaskRule(Exactly(A), [Select(C)], noop),
        TaskRule(Exactly(A), [], noop),
    ]
    # validate=False because the graph intentionally contains a dead rule.
    fullgraph = self.create_full_graph(rules, validate=False)
    self.assert_equal_with_printing(dedent("""
        digraph {
          // root subject types: SubA
          // root entries
          "Select(A) for SubA" [color=blue]
          "Select(A) for SubA" -> {"(A, (,), noop) of SubA"}
          // internal entries
          "(A, (,), noop) of SubA" -> {}
        }""").strip(),
        fullgraph)
def test_root_tuple_removed_when_no_matches(self):
    """Root entries with no satisfiable rule are dropped entirely.

    B's rule needs both D and A for the same subject, which no subject
    provides, so no Select(B) root entry appears.
    """
    rules = [
        RootRule(C),
        RootRule(D),
        TaskRule(Exactly(A), [Select(C)], noop),
        TaskRule(Exactly(B), [Select(D), Select(A)], noop),
    ]
    fullgraph = self.create_full_graph(rules, validate=False)
    self.assert_equal_with_printing(dedent("""
        digraph {
          // root subject types: C, D
          // root entries
          "Select(A) for C" [color=blue]
          "Select(A) for C" -> {"(A, (Select(C),), noop) of C"}
          // internal entries
          "(A, (Select(C),), noop) of C" -> {"SubjectIsProduct(C)"}
        }""").strip(),
        fullgraph)
def test_noop_removal_transitive(self):
    """Pruning a noop-able rule also removes rules that depended on it."""
    # If a noop-able rule has rules that depend on it,
    # they should be removed from the graph.
    rules = [
        TaskRule(Exactly(B), [Select(C)], noop),
        TaskRule(Exactly(A), [Select(B)], noop),
        TaskRule(Exactly(A), [], noop),
    ]
    subgraph = self.create_subgraph(A, rules, SubA(), validate=False)
    self.assert_equal_with_printing(dedent("""
        digraph {
          // root subject types: SubA
          // root entries
          "Select(A) for SubA" [color=blue]
          "Select(A) for SubA" -> {"(A, (,), noop) of SubA"}
          // internal entries
          "(A, (,), noop) of SubA" -> {}
        }""").strip(),
        subgraph)
def test_get_with_matching_singleton(self):
    """A Get(B, C) inside a rule is satisfied by a singleton providing B."""
    rules = [
        TaskRule(Exactly(A), [Select(SubA)], noop, input_gets=[Get(B, C)]),
        SingletonRule(B, B()),
    ]
    subgraph = self.create_subgraph(A, rules, SubA())
    # TODO perhaps singletons should be marked in the dot format somehow
    self.assert_equal_with_printing(dedent("""
        digraph {
          // root subject types: SubA
          // root entries
          "Select(A) for SubA" [color=blue]
          "Select(A) for SubA" -> {"(A, (Select(SubA),), [Get(B, C)], noop) of SubA"}
          // internal entries
          "(A, (Select(SubA),), [Get(B, C)], noop) of SubA" -> {"SubjectIsProduct(SubA)" "Singleton(B(), B)"}
        }""").strip(),
        subgraph)
def test_depends_on_multiple_one_noop(self):
    """When two rules can provide A, only the satisfiable one survives pruning."""
    rules = [
        TaskRule(B, [Select(A)], noop),
        TaskRule(A, [Select(C)], noop),
        TaskRule(A, [Select(SubA)], noop)
    ]
    subgraph = self.create_subgraph(B, rules, SubA(), validate=False)
    self.assert_equal_with_printing(dedent("""
        digraph {
          // root subject types: SubA
          // root entries
          "Select(B) for SubA" [color=blue]
          "Select(B) for SubA" -> {"(B, (Select(A),), noop) of SubA"}
          // internal entries
          "(A, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
          "(B, (Select(A),), noop) of SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
        }""").strip(),
        subgraph)
def test_multiple_depend_on_same_rule(self):
    """Two rules selecting A share the single internal entry that provides it."""
    rules = _suba_root_rules + [
        TaskRule(B, [Select(A)], noop),
        TaskRule(C, [Select(A)], noop),
        TaskRule(A, [Select(SubA)], noop)
    ]
    # NOTE(review): despite the variable name, this builds the full graph.
    subgraph = self.create_full_graph(rules)
    self.assert_equal_with_printing(dedent("""
        digraph {
          // root subject types: SubA
          // root entries
          "Select(A) for SubA" [color=blue]
          "Select(A) for SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
          "Select(B) for SubA" [color=blue]
          "Select(B) for SubA" -> {"(B, (Select(A),), noop) of SubA"}
          "Select(C) for SubA" [color=blue]
          "Select(C) for SubA" -> {"(C, (Select(A),), noop) of SubA"}
          // internal entries
          "(A, (Select(SubA),), noop) of SubA" -> {"SubjectIsProduct(SubA)"}
          "(B, (Select(A),), noop) of SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
          "(C, (Select(A),), noop) of SubA" -> {"(A, (Select(SubA),), noop) of SubA"}
        }""").strip(),
        subgraph)
def test_get_simple(self):
    """A Get(B, D) changes the subject: B is resolved with subject type D."""
    rules = [
        TaskRule(Exactly(A), [], noop, [Get(B, D)]),
        TaskRule(B, [Select(D)], noop),
    ]
    subgraph = self.create_subgraph(A, rules, SubA())
    self.assert_equal_with_printing(dedent("""
        digraph {
          // root subject types: SubA
          // root entries
          "Select(A) for SubA" [color=blue]
          "Select(A) for SubA" -> {"(A, (,), [Get(B, D)], noop) of SubA"}
          // internal entries
          "(A, (,), [Get(B, D)], noop) of SubA" -> {"(B, (Select(D),), noop) of D"}
          "(B, (Select(D),), noop) of D" -> {"SubjectIsProduct(D)"}
        }""").strip(),
        subgraph)
def create_full_graph(self, rules, validate=True):
    """Build a scheduler for `rules` and return its full rule graph in dot format."""
    graph_lines = create_scheduler(rules, validate=validate).rule_graph_visualization()
    return "\n".join(graph_lines)
def create_subgraph(self, requested_product, rules, subject, validate=True):
    """Return the dot-format rule subgraph for `requested_product` of `subject`'s type."""
    scheduler = create_scheduler(rules + _suba_root_rules, validate=validate)
    subgraph_lines = scheduler.rule_subgraph_visualization(
        type(subject), requested_product)
    return "\n".join(subgraph_lines)
assert_equal_with_printing = assert_equal_with_printing
| |
#!/usr/bin/python
# (c) 2018 Jim Hawkins. MIT licensed, see https://opensource.org/licenses/MIT
# Part of Blender Driver, see https://github.com/sjjhsjjh/blender-driver
"""Utility layer on the main Blender Python programming interface. This isn't
the utilities for the Blender Game Engine.
This module can only be used from within Blender."""
# Exit if run other than as a module.
if __name__ == '__main__':
print(__doc__)
raise SystemExit(1)
# Standard library imports, in alphabetic order.
#
# Module for JavaScript Object Notation (JSON) strings.
# https://docs.python.org/3.5/library/json.html
import json
#
# Module for levelled logging messages.
# Tutorial is here: https://docs.python.org/3.5/howto/logging.html
# Reference is here: https://docs.python.org/3.5/library/logging.html
from logging import DEBUG, INFO, WARNING, ERROR, log
#
# Blender library imports, in alphabetic order.
#
# These modules can only be imported if running from within Blender.
try:
#
# Main Blender Python interface.
import bpy
#
# Vectors.
from mathutils import Vector
except ImportError as error:
print(__doc__)
print(error)
def load_driver(driverClass, arguments):
    """Load the owner data subclass and initialise everything.

    This subroutine has all the hard-coded names. It is the end of the chain
    from:

    -   The blenderdriver.py script, which launches Blender, specifies the
        .blend file to open, and tells Blender to run the
        launch_blender_driver.py script.
    -   The launch_blender_driver.py script, which works out the name of the
        driver application module, extends the Python module path, and then
        calls this function.

    The driver application class is instantiated in two different stages: Data
    and Game. The Data stage is here. The Game stage is when the Blender Game
    Engine starts.
    """
    # Fix: logging uses %-style placeholders; the previous '{}' placeholders
    # were never interpolated and caused formatting errors when records were
    # emitted.
    log(DEBUG, 'begin %s "%s".', arguments, __package__)
    #
    # Add and configure the driver gateway object, on which everything else
    # will depend. It is a Blender Empty.
    driverGateway = set_up_object(arguments.gateway)
    #
    # Create the settings collection. The settings include:
    # - The module path and name of the application class.
    # - The arguments from the command line.
    # - The Blender version string, so that it can be retrieved in the BGE
    #   stage.
    # NOTE(review): arguments is dereferenced above, so the None guard here is
    # belt-and-braces only.
    settings = {'module': driverClass.__module__,
                'class': driverClass.__name__,
                'arguments': None if arguments is None else vars(arguments),
                'blender': bpy.app.version_string[:]}
    #
    # Instantiate the application class.
    driver = driverClass(settings)
    #
    # Call the application's constructor for the Blender data stage. Pass it a
    # reference to the scene and the driver gateway. This is a data scene, not
    # a game scene. For now, assume there is only a single scene.
    driver.data_constructor(bpy.data.scenes[0], driverGateway)
    #
    # Call the override-able initialisation.
    driver.data_initialise()
    #
    # Attach the controllers for BGE to the gateway object.
    # Default is that the controllers module is in the same package as this
    # file. This can be overridden on the launch script command line, in case
    # a diagnostic controllers package is to be run instead.
    controllersPackage = __package__
    if arguments.controllersPackage is not None:
        controllersPackage = arguments.controllersPackage
    controllers = get_controllers(
        driver, controllersPackage, arguments.controllersModule,
        ('initialise', 'tick', 'keyboard'))
    log(DEBUG, 'controllers %s.', vars(controllers))
    configure_gateway(driverGateway, controllers, driver.tickInterval)
    #
    # Put a collection of configuration settings into one or more game
    # properties. The collection gets read from there by the
    # blender_driver.controllers initialise() subroutine when the Blender game
    # engine is started.
    set_game_property(driverGateway, 'settingsJSON', json.dumps(settings))
    #
    # Start the Blender Game Engine, if that was specified.
    # Could also have an option to export as a blender game here.
    if arguments.start:
        log(DEBUG, 'starting BGE.')
        bpy.ops.view3d.game_start()
    log(DEBUG, 'end.')
def set_up_object(name, params=None):
    """Set up an object in the data layer. Returns a reference to the object.

    If an object called `name` already exists it is reused; otherwise a new
    object is created from `params` ('text', 'subtype', 'physicsType',
    'location', 'scale', 'ghost').
    """
    # Fix: avoid a mutable default argument ({}). The dict was only read, so
    # there was no live bug, but None-with-fallback is the safe idiom.
    if params is None:
        params = {}
    object_ = None
    objectIndex = bpy.data.objects.find(name)
    if objectIndex >= 0:
        object_ = bpy.data.objects[objectIndex]
    new_ = (object_ is None)
    #
    # Create a new object with the specified Blender mesh, if necessary.
    subtype = params.get('subtype')
    text = params.get('text')
    if new_:
        if text is not None:
            # Text objects are built from a FONT curve with centred alignment.
            curve = bpy.data.curves.new(name, 'FONT')
            curve.align_x = 'CENTER'
            curve.align_y = 'CENTER'
            curve.body = text
            object_ = bpy.data.objects.new(name, curve)
        elif subtype is None or subtype == 'Empty':
            object_ = bpy.data.objects.new(name, None)
        else:
            object_ = bpy.data.objects.new(name, bpy.data.meshes[subtype])
    #
    # Set its physics type and related attributes. New empties default to no
    # collision; new mesh objects default to rigid bodies.
    physicsType = params.get('physicsType')
    if physicsType is None and new_:
        if subtype is None:
            physicsType = 'NO_COLLISION'
        else:
            physicsType = 'RIGID_BODY'
    if physicsType is not None:
        object_.game.physics_type = physicsType
        if physicsType != 'NO_COLLISION':
            object_.game.use_collision_bounds = True
    #
    # Position the object, if necessary.
    location = params.get('location')
    if location is not None:
        object_.location = Vector(location)
    #
    # Scale the object, if necessary.
    scale = params.get('scale')
    if scale is not None:
        object_.scale = Vector(scale)
    #
    # Add the object to the current scene.
    if new_:
        bpy.context.scene.objects.link(object_)
    #
    # Set its Blender ghost, if specified.
    ghost = params.get('ghost')
    if ghost is not None:
        object_.game.use_ghost = ghost
    #
    # Add the object to the required layers.
    #
    # The gateway object, which has subtype None and text None, goes on every
    # layer. Template objects go on layer one only. This means that:
    #
    # - Template objects aren't visible by default.
    # - Template objects can be addObject'd later, by bge.
    # - The module that contains the controllers of the gateway object always
    #   gets imported, whatever layer happens to be active when BGE gets
    #   started.
    layer = 1
    if subtype is None and text is None:
        layer = 0
    #
    # It seems that Blender doesn't allow an object to be on no layers at any
    # time. This makes the following line necessary, in addition to the for
    # loop.
    object_.layers[layer] = True
    for index in range(len(object_.layers)):
        if index != layer:
            object_.layers[index] = (layer == 0)
    #
    # Refresh the current scene.
    bpy.context.scene.update()
    #
    # Return a reference to the object.
    return object_
def get_controllers(driver, packageName, moduleName, controllers):
    """Get the names of the specified controllers that exist in the specified
    module and package. Names of controllers are returned in a namespace type
    of object, in a package.module.controller format. The application can
    remove any, for diagnostic purposes."""
    #
    # Empty class used purely as a namespace.
    # https://docs.python.org/3.5/tutorial/classes.html#odds-and-ends
    class Controllers:
        pass

    namespace = Controllers()
    #
    # Populate the namespace with the fully-qualified name of every requested
    # controller.
    for controllerName in controllers:
        qualifiedName = ".".join((packageName, moduleName, controllerName))
        setattr(namespace, controllerName, qualifiedName)
    #
    # Let the application remove any entries, for diagnostic purposes.
    driver.diagnostic_remove_controllers(namespace)
    return namespace
def configure_gateway(driverGateway, controllers, tickInterval):
    """Set various configurations that make the driver gateway work or are
    convenient."""
    #
    # The game engine renderer must be active for game logic to be editable.
    bpy.context.scene.render.engine = 'BLENDER_GAME'
    bpy.ops.wm.addon_enable(module="game_engine_save_as_runtime")
    #
    # Controller and sensor for initialisation.
    if controllers.initialise is not None:
        sensor = add_sensor(driverGateway, controllers.initialise)
    #
    # Controller and sensor for every tick.
    if controllers.tick is not None:
        sensor = add_sensor(driverGateway, controllers.tick)
        sensor.use_pulse_true_level = True
        #
        # Set the tick frequency using whatever API the current version of
        # Blender has.
        if hasattr(sensor, 'frequency'):
            sensor.frequency = tickInterval
        else:
            sensor.tick_skip = tickInterval
    #
    # Controller and sensor for the keyboard. This allows, for example, a back
    # door to be added to terminate the engine.
    if controllers.keyboard is not None:
        sensor = add_sensor(driverGateway, controllers.keyboard, 'KEYBOARD')
        sensor.use_all_keys = True
def add_sensor(driver, subroutine, sensorType='ALWAYS'):
    """Add a Python MODULE controller running `subroutine` to `driver`, wire a
    new sensor of `sensorType` to it, and return the sensor."""
    driver = select_only(driver)
    bpy.ops.logic.controller_add(type='PYTHON')
    #
    # Only way to access the controller just added is to get the last one now.
    controller = driver.game.controllers[-1]
    controller.mode = 'MODULE'
    controller.module = subroutine
    controller.name = subroutine
    bpy.ops.logic.sensor_add(type=sensorType)
    #
    # Only way to access the sensor just added is to get the last one now.
    sensor = driver.game.sensors[-1]
    sensor.name = subroutine
    sensor.use_tap = True
    sensor.link(controller)
    return sensor
def select_only(target):
    """Set the Blender user interface selection to a specified single object,
    or to nothing. If a single object is selected then it is also made active.
    Some parts of the programming interface also require that an object is
    selected."""
    bpy.ops.object.select_all(action='DESELECT')
    if target is not None:
        # A string is treated as an object name and resolved first.
        if isinstance(target, str):
            target = bpy.data.objects[target]
        target.select = True
        bpy.context.scene.objects.active = target
    return target
def set_up_objects(objectsDict):
    """Set up every object described in `objectsDict` (name -> params mapping)
    and return the resulting objects as a list. A None mapping yields []."""
    if objectsDict is None:
        return []
    return [set_up_object(name, objectParams)
            for name, objectParams in objectsDict.items()]
def set_game_property(object_, key, value):
    """Set a game property in the data context, i.e. before the game engine
    has started.

    If `value` is too long for one STRING property, it is split across a
    series of properties named key0, key1, ... that get_game_property()
    reassembles.
    """
    object_ = select_only(object_)
    #
    # Attempt to add the value to a single property. This might not work.
    bpy.ops.object.game_property_new(type='STRING', name=key)
    #
    # Get a reference to the new game property.
    gameProperty = object_.game.properties[-1]
    #
    # Set the value, then check that it worked.
    gameProperty.value = value
    if gameProperty.value == value:
        return object_
    #
    # If this code is reached, then it didn't work.
    #
    # Confirm that it didn't work because the value is too long.
    if not value.startswith(gameProperty.value):
        # The set didn't work, and it isn't because the value is too long.
        # Fail now.
        raise AssertionError(''.join((
            'Game property value set failed. Expected "', value,
            '". Actual "', gameProperty.value, '"' )))
    #
    # The set didn't work because the value is too long. Split the value
    # across an "array" of game properties. Actually, a number of game
    # properties with a single root name and numeric suffixes.
    #
    # Find out the maximum length of a game property.
    # NOTE(review): this shadows the max() builtin within this function.
    max = len(gameProperty.value)
    #
    # Delete the property that failed to take the whole value.
    bpy.ops.object.game_property_remove(-1)
    #
    # Break the value into chunks and set each into a game property with a
    # key that has a suffix for its chunk number.
    # NOTE(review): when len(value) is an exact multiple of max, this creates
    # one trailing empty-string chunk property. It is harmless because
    # get_game_property() concatenates chunks until one is missing.
    chunks = int(len(value) / max) + 1
    index = 0
    for chunk in range(chunks):
        chunkValue = value[ index + max*chunk : index + max*(chunk+1) ]
        bpy.ops.object.game_property_new(type='STRING', name=key + str(chunk))
        chunkProperty = object_.game.properties[-1]
        chunkProperty.value = chunkValue
    return object_
def get_game_property(object_, key):
    """Get a property value from a game object in the game context, i.e. when
    the game engine is running.

    Handles both a single property named `key` and a value that was split by
    set_game_property() across key0, key1, ... properties.
    """
    names = object_.getPropertyNames()
    if key in names:
        # Single property holds the whole value.
        return object_[key]
    if ''.join((key, '0')) in names:
        # The value is split across numbered properties; reassemble in order
        # until a suffix is missing.
        pieces = []
        suffix = 0
        while True:
            chunkName = ''.join((key, str(suffix)))
            if chunkName not in names:
                break
            pieces.append(object_[chunkName])
            suffix += 1
        return ''.join(pieces)
    raise AttributeError(''.join(('No game property for "', key, '"')))
def delete_except(keepers):
    """Delete every object in the scene except `keepers` (object references or
    names). Returns the first keeper, selected, or None if there were none.

    Raises AttributeError if a named keeper doesn't exist.
    """
    # Following lines are a bit naughty. They add some meshes using the ops
    # API. This is only done in order to add the items to the project's
    # meshes. The next thing that happens is everything gets deleted,
    # including the newly added objects. The meshes are not deleted when the
    # objects are deleted though.
    # If we don't do this, then objects based on these meshes cannot be
    # created later.
    bpy.ops.mesh.primitive_uv_sphere_add()
    bpy.ops.mesh.primitive_circle_add()
    bpy.ops.mesh.primitive_torus_add()
    bpy.ops.mesh.primitive_cone_add()
    #
    # Delete everything except the keepers.
    #
    # Select all layers.
    for layer_index in range(len(bpy.data.scenes[0].layers)):
        bpy.data.scenes[0].layers[layer_index] = True
    #
    # Select all objects, on all layers.
    bpy.ops.object.select_all(action='SELECT')
    #
    # Unselect the keepers.
    if keepers is not None:
        for keeper in keepers:
            if isinstance(keeper, str):
                if keeper in bpy.data.objects:
                    bpy.data.objects[keeper].select = False
                else:
                    raise AttributeError(''.join((
                        'bpyutils delete_except "', keeper, '" not found.')))
            else:
                keeper.select = False
    #
    # And delete.
    bpy.ops.object.delete()
    #
    # Select only the first layer.
    for layer_index in range(len(bpy.data.scenes[0].layers)):
        if layer_index <= 0:
            bpy.data.scenes[0].layers[layer_index] = True
        else:
            bpy.data.scenes[0].layers[layer_index] = False
    #
    # Return None if there were no keepers.
    if keepers is None or len(keepers) < 1:
        return None
    #
    # Otherwise, select and return the first keeper.
    return select_only(keepers[0])
def set_active_layer(layer):
    """Check that the active layer is `layer`; raise RuntimeError otherwise.

    Despite the name, this cannot actually change the active layer.
    """
    # It'd be nice to set the active layer here. There doesn't seem to be any
    # way to do that in Python. Second best is to terminate if it happens to
    # have the wrong value.
    #
    # for index in range(len(bpy.data.scenes[0].layers)):
    #     bpy.data.scenes[0].layers[index] = (index == 0)
    #     print( index, bpy.data.scenes[0].layers[index] )
    # bpy.data.scenes[0].layers[0] = True
    # print( "Active layer:", bpy.data.scenes[0].active_layer )
    # bpy.context.scene.update()
    activeLayer = bpy.data.scenes[0].active_layer
    if activeLayer != layer:
        raise RuntimeError("".join((
            "Active layer wrong. You have to set it manually, sorry.",
            " Required:", str(layer), ". Actual:", str(activeLayer), ".")))
| |
import itertools
import json
import os
import tempfile
from numpy import average, std
from numpy.random import random_integers, random_sample
from tempfile import NamedTemporaryFile
from penchy.compat import unittest, write
from penchy.jobs.filters import *
from penchy.jobs.typecheck import Types
from penchy.util import tempdir
from penchy.tests.util import get_json_data, make_system_composition
class DacapoHarnessTest(unittest.TestCase):
    """Tests for the DacapoHarness filter against canned stderr fixtures."""

    @classmethod
    def setUpClass(cls):
        # Load the canned DaCapo stderr fixtures once for all tests.
        d = get_json_data('DacapoHarnessFilter')
        cls.multiple_iterations = d['multiple_iterations']
        cls.single_iterations = d['single_iterations']
        cls.failed_single = d['failed_single']
        cls.wrong_input = d['wrong_input']

    def setUp(self):
        super(DacapoHarnessTest, self).setUp()
        self.d = DacapoHarness()
        # Each fixture list becomes a list of open temporary files whose
        # names are fed to the filter as stderr paths.
        self.mi = write_to_tempfiles(DacapoHarnessTest.multiple_iterations)
        self.si = write_to_tempfiles(DacapoHarnessTest.single_iterations)
        self.failed = write_to_tempfiles(DacapoHarnessTest.failed_single)
        self.wrong_input = write_to_tempfiles(DacapoHarnessTest.wrong_input)

    def tearDown(self):
        # Close (and thereby delete) all the temporary fixture files.
        for f in itertools.chain(self.mi, self.si, self.failed,
                                 self.wrong_input):
            f.close()

    def test_multi_iteration_path(self):
        invocations = len(self.mi)
        stderr = [i.name for i in self.mi]
        self.d.run(stderr=stderr)
        self._assert_correct_out(invocations)

    def test_single_iteration_path(self):
        invocations = len(self.si)
        stderr = [i.name for i in self.si]
        self.d.run(stderr=stderr)
        self._assert_correct_out(invocations)

    def test_failed(self):
        invocations = len(self.failed)
        stderr = [i.name for i in self.failed]
        self.d.run(stderr=stderr)
        self.assertListEqual(self.d.out['failures'], [1] * invocations)

    def test_wrong_input(self):
        stderr = [i.name for i in self.wrong_input]
        for e in stderr:
            with self.assertRaises(WrongInputError):
                self.d.run(stderr=[e])

    def _assert_correct_out(self, invocations):
        # The filter must emit exactly its declared outputs, one value per
        # invocation.
        self.assertSetEqual(set(self.d.out), self.d._output_names)
        self.assertEqual(len(self.d.out['failures']), invocations)
        self.assertEqual(len(self.d.out['times']), invocations)
        self.assertEqual(len(self.d.out['valid']), invocations)
class HProfCpuTimesTest(unittest.TestCase):
    """Tests for the HProfCpuTimes filter against canned hprof fixtures."""

    @classmethod
    def setUpClass(cls):
        h = get_json_data('HProfCpuTimesFilter')
        cls.single_iterations = h['single_iterations']
        cls.wrong_input = h['wrong_input']

    def setUp(self):
        super(HProfCpuTimesTest, self).setUp()
        self.h = HProfCpuTimes()
        self.si = write_to_tempfiles(HProfCpuTimesTest.single_iterations)
        self.wrong_input = write_to_tempfiles(HProfCpuTimesTest.wrong_input)

    def tearDown(self):
        # Close (and thereby delete) all the temporary fixture files.
        for f in itertools.chain(self.si, self.wrong_input):
            f.close()

    def test_single_iteration_path(self):
        invocations = len(self.si)
        hprof_file = [i.name for i in self.si]
        self.h.run(hprof=hprof_file)
        self._assert_correct_out(invocations)

    def test_wrong_input(self):
        hprof_files = [i.name for i in self.wrong_input]
        for hprof_file in hprof_files:
            with self.assertRaises(WrongInputError):
                self.h.run(hprof=[hprof_file])

    def _assert_correct_out(self, invocations):
        # Every declared output must be present, one value per invocation.
        self.assertSetEqual(set(self.h.out), self.h._output_names)
        for k in self.h.out.keys():
            self.assertEqual(len(self.h.out[k]), invocations)
class TamiflexTest(unittest.TestCase):
    """Tests for the Tamiflex filter against canned reflection-log fixtures."""

    @classmethod
    def setUpClass(cls):
        h = get_json_data('TamiflexFilter')
        cls.single_iterations = h['single_iterations']
        cls.wrong_input = h['wrong_input']

    def setUp(self):
        super(TamiflexTest, self).setUp()
        self.h = Tamiflex()
        self.si = write_to_tempfiles(TamiflexTest.single_iterations)
        self.wrong_input = write_to_tempfiles(TamiflexTest.wrong_input)

    def tearDown(self):
        # Close (and thereby delete) all the temporary fixture files.
        for f in itertools.chain(self.si, self.wrong_input):
            f.close()

    def test_single_iteration_path(self):
        invocations = len(self.si)
        ref_log = [i.name for i in self.si]
        self.h.run(reflection_log=ref_log)
        self._assert_correct_out(invocations)

    def test_wrong_input(self):
        ref_logs = [i.name for i in self.wrong_input]
        for ref_log in ref_logs:
            with self.assertRaises(WrongInputError):
                self.h.run(reflection_log=[ref_log])

    def _assert_correct_out(self, invocations):
        # Every declared output must be present, one value per invocation.
        self.assertSetEqual(set(self.h.out), self.h._output_names)
        for k in self.h.out.keys():
            self.assertEqual(len(self.h.out[k]), invocations)
def write_to_tempfiles(data):
    """Write each datum to its own NamedTemporaryFile, rewound and still open.

    The files are intentionally not closed here; callers close them in
    tearDown, which also deletes them.
    """
    files = []
    for datum in data:
        tempfile_ = NamedTemporaryFile(prefix='penchy')
        write(tempfile_, datum)
        tempfile_.seek(0)
        files.append(tempfile_)
    return files
class HProfTest(unittest.TestCase):
    """Tests for HProf base-filter construction errors."""

    def test_wrong_outputs(self):
        # Declaring outputs that the filter cannot produce must be rejected.
        with self.assertRaises(ValueError):
            HProf(outputs=Types(('a', list, int), ('b', list, int)),
                  start_marker='', end_marker='',
                  skip=1, data_re=None, start_re=None)
class ExtractTest(unittest.TestCase):
    """Tests for the Extract filter's implicit and explicit column addressing."""

    def setUp(self):
        # Results keyed by composition id; each maps column name to value.
        self.results = {1: {'a': 42,
                            'b': 32},
                        2: {'b': 0,
                            'c': 21}}

    def test_implicit(self):
        f = Extract('a', 'b')
        f._run(results=self.results)
        self.assertEqual(f.out, {'a': 42, 'b': 32})

    def test_explicit(self):
        # Explicit (composition, column) pairs pick from a named composition.
        f = Extract((1, 'a'), (2, 'b'))
        f._run(results=self.results)
        self.assertEqual(f.out, {'a': 42, 'b': 0})

    def test_implicit_fail(self):
        f = Extract('a', 'd')
        with self.assertRaises(WrongInputError):
            f._run(results=self.results)

    def test_explicit_fail_column(self):
        f = Extract((1, 'a'), (2, 'd'))
        with self.assertRaises(WrongInputError):
            f._run(results=self.results)

    def test_explicit_fail_composition(self):
        f = Extract((1, 'a'), (3, 'c'))
        with self.assertRaises(WrongInputError):
            f._run(results=self.results)

    def test_no_arguments(self):
        with self.assertRaises(ValueError):
            Extract()

    def test_malformed_argument(self):
        with self.assertRaises(ValueError):
            Extract('a', (1, 'a', 'b'))
class MergeTest(unittest.TestCase):
    """Tests for the Merge filter's column merging and argument validation."""

    def setUp(self):
        # Results keyed by composition id; each maps column name to value.
        self.results = {1: {'a': 42,
                            'b': 32},
                        2: {'b': 0,
                            'c': 21}}

    def test_implicit(self):
        f = Merge(('col1', 'col2'), [('a', Value('id1')), ('b', Value('id2'))])
        f._run(results=self.results)
        self.assertEqual(f.out, {'col1': [42, 32], 'col2': ['id1', 'id2']})

    def test_explicit(self):
        f = Merge(('col1', 'col2'), [(1, 'a', Value('id1')), (2, 'b', Value('id2'))])
        f._run(results=self.results)
        self.assertEqual(f.out, {'col1': [42, 0], 'col2': ['id1', 'id2']})

    def test_implicit_fail(self):
        f = Merge(('col1', 'col2'), [('a', Value('id1')), ('d', Value('id2'))])
        with self.assertRaises(WrongInputError):
            f._run(results=self.results)

    def test_explicit_fail_column(self):
        f = Merge(('col1', 'col2'), [(1, 'a', Value('id1')), (2, 'd', Value('id2'))])
        with self.assertRaises(WrongInputError):
            f._run(results=self.results)

    def test_explicit_fail_composition(self):
        f = Merge(('col1', 'col2'), [(1, 'a', Value('id1')), (3, 'c', Value('id2'))])
        with self.assertRaises(WrongInputError):
            f._run(results=self.results)

    def test_malformed_arguments_type(self):
        with self.assertRaises(ValueError):
            Merge(('col1', 'col2'), [(1, 42, Value('id1')), (2, 'c', Value('id2'))])

    def test_malformed_arguments_length_too_small(self):
        with self.assertRaises(ValueError):
            Merge(('col1', 'col2'), [(1, 'b'), (2, 'c', Value('id2'))])

    def test_malformed_arguments_length_too_big(self):
        with self.assertRaises(ValueError):
            Merge(('col1', 'col2'), [(1, 'b', Value('id1'), Value('foo')), (2, 'c', Value('id2'))])
class MergingReceiveTest(unittest.TestCase):
    """Tests for MergingReceive: Merge fed by the environment's receive()."""

    def setUp(self):
        # The receive() lambda closes over self late, so it is safe to define
        # it before self.results is assigned below.
        environment = {'receive': lambda: self.results}
        self.kwargs = {':environment:' : environment}
        self.results = {1: {'a': 42,
                            'b': 32},
                        2: {'b': 0,
                            'c': 21}}

    def test_implicit(self):
        f = MergingReceive(('col1', 'col2'), [('a', Value('id1')), ('b', Value('id2'))])
        f._run(**self.kwargs)
        self.assertEqual(f.out, {'col1': [42, 32], 'col2': ['id1', 'id2']})

    def test_explicit(self):
        f = MergingReceive(('col1', 'col2'), [(1, 'a', Value('id1')), (2, 'b', Value('id2'))])
        f._run(**self.kwargs)
        self.assertEqual(f.out, {'col1': [42, 0], 'col2': ['id1', 'id2']})

    def test_implicit_fail(self):
        f = MergingReceive(('col1', 'col2'), [('a', Value('id1')), ('d', Value('id2'))])
        with self.assertRaises(WrongInputError):
            f._run(**self.kwargs)

    def test_explicit_fail_column(self):
        f = MergingReceive(('col1', 'col2'), [(1, 'a', Value('id1')), (2, 'd', Value('id2'))])
        with self.assertRaises(WrongInputError):
            f._run(**self.kwargs)

    def test_explicit_fail_composition(self):
        f = MergingReceive(('col1', 'col2'), [(1, 'a', Value('id1')), (3, 'c', Value('id2'))])
        with self.assertRaises(WrongInputError):
            f._run(**self.kwargs)
class ExtractingReceiveTest(unittest.TestCase):
    """Tests for ExtractingReceive: Extract fed by the environment's receive()."""

    def setUp(self):
        # The receive() lambda closes over self late, so it is safe to define
        # it before self.results is assigned below.
        environment = {'receive': lambda: self.results}
        self.kwargs = {':environment:' : environment}
        self.results = {1: {'a': 42,
                            'b': 32},
                        2: {'b': 0,
                            'c': 21}}

    def test_implicit(self):
        f = ExtractingReceive('a', 'b')
        f._run(**self.kwargs)
        self.assertEqual(f.out, {'a': 42, 'b': 32})

    def test_explicit(self):
        f = ExtractingReceive((1, 'a'), (2, 'b'))
        f._run(**self.kwargs)
        self.assertEqual(f.out, {'a': 42, 'b': 0})

    def test_implicit_fail(self):
        f = ExtractingReceive('a', 'd')
        with self.assertRaises(WrongInputError):
            f._run(**self.kwargs)

    def test_explicit_fail_column(self):
        f = ExtractingReceive((1, 'a'), (2, 'd'))
        with self.assertRaises(WrongInputError):
            f._run(**self.kwargs)

    def test_explicit_fail_composition(self):
        f = ExtractingReceive((1, 'a'), (3, 'c'))
        with self.assertRaises(WrongInputError):
            f._run(**self.kwargs)
class SendTest(unittest.TestCase):
    """Tests for the Send filter."""

    def test_send(self):
        # The environment's send callable captures what the filter transmits.
        a = [1]
        f = Send()
        f._run(payload=42,
               **{':environment:' : {'send' : lambda data: a.__setitem__(0, data)}})
        self.assertEqual(a, [{'payload': 42}])
class StatisticRuntimeEvaluationTest(unittest.TestCase):
    """Tests for StatisticRuntimeEvaluation against canned times and expectations."""

    @classmethod
    def setUpClass(cls):
        t = get_json_data('StatisticRuntimeEvaluationFilter')
        cls.times = t['times']
        cls.expected = t['expected']

    def test_statistics(self):
        f = StatisticRuntimeEvaluation()
        keys = ['averages', 'maximals', 'minimals',
                'positive_deviations', 'negative_deviations']
        for times, results in zip(StatisticRuntimeEvaluationTest.times,
                                  StatisticRuntimeEvaluationTest.expected):
            # output is correctly cleaned up?
            self.assertDictEqual(f.out, {})
            f._run(times=times)
            # contains the right keys?
            # NOTE(review): assertItemsEqual is Python 2 API; presumably
            # penchy.compat's unittest provides it — verify.
            self.assertItemsEqual(f.out.keys(), keys)
            for key in keys:
                # Float statistics: compare with tolerance.
                for actual, expected in zip(f.out[key], results[key]):
                    self.assertAlmostEqual(actual, expected)
            f.reset()
class EvaluationTest(unittest.TestCase):
    """Tests for the Evaluation filter's input handling."""

    def test_default_input(self):
        e = Evaluation(lambda input: {'result' : input})
        e._run(input=42)
        self.assertDictEqual(e.out, {'result' : 42})

    def test_missing_default_input(self):
        # Running without the implicit 'input' argument must fail.
        e = Evaluation(lambda x: None)
        with self.assertRaises(ValueError):
            e._run()

    def test_missing_input(self):
        # Running without a declared typed input must fail.
        e = Evaluation(lambda x: x, Types(('value', int)), Types(('value', int)))
        with self.assertRaises(ValueError):
            e._run()
class BackupTest(unittest.TestCase):
    """Tests for the BackupFile filter with absolute and relative targets."""

    def test_copy(self):
        s = "'tis a test string"
        with NamedTemporaryFile(delete=False) as f:
            path = f.name
            write(f, s)
        self.assertTrue(os.path.exists(path))
        backup_path = '/tmp/penchy-backup-test'
        b = BackupFile(backup_path)
        b.run(filename=path, **{':environment:' : {}})

        # did backup?
        with open(backup_path) as f:
            self.assertEqual(f.read(), s)
        # did not modify backuped file?
        with open(path) as f:
            self.assertEqual(f.read(), s)

        os.remove(path)
        os.remove(backup_path)

    def test_relative_copy(self):
        # A relative backup target is resolved against the composition's
        # node path.
        s = "'tis a test string"
        comp = make_system_composition()
        comp.node_setting.path = '/tmp'
        with NamedTemporaryFile(delete=False) as f:
            path = f.name
            write(f, s)
        self.assertTrue(os.path.exists(path))
        backup_file = 'penchy-backup-test'
        backup_path = os.path.join(comp.node_setting.path, backup_file)
        b = BackupFile(backup_file)
        b.run(filename=path, **{':environment:' : {'current_composition' : comp}})

        # did backup?
        with open(backup_path) as f:
            self.assertEqual(f.read(), s)
        # did not modify backuped file?
        with open(path) as f:
            self.assertEqual(f.read(), s)

        os.remove(path)
        # NOTE(review): backup_path is already absolute, so this join is a
        # no-op (os.path.join discards the first part for absolute paths).
        os.remove(os.path.join(comp.node_setting.path, backup_path))

    def test_not_existing_path(self):
        # create unique not existing path
        with NamedTemporaryFile() as f:
            path = f.name
        b = BackupFile('/tmp/penchy-backup-test')
        with self.assertRaises(WrongInputError):
            b.run(filename=path, **{':environment:' : {}})
class SaveTest(unittest.TestCase):
    """Tests for the Save filter, which writes its data input to a file."""
    def test_save_relative(self):
        # A relative target is resolved against the composition's node path.
        payload = "'tis a test string"
        target_name = 'penchy-save-test'
        composition = make_system_composition()
        composition.node_setting.path = '/tmp'
        expected_path = os.path.join(composition.node_setting.path, target_name)
        Save(target_name).run(data=payload,
                              **{':environment:' : {'current_composition': composition}})
        with open(expected_path) as handle:
            self.assertEqual(handle.read(), payload)
        os.remove(expected_path)
    def test_save_absolute(self):
        # An absolute target is used verbatim.
        payload = "'tis a test string"
        target = '/tmp/penchy-save-test'
        Save(target).run(data=payload, **{':environment:' : {}})
        with open(target) as handle:
            self.assertEqual(handle.read(), payload)
        os.remove(target)
class ReadTest(unittest.TestCase):
    """Tests for the Read filter, which reads file contents into 'data'."""
    def test_read(self):
        payload = "'tis a test string"
        with NamedTemporaryFile() as handle:
            write(handle, payload)
            handle.flush()
            reader = Read('utf8')
            reader.run(paths=[handle.name])
            self.assertListEqual(reader.out['data'], [payload])
class ServerFlowSystemFilterTest(unittest.TestCase):
    """System filters running in the server flow, fed the ':environment:' kwarg."""
    def setUp(self):
        # Minimal server-side environment: a job marker and no composition.
        self.env = {
            'job' : 'no file',
            'current_composition' : None
        }
    def test_dump(self):
        numbers = [23, 42]
        strings = ['a', 'b', 'c']
        d = Dump()
        d._run(numbers=numbers, strings=strings, **{':environment:' : self.env})
        dump = json.loads(d.out['dump'])
        # Server-side dumps carry job info but no 'jvm' section.
        self.assertIn('job', dump['system'])
        self.assertNotIn('jvm', dump['system'])
        # All named inputs end up under 'data'.
        self.assertIn('numbers', dump['data'])
        self.assertIn('strings', dump['data'])
        self.assertItemsEqual(numbers, dump['data']['numbers'])
        self.assertItemsEqual(strings, dump['data']['strings'])
    def test_save_and_backup(self):
        # Save then BackupFile inside a throwaway working directory; both
        # files must contain the payload afterwards.
        data = "'tis the end"
        with tempdir(delete=True):
            s = Save('save')
            s._run(data=data, **{':environment:' : self.env})
            b = BackupFile('backup')
            b._run(filename='save', **{':environment:' : self.env})
            with open('save') as f:
                self.assertEqual(f.read(), data)
            with open('backup') as f:
                self.assertEqual(f.read(), data)
class MeanTest(unittest.TestCase):
    """The Mean filter must agree with numpy.average."""
    def test_against_numpy_integers(self):
        sample = random_integers(-20, 20, 50)
        mean_filter = Mean()
        mean_filter._run(values=sample)
        self.assertAlmostEqual(mean_filter.out['mean'], average(sample))
    def test_against_numpy_floats(self):
        sample = random_sample(20)
        mean_filter = Mean()
        mean_filter._run(values=sample)
        self.assertAlmostEqual(mean_filter.out['mean'], average(sample))
class StandardDeviationTest(unittest.TestCase):
    """The StandardDeviation filter must agree with numpy.std."""
    def test_against_numpy_integers(self):
        # Method-name typo fixed ("integes" -> "integers").
        rnd = random_integers(-20, 20, 50)
        f = StandardDeviation(ddof=1)
        f._run(values=rnd)
        self.assertAlmostEqual(f.out['standard_deviation'], std(rnd, ddof=1))
    def test_against_numpy_floats(self):
        rnd = random_sample(20)
        f = StandardDeviation(ddof=1)
        f._run(values=rnd)
        self.assertAlmostEqual(f.out['standard_deviation'], std(rnd, ddof=1))
class SumTest(unittest.TestCase):
    """The Sum filter must agree with the builtin sum."""
    def test_integers(self):
        sample = random_integers(-20, 20, 50)
        sum_filter = Sum()
        sum_filter._run(values=sample)
        self.assertEqual(sum_filter.out['sum'], sum(sample))
    def test_against_numpy_floats(self):
        sample = random_sample(20)
        sum_filter = Sum()
        sum_filter._run(values=sample)
        self.assertAlmostEqual(sum_filter.out['sum'], sum(sample))
class EnumerateTest(unittest.TestCase):
    """Tests for the Enumerate filter."""
    def test_preserves_input(self):
        # The values pass through unchanged.
        enum_filter = Enumerate()
        enum_filter._run(values=[1, 2, 3])
        self.assertEqual(enum_filter.out['values'], [1, 2, 3])
    def test_enumerate(self):
        # Numbers are generated from `start` with the given `step`.
        enum_filter = Enumerate(start=3, step=2)
        enum_filter._run(values=['a', 'b', 'c'])
        self.assertEqual(enum_filter.out['numbers'], [3, 5, 7])
class UnpackTest(unittest.TestCase):
    """Unpack extracts the single element of a singleton list."""
    def test_valid(self):
        unpack = Unpack()
        unpack._run(singleton=[1])
        self.assertEqual(unpack.out['result'], 1)
    def test_list_too_long(self):
        # More than one element is not a singleton.
        unpack = Unpack()
        with self.assertRaises(WrongInputError):
            unpack._run(singleton=[1, 2, 3])
    def test_list_too_short(self):
        # An empty list is not a singleton either.
        unpack = Unpack()
        with self.assertRaises(WrongInputError):
            unpack._run(singleton=[])
class MapTest(unittest.TestCase):
    """Tests for the Map filter, which applies an Evaluation per element."""
    def test_identity(self):
        # Method-name typo fixed ("idenity" -> "identity").
        identity = Evaluation(lambda x: {'x': x}, Types(('x', object)), Types(('x', object)))
        f = Map(identity)
        f._run(values=[1, 2, 3])
        self.assertEqual(f.out['values'], [1, 2, 3])
    def test_multidimensional(self):
        multi = Evaluation(lambda x: {'x': [x]}, Types(('x', object)), Types(('x', object)))
        f = Map(multi)
        f._run(values=[1, 2, 3])
        self.assertEqual(f.out['values'], [[1], [2], [3]])
    def test_wrong_inputs(self):
        # Map requires the wrapped evaluation to take exactly one input.
        wrong = Evaluation(lambda x, y: {'x': x}, Types(('x', object), ('y', object)), Types(('x', object)))
        with self.assertRaises(TypeCheckError):
            Map(wrong)
    def test_wrong_outputs(self):
        # ... and to produce exactly one output.
        wrong = Evaluation(lambda x: {'x': x, 'y': x}, Types(('x', object)), Types(('x', object), ('y', object)))
        with self.assertRaises(TypeCheckError):
            Map(wrong)
    def test_with_all_arguments(self):
        # Explicit input/output names: read 'a', write 'b', map 'c' -> 'd'.
        identity = Evaluation(lambda c: {'d': c}, Types(('c', object)), Types(('d', object)))
        f = Map(identity, 'a', 'b', 'c', 'd')
        f._run(a=[1, 2, 3])
        self.assertEqual(f.out['b'], [1, 2, 3])
class DecorateTest(unittest.TestCase):
    """Decorate formats each value with a format string."""
    def test_valid(self):
        f = Decorate("{0}")
        f._run(values=[1, 2, 3])
        self.assertEqual(f.out['values'], ["1", "2", "3"])
    def test_nothing_to_interpolate(self):
        # Method-name typo fixed ("interplolate" -> "interpolate").
        # An empty format string yields empty strings regardless of input.
        f = Decorate("")
        f._run(values=[1, 2, 3])
        self.assertEqual(f.out['values'], ["", "", ""])
class DropFirstTest(unittest.TestCase):
    """DropFirst discards the first element of its input."""
    def test_valid(self):
        drop = DropFirst()
        drop._run(values=[1, 2, 3])
        self.assertEqual(drop.out['values'], [2, 3])
class ZipTest(unittest.TestCase):
    """Zip transposes a list of rows into a list of columns."""
    def test_valid(self):
        zip_filter = Zip()
        zip_filter._run(values=[[1, 2], [3, 4], [5, 6]])
        self.assertEqual(zip_filter.out['values'], [[1, 3, 5], [2, 4, 6]])
class SliceTest(unittest.TestCase):
    """Tests for the Slice filter (and, oddly, the Reverse filter)."""
    def test_slice1(self):
        # Slice(0, 2) keeps elements [0, 2).
        f = Slice(0, 2)
        f._run(values=[1, 2, 3])
        self.assertEqual(f.out['values'], [1, 2])
    # NOTE(review): this exercises the Reverse filter and probably belongs in
    # its own ReverseTest case.
    def test_reverse(self):
        f = Reverse()
        f._run(values=[1, 2, 3])
        self.assertEqual(f.out['values'], [3, 2, 1])
class ReduceTest(unittest.TestCase):
    """Reduce folds the values with a binary function and an initial value."""
    def test_reduce(self):
        fold = Reduce(lambda x, y: x + y, 0)
        fold._run(values=[1, 2, 3])
        self.assertEqual(fold.out['values'], 6)
class SteadyStateTest(unittest.TestCase):
    """SteadyState picks, per invocation, the window of k values whose
    variation falls below the threshold."""
    def test_one_invocation(self):
        f = SteadyState(k=5, threshold=0.3)
        f._run(values=[[30, 33, 4, 16, 29, 34, 10, 44, 12, 25, 22, 25, 36, 49, 32, 24, 39, 36, 34, 38]])
        self.assertEqual(f.out['values'], [[36, 49, 32, 24, 39]])
    def test_two_invocations(self):
        # Each invocation's series is reduced independently.
        f = SteadyState(k=5, threshold=0.3)
        f._run(values=[[30, 33, 4, 16, 29, 34, 10, 44, 12, 25, 22, 25, 36, 49, 32, 24, 39, 36, 34, 38],
                       [15, 36, 21, 1, 2, 15, 47, 7, 19, 28, 39, 29, 32, 17, 15, 18, 14, 8, 39, 0]])
        self.assertEqual(f.out['values'], [[36, 49, 32, 24, 39], [19, 28, 39, 29, 32] ])
class ConfidenceIntervalMeanTest(unittest.TestCase):
    """Confidence interval of the mean; expected bounds were precomputed."""
    def test_small_sample_set(self):
        # Small n: presumably a t-distribution interval — the exact bounds
        # here were taken from a reference computation.
        f = ConfidenceIntervalMean(significance_level=0.9)
        f._run(values=[1, 2, 3])
        for actual, expected in zip(f.out['interval'], (1.9179390061550845, 2.0820609938449155)):
            self.assertAlmostEqual(actual, expected)
    def test_large_sample_set(self):
        # Larger n (31 values) exercises the large-sample branch, if any.
        f = ConfidenceIntervalMean(significance_level=0.9)
        f._run(values=range(31))
        for actual, expected in zip(f.out['interval'], (14.794795879876117, 15.205204120123883)):
            self.assertAlmostEqual(actual, expected)
class CI2AlternativesTest(unittest.TestCase):
    """Confidence interval for the difference of two alternatives (xs - ys)."""
    def test_small_sample_set(self):
        f = CI2Alternatives(significance_level=0.9)
        f._run(xs=[1, 2, 3], ys=range(31))
        for actual, expected in zip(f.out['interval'], (-13.219442204882425, -12.780557795117575)):
            self.assertAlmostEqual(actual, expected)
    def test_large_sample_set(self):
        # Identical samples: the interval must be symmetric around zero.
        f = CI2Alternatives(significance_level=0.9)
        f._run(xs=range(31), ys=range(31))
        for actual, expected in zip(f.out['interval'], (-0.29020244973403198, 0.29020244973403198)):
            self.assertAlmostEqual(actual, expected)
class SortTest(unittest.TestCase):
    """Sort reorders all columns according to the given key column(s)."""
    def test_valid(self):
        sort_filter = Sort("values")
        sort_filter._run(values=[1, 3, 2], names=['a', 'c', 'b'])
        self.assertEqual(sort_filter.out['values'], [1, 2, 3])
        self.assertEqual(sort_filter.out['names'], ['a', 'b', 'c'])
    def test_sort_by_multiple_cols(self):
        # Ties on 'a' are broken by 'b'; column 'c' just follows along.
        sort_filter = Sort(["a", "b"])
        sort_filter._run(a=[3, 1, 1], b=['b', 'c', 'a'], c=[1, 2, 3])
        self.assertEqual(sort_filter.out['a'], [1, 1, 3])
        self.assertEqual(sort_filter.out['b'], ['a', 'c', 'b'])
        self.assertEqual(sort_filter.out['c'], [3, 2, 1])
class AccumulateTest(unittest.TestCase):
    """Accumulate produces running totals of the named column."""
    def test_valid(self):
        accum_filter = Accumulate('a')
        accum_filter._run(a=[1, 2, 3])
        self.assertEqual(accum_filter.out['accum'], [1, 3, 6])
class NormalizeTest(unittest.TestCase):
    """Normalize scales the values so they sum to one."""
    def test_valid(self):
        normalize = Normalize()
        normalize._run(values=[67, 22, 7, 5, 3, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], norm=126)
        self.assertAlmostEqual(1.0 - sum(normalize.out['values']), 0.0)
class ComposerTest(unittest.TestCase):
    """Composer builds pipelines lazily: classes (optionally with args) are
    instantiated fresh each time the composition is materialized via >>."""
    def setUp(self):
        self.dacapo = DacapoHarness()
        # Mixed spec: a live instance, a bare class, a class with a map.
        self.composed = Composer(self.dacapo, Print, 'a', Print, ('a', 'b'))
        # Class with positional and keyword constructor arguments.
        self.composed2 = Composer(self.dacapo, (Slice, 42, {'step' : 2}),
                                  'a', Print, ('a', 'b'))
    @staticmethod
    def elements_of(pipeline):
        # Collect every node reachable through the pipeline's edges.
        elements = set()
        for edge in pipeline.edges:
            elements.add(edge.source)
            elements.add(edge.sink)
        return elements
    def test_unchanged_instance(self):
        # Pre-built instances are used as-is, not copied.
        self.assertIn(self.dacapo, self.elements_of(self.composed >> Print()))
    def test_new_generated_instances(self):
        # Classes are instantiated anew on every materialization.
        self.assertNotEqual(id((self.composed >> Print()).edges[0].sink),
                            id((self.composed >> Print()).edges[0].sink))
    def test_generation_with_arguments(self):
        # NOTE: 'slice' shadows the builtin here; harmless in this scope.
        slice = (self.composed2 >> Print()).edges[0].sink
        self.assertEqual(slice.start, 42)
        self.assertEqual(slice.step, 2)
    def test_passing_of_maps(self):
        # Edge maps: None for bare elements, explicit pairs otherwise.
        maps = [edge.map_ for edge in (self.composed >> Print()).edges]
        self.assertItemsEqual([None, [('a', 'a')], [('a', 'b')]], maps)
class ExportTest(unittest.TestCase):
    """Tests for the Export filter, which writes tab-separated CSV output."""
    def setUp(self):
        # Bug fix: mkstemp returns (fd, path) and the original discarded the
        # open OS-level descriptor, leaking one fd per test. Close it at once.
        fd, self.tempfile = tempfile.mkstemp()
        os.close(fd)
    def tearDown(self):
        # Always clean up the temp file, even when a test fails early
        # (the original leaked it in test_unbalanced_values).
        if os.path.exists(self.tempfile):
            os.remove(self.tempfile)
    def _read_output(self):
        """Return the exported file's content, closing the handle properly."""
        with open(self.tempfile) as f:
            return f.read()
    def test_simple(self):
        f = Export(self.tempfile, ['test1', 'test2', 'values'],
                   [['v1', 'v2'].__getitem__, ['z1', 'z2'].__getitem__])
        f._run(values=[[1, 2], [3, 4]])
        expected = "test1\ttest2\tvalues\r\n" \
                   "v1\tz1\t1\r\n" \
                   "v1\tz2\t2\r\n" \
                   "v2\tz1\t3\r\n" \
                   "v2\tz2\t4\r\n"
        self.assertMultiLineEqual(self._read_output(), expected)
    def test_function_for_value(self):
        # A value-mapping function transforms each cell before writing.
        f = Export(self.tempfile, ['test1', 'test2', 'values'],
                   [['v1', 'v2'].__getitem__, ['z1', 'z2'].__getitem__],
                   lambda x: "small" if x < 2 else "big")
        f._run(values=[[1, 2], [3, 4]])
        expected = "test1\ttest2\tvalues\r\n" \
                   "v1\tz1\tsmall\r\n" \
                   "v1\tz2\tbig\r\n" \
                   "v2\tz1\tbig\r\n" \
                   "v2\tz2\tbig\r\n"
        self.assertMultiLineEqual(self._read_output(), expected)
    def test_without_functions(self):
        # Without index functions the numeric indices themselves are written.
        f = Export(self.tempfile, ['test1', 'test2', 'values'])
        f._run(values=[[1, 2], [3, 4]])
        expected = "test1\ttest2\tvalues\r\n" \
                   "0\t0\t1\r\n" \
                   "0\t1\t2\r\n" \
                   "1\t0\t3\r\n" \
                   "1\t1\t4\r\n"
        self.assertMultiLineEqual(self._read_output(), expected)
    def test_short_function_list(self):
        # Missing functions fall back to the raw index for deeper dimensions.
        f = Export(self.tempfile, ['bench', 'iteration', 'times'],
                   [['batik', 'fop'].__getitem__])
        f._run(values=[[1, 2], [3, 4]])
        expected = "bench\titeration\ttimes\r\n" \
                   "batik\t0\t1\r\n" \
                   "batik\t1\t2\r\n" \
                   "fop\t0\t3\r\n" \
                   "fop\t1\t4\r\n"
        self.assertMultiLineEqual(self._read_output(), expected)
    def test_unbalanced_values(self):
        # Ragged nesting depth must be rejected.
        f = Export(self.tempfile, ['test1', 'test2', 'values'],
                   [['v1', 'v2'].__getitem__, ['z1', 'z2'].__getitem__])
        with self.assertRaises(ValueError):
            f._run(values=[[1, [2]], [3, 4]])
| |
# Copyright 2013 OpenStack Foundation.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six.moves import xrange
from glance.domain import proxy
import glance.tests.utils as test_utils
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
class FakeProxy(object):
    """Stand-in proxy class that simply records how it was constructed."""
    def __init__(self, base, *args, **kwargs):
        self.base, self.args, self.kwargs = base, args, kwargs
class FakeRepo(object):
    """Fake repository: every operation records its call and returns `result`."""
    def __init__(self, result=None):
        self.args, self.kwargs = None, None
        self.result = result
    def fake_method(self, *args, **kwargs):
        # Remember the most recent call so tests can inspect it.
        self.args = args
        self.kwargs = kwargs
        return self.result
    # Every repo operation shares the same recording implementation.
    get = list = add = save = remove = fake_method
class TestProxyRepoPlain(test_utils.BaseTestCase):
    """proxy.Repo without item wrapping forwards calls and results untouched."""
    def setUp(self):
        super(TestProxyRepoPlain, self).setUp()
        self.fake_repo = FakeRepo()
        self.proxy_repo = proxy.Repo(self.fake_repo)
    def _test_method(self, name, base_result, *args, **kwargs):
        """Invoke `name` on the proxy; args and result must pass through as-is."""
        self.fake_repo.result = base_result
        method = getattr(self.proxy_repo, name)
        proxy_result = method(*args, **kwargs)
        self.assertEqual(proxy_result, base_result)
        self.assertEqual(self.fake_repo.args, args)
        self.assertEqual(self.fake_repo.kwargs, kwargs)
    def test_get(self):
        self._test_method('get', 'snarf', 'abcd')
    def test_list(self):
        self._test_method('list', ['sniff', 'snarf'], 2, filter='^sn')
    def test_add(self):
        self._test_method('add', 'snuff', 'enough')
    def test_save(self):
        self._test_method('save', 'snuff', 'enough')
    def test_remove(self):
        # Bug fix: this previously exercised 'add' instead of 'remove',
        # leaving Repo.remove untested.
        self._test_method('remove', None, 'flying')
class TestProxyRepoWrapping(test_utils.BaseTestCase):
    """proxy.Repo with an item proxy class: results are wrapped in FakeProxy
    (constructed with item_proxy_kwargs), while call arguments are unwrapped
    back to their base objects before reaching the underlying repo."""
    def setUp(self):
        super(TestProxyRepoWrapping, self).setUp()
        self.fake_repo = FakeRepo()
        self.proxy_repo = proxy.Repo(self.fake_repo,
                                     item_proxy_class=FakeProxy,
                                     item_proxy_kwargs={'a': 1})
    def _test_method(self, name, base_result, *args, **kwargs):
        # Generic check: result wrapped with the configured kwargs, call
        # arguments forwarded untouched.
        self.fake_repo.result = base_result
        method = getattr(self.proxy_repo, name)
        proxy_result = method(*args, **kwargs)
        self.assertIsInstance(proxy_result, FakeProxy)
        self.assertEqual(proxy_result.base, base_result)
        self.assertEqual(len(proxy_result.args), 0)
        self.assertEqual(proxy_result.kwargs, {'a': 1})
        self.assertEqual(self.fake_repo.args, args)
        self.assertEqual(self.fake_repo.kwargs, kwargs)
    def test_get(self):
        self.fake_repo.result = 'snarf'
        result = self.proxy_repo.get('some-id')
        self.assertIsInstance(result, FakeProxy)
        self.assertEqual(self.fake_repo.args, ('some-id',))
        self.assertEqual(self.fake_repo.kwargs, {})
        self.assertEqual(result.base, 'snarf')
        self.assertEqual(result.args, tuple())
        self.assertEqual(result.kwargs, {'a': 1})
    def test_list(self):
        # Each element of a list result is wrapped individually.
        self.fake_repo.result = ['scratch', 'sniff']
        results = self.proxy_repo.list(2, prefix='s')
        self.assertEqual(self.fake_repo.args, (2,))
        self.assertEqual(self.fake_repo.kwargs, {'prefix': 's'})
        self.assertEqual(len(results), 2)
        for i in xrange(2):
            self.assertIsInstance(results[i], FakeProxy)
            self.assertEqual(results[i].base, self.fake_repo.result[i])
            self.assertEqual(results[i].args, tuple())
            self.assertEqual(results[i].kwargs, {'a': 1})
    def _test_method_with_proxied_argument(self, name, result):
        # Proxied arguments are unwrapped ('snoop', not the FakeProxy) before
        # being handed to the base repo; None results stay None.
        self.fake_repo.result = result
        item = FakeProxy('snoop')
        method = getattr(self.proxy_repo, name)
        proxy_result = method(item)
        self.assertEqual(self.fake_repo.args, ('snoop',))
        self.assertEqual(self.fake_repo.kwargs, {})
        if result is None:
            self.assertIsNone(proxy_result)
        else:
            self.assertIsInstance(proxy_result, FakeProxy)
            self.assertEqual(proxy_result.base, result)
            self.assertEqual(proxy_result.args, tuple())
            self.assertEqual(proxy_result.kwargs, {'a': 1})
    def test_add(self):
        self._test_method_with_proxied_argument('add', 'dog')
    def test_add_with_no_result(self):
        self._test_method_with_proxied_argument('add', None)
    def test_save(self):
        self._test_method_with_proxied_argument('save', 'dog')
    def test_save_with_no_result(self):
        self._test_method_with_proxied_argument('save', None)
    def test_remove(self):
        self._test_method_with_proxied_argument('remove', 'dog')
    def test_remove_with_no_result(self):
        self._test_method_with_proxied_argument('remove', None)
class FakeImageFactory(object):
    """Fake image factory: records new_image() kwargs and returns `result`."""
    def __init__(self, result=None):
        # Bug fix: the constructor previously ignored `result` and always
        # stored None, so a result passed at construction was silently lost.
        self.result = result
        self.kwargs = None
    def new_image(self, **kwargs):
        self.kwargs = kwargs
        return self.result
class TestImageFactory(test_utils.BaseTestCase):
    """proxy.ImageFactory with and without result wrapping."""
    def setUp(self):
        super(TestImageFactory, self).setUp()
        self.factory = FakeImageFactory()
    def test_proxy_plain(self):
        # Without a proxy class the factory result is returned unchanged.
        plain_factory = proxy.ImageFactory(self.factory)
        self.factory.result = 'eddard'
        image = plain_factory.new_image(a=1, b='two')
        self.assertEqual(image, 'eddard')
        self.assertEqual(self.factory.kwargs, {'a': 1, 'b': 'two'})
    def test_proxy_wrapping(self):
        # With a proxy class the result is wrapped using the proxy kwargs.
        wrapping_factory = proxy.ImageFactory(self.factory,
                                              proxy_class=FakeProxy,
                                              proxy_kwargs={'dog': 'bark'})
        self.factory.result = 'stark'
        image = wrapping_factory.new_image(a=1, b='two')
        self.assertIsInstance(image, FakeProxy)
        self.assertEqual(image.base, 'stark')
        self.assertEqual(self.factory.kwargs, {'a': 1, 'b': 'two'})
class FakeImageMembershipFactory(object):
    """Fake membership factory: records new_image_member() args, returns `result`."""
    def __init__(self, result=None):
        # Bug fix: `result` was previously discarded (always reset to None).
        self.result = result
        self.image = None
        self.member_id = None
    def new_image_member(self, image, member_id):
        self.image = image
        self.member_id = member_id
        return self.result
class TestImageMembershipFactory(test_utils.BaseTestCase):
    """proxy.ImageMembershipFactory: optional wrapping of the produced member
    and optional unwrapping of a proxied image argument."""
    def setUp(self):
        super(TestImageMembershipFactory, self).setUp()
        self.factory = FakeImageMembershipFactory()
    def test_proxy_plain(self):
        # No proxy classes: arguments and result pass straight through.
        proxy_factory = proxy.ImageMembershipFactory(self.factory)
        self.factory.result = 'tyrion'
        membership = proxy_factory.new_image_member('jaime', 'cersei')
        self.assertEqual(membership, 'tyrion')
        self.assertEqual(self.factory.image, 'jaime')
        self.assertEqual(self.factory.member_id, 'cersei')
    def test_proxy_wrapped_membership(self):
        # member_proxy_class wraps the returned membership with its kwargs.
        proxy_factory = proxy.ImageMembershipFactory(
            self.factory, member_proxy_class=FakeProxy,
            member_proxy_kwargs={'a': 1})
        self.factory.result = 'tyrion'
        membership = proxy_factory.new_image_member('jaime', 'cersei')
        self.assertIsInstance(membership, FakeProxy)
        self.assertEqual(membership.base, 'tyrion')
        self.assertEqual(membership.kwargs, {'a': 1})
        self.assertEqual(self.factory.image, 'jaime')
        self.assertEqual(self.factory.member_id, 'cersei')
    def test_proxy_wrapped_image(self):
        # image_proxy_class causes a proxied image argument to be unwrapped
        # to its base ('jaime') before reaching the base factory.
        proxy_factory = proxy.ImageMembershipFactory(
            self.factory, image_proxy_class=FakeProxy)
        self.factory.result = 'tyrion'
        image = FakeProxy('jaime')
        membership = proxy_factory.new_image_member(image, 'cersei')
        self.assertEqual(membership, 'tyrion')
        self.assertEqual(self.factory.image, 'jaime')
        self.assertEqual(self.factory.member_id, 'cersei')
    def test_proxy_both_wrapped(self):
        # Distinct proxy classes for image (unwrap) and member (wrap).
        class FakeProxy2(FakeProxy):
            pass
        proxy_factory = proxy.ImageMembershipFactory(
            self.factory,
            member_proxy_class=FakeProxy,
            member_proxy_kwargs={'b': 2},
            image_proxy_class=FakeProxy2)
        self.factory.result = 'tyrion'
        image = FakeProxy2('jaime')
        membership = proxy_factory.new_image_member(image, 'cersei')
        self.assertIsInstance(membership, FakeProxy)
        self.assertEqual(membership.base, 'tyrion')
        self.assertEqual(membership.kwargs, {'b': 2})
        self.assertEqual(self.factory.image, 'jaime')
        self.assertEqual(self.factory.member_id, 'cersei')
class FakeImage(object):
    """Minimal image double whose member repo is the canned `result` value."""
    def __init__(self, result=None):
        self.result = result
    def get_member_repo(self):
        # Return whatever tests assigned to `result`.
        return self.result
class TestImage(test_utils.BaseTestCase):
    """proxy.Image member-repo access, plain and wrapped."""
    def setUp(self):
        super(TestImage, self).setUp()
        self.image = FakeImage()
    def test_normal_member_repo(self):
        # Without a proxy class the repo is returned unchanged.
        plain_image = proxy.Image(self.image)
        self.image.result = 'mormont'
        self.assertEqual(plain_image.get_member_repo(), 'mormont')
    def test_proxied_member_repo(self):
        # With a proxy class the repo comes back wrapped.
        wrapped_image = proxy.Image(self.image,
                                    member_repo_proxy_class=FakeProxy,
                                    member_repo_proxy_kwargs={'a': 10})
        self.image.result = 'corn'
        repo = wrapped_image.get_member_repo()
        self.assertIsInstance(repo, FakeProxy)
        self.assertEqual(repo.base, 'corn')
class TestTaskFactory(test_utils.BaseTestCase):
    """proxy.TaskFactory delegation and result wrapping."""
    def setUp(self):
        super(TestTaskFactory, self).setUp()
        self.factory = mock.Mock()
        self.fake_type = 'import'
        self.fake_owner = "owner"
    def test_proxy_plain(self):
        # The call is delegated to the wrapped factory exactly once.
        task_factory = proxy.TaskFactory(self.factory)
        task_factory.new_task(type=self.fake_type, owner=self.fake_owner)
        self.factory.new_task.assert_called_once_with(type=self.fake_type,
                                                      owner=self.fake_owner)
    def test_proxy_wrapping(self):
        # The produced task is wrapped in the configured proxy class.
        task_factory = proxy.TaskFactory(self.factory,
                                         task_proxy_class=FakeProxy,
                                         task_proxy_kwargs={'dog': 'bark'})
        self.factory.new_task.return_value = 'fake_task'
        task = task_factory.new_task(type=self.fake_type, owner=self.fake_owner)
        self.factory.new_task.assert_called_once_with(type=self.fake_type,
                                                      owner=self.fake_owner)
        self.assertIsInstance(task, FakeProxy)
        self.assertEqual(task.base, 'fake_task')
| |
from __future__ import unicode_literals
from celery import shared_task
from isisdata.models import *
from isisdata.tasks import _get_filtered_object_queryset
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from django.db import models
import logging
import smart_open
import csv
from datetime import datetime
from dateutil.tz import tzlocal
import time
from past.utils import old_div
import haystack
import math
# Column headers expected in the attribute-import CSV files consumed by
# add_attributes_to_authority().
COLUMN_NAME_ATTR_SUBJ_ID = 'ATT Subj ID'
COLUMN_NAME_ATTR_RELATED_NAME = 'Related Record Name'
COLUMN_NAME_ATTR_TYPE = 'ATT Type'
COLUMN_NAME_ATTR_VALUE = 'ATT Value'
COLUMN_NAME_ATTR_DATE_FREE = 'ATT DateFree'
COLUMN_NAME_ATTR_DATE_BEGIN = 'ATT DateBegin'
COLUMN_NAME_ATTR_DATE_END = 'ATT DateEnd'
COLUMN_NAME_ATTR_PLACE_NAME = 'ATT PlaceName'
COLUMN_NAME_ATTR_PLACE_LINK = 'ATT PlaceLink'
COLUMN_NAME_ATTR_NOTES = 'ATT Notes'
# Module-level logger for the bulk-task functions below.
logger = logging.getLogger(__name__)
@shared_task
def delete_duplicate_attributes(user_id, filter_params_raw, task_id=None, object_type='AUTHORITY'):
    """Delete duplicate attributes from every object in the filtered queryset.

    Two attributes count as duplicates when they share type name, controlled
    value, and freeform value; the first occurrence is kept. Progress is
    reported via the AsyncTask identified by ``task_id`` (if any).
    """
    queryset, task = _get_task(filter_params_raw, user_id, task_id, object_type)
    current_count = 0
    for obj in queryset:
        # Set membership is O(1); the original used a list here.
        seen_keys = set()
        for attribute in obj.attributes.all():
            attr_type = attribute.type_controlled
            key = attr_type.name + "_" + str(attribute.value.cvalue()) + str(attribute.value_freeform)
            if key not in seen_keys:
                seen_keys.add(key)
            else:
                # attribute with same values already exists, so remove it.
                # Bug fix: pks are not guaranteed to be str; "+"-concatenating
                # them raised TypeError for integer primary keys.
                print("Deleting attribute %s on object %s" % (attribute.pk, obj.pk))
                attribute.delete()
        current_count = _update_count(current_count, task)
@shared_task
def reindex_authorities(user_id, filter_params_raw, task_id=None, object_type='AUTHORITY'):
    """Rebuild the search index entry for every Authority in the filtered set.

    Progress is tracked on the AsyncTask identified by ``task_id`` (if given);
    without a task_id the reindex runs silently.
    """
    queryset, _ = _get_filtered_object_queryset(filter_params_raw, user_id, object_type)
    if task_id:
        task = AsyncTask.objects.get(pk=task_id)
        task.max_value = queryset.count()
        # Update progress roughly every 0.5%, but at least every 2 records.
        _inc = max(2, math.floor(old_div(task.max_value, 200.)))
        task.save()
    else:
        task = None
    try:  # Report all exceptions as a task failure.
        for i, obj in enumerate(queryset):
            if task and (i % _inc == 0 or i == (task.max_value - 1)):
                task.current_value = i
                task.save()
            haystack.connections[settings.HAYSTACK_DEFAULT_INDEX].get_unified_index().get_index(Authority).update_object(obj)
        # Bug fix: only touch the task when one was requested; the original
        # raised AttributeError on success whenever task_id was None.
        if task:
            task.state = 'SUCCESS'
            task.save()
    except Exception as E:
        # Bug fix: the message previously named the wrong task
        # ("bulk_update_citations"); same None-guard applies on failure.
        print('reindex_authorities failed for %s' % filter_params_raw, end=' ')
        print(E)
        if task:
            task.state = 'FAILURE'
            task.save()
@shared_task
def merge_authorities(file_path, error_path, task_id, user_id):
    """Merge duplicate Authority records listed in a CSV file.

    Each CSV row names a master and a duplicate authority. All attributes,
    linked-data entries and ACRelations of the duplicate are repointed to the
    master, then the duplicate is turned into a redirect to the master.
    Per-row outcomes are written to ``error_path``; progress is reported via
    the AsyncTask ``task_id``.
    """
    logging.info('Merging duplicate authorities and redirecting.')
    SUCCESS = 'SUCCESS'
    ERROR = 'ERROR'
    COL_MASTER_AUTH = 'CBA ID Master'
    COL_DUPLICATE_AUTH = 'CBA ID Duplicate'
    COL_NOTE = 'Note'
    # NOTE(review): binary mode plus an encoding relies on smart_open's
    # decoding behavior — confirm this combination is intended.
    with smart_open.open(file_path, 'rb', encoding='utf-8') as f:
        reader = csv.reader(f)
        task = AsyncTask.objects.get(pk=task_id)
        results = []
        # Count rows first so the task knows its maximum progress value.
        row_count = _count_rows(f, results)
        task.max_value = row_count
        task.save()
        current_count = 0
        not_matching_subject_names = []
        current_time_obj = datetime.now(tzlocal())
        try:
            for row in csv.DictReader(f):
                master_id = row[COL_MASTER_AUTH]
                duplicate_id = row[COL_DUPLICATE_AUTH]
                note = row[COL_NOTE]
                # Both endpoints must exist; otherwise log, record, skip.
                try:
                    master = Authority.objects.get(pk=master_id)
                except Exception as e:
                    logger.error('Authority with id %s does not exist. Skipping.' % (master_id))
                    results.append((ERROR, master_id, 'Authority record does not exist.', ""))
                    current_count = _update_count(current_count, task)
                    continue
                try:
                    duplicate = Authority.objects.get(pk=duplicate_id)
                except Exception as e:
                    logger.error('Authority with id %s does not exist. Skipping.' % (duplicate_id))
                    results.append((ERROR, duplicate_id, 'Authority record does not exist.', ""))
                    current_count = _update_count(current_count, task)
                    continue
                # Repoint the duplicate's attributes to the master.
                for attr in duplicate.attributes.all():
                    attr.source = master
                    _add_change_note(attr, task_id, 'source', 'source', master_id, duplicate_id, user_id, current_time_obj)
                    attr.record_history += '\n' + note
                    attr.save()
                # Repoint linked-data entries.
                for ld in duplicate.linkeddata_entries.all():
                    ld.subject = master
                    _add_change_note(ld, task_id, 'source', 'source', master_id, duplicate_id, user_id, current_time_obj)
                    ld.record_history += '\n' + note
                    ld.save()
                # Repoint authority-citation relations.
                for acr in duplicate.acrelations.all():
                    acr.authority = master
                    _add_change_note(acr, task_id, 'source', 'source', master_id, duplicate_id, user_id, current_time_obj)
                    acr.record_history += '\n' + note
                    acr.save()
                # change duplicate record to redirect
                duplicate.redirect_to = master
                old_status = duplicate.record_status_value
                duplicate.record_status_value = CuratedMixin.REDIRECT
                _add_change_note(duplicate, task_id, 'record_status_value', 'record_status_value', "Redirect to %s"%(master_id), old_status, user_id, current_time_obj)
                duplicate.record_history += '\n' + note
                duplicate.save()
                results.append((SUCCESS, "Records Merged", "%s and %s were successfully merged. Master is %s."%(master_id, duplicate_id, master_id), ""))
                current_count = _update_count(current_count, task)
        except Exception as e:
            # Any unexpected failure is recorded in the results file.
            logger.error("There was an unexpected error processing the CSV file.")
            logger.exception(e)
            results.append((ERROR, "unexpected error", "There was an unexpected error processing the CSV file: " + repr(e), ""))
        _save_results(error_path, results, ('Type', 'Title', 'Message', ''))
        task.state = 'SUCCESS'
        task.save()
@shared_task
def add_attributes_to_authority(file_path, error_path, task_id, user_id):
    """Create Attribute records on Authorities from rows of a CSV file.

    Each row identifies a subject authority, an attribute type, and one or
    more value columns (plain value, date range, place name/link). Per-row
    outcomes are written to ``error_path``; progress is reported via the
    AsyncTask ``task_id``.
    """
    logging.info('Adding attributes from %s.' % (file_path))
    # this is a hack but the best I can come up with right now :op
    logging.debug('Make AuthorityValue exists in ContentType table...')
    ContentType.objects.get_or_create(model='authorityvalue', app_label='isisdata')
    SUCCESS = 'SUCCESS'
    ERROR = 'ERROR'
    # NOTE(review): binary mode plus an encoding relies on smart_open's
    # decoding behavior — confirm this combination is intended.
    with smart_open.open(file_path, 'rb', encoding='utf-8') as f:
        reader = csv.reader(f)
        task = AsyncTask.objects.get(pk=task_id)
        results = []
        # Count rows first so the task knows its maximum progress value.
        row_count = _count_rows(f, results)
        task.max_value = row_count
        task.save()
        current_count = 0
        not_matching_subject_names = []
        current_time_obj = datetime.now(tzlocal())
        try:
            for row in csv.DictReader(f):
                subject_id = row[COLUMN_NAME_ATTR_SUBJ_ID]
                try:
                    authority = Authority.objects.get(pk=subject_id)
                except Authority.DoesNotExist:
                    logger.error('Authority with id %s does not exist. Skipping attribute.' % (subject_id))
                    results.append((ERROR, subject_id, subject_id, 'Authority record does not exist.'))
                    current_count = _update_count(current_count, task)
                    continue
                # Track mismatches between the CSV's name and the DB record's
                # name (collected but not reported here).
                related_name = row[COLUMN_NAME_ATTR_RELATED_NAME]
                if authority.name != related_name:
                    not_matching_subject_names.append((subject_id, authority.name, related_name))
                attribute_type = row[COLUMN_NAME_ATTR_TYPE]
                atype = AttributeType.objects.filter(name=attribute_type)
                if not atype:
                    logger.error('Attribute type with name %s does not exist. Skipping attribute.' % (attribute_type))
                    results.append((ERROR, subject_id, attribute_type, 'Attribute type does not exist.'))
                    current_count = _update_count(current_count, task)
                    continue
                # we can be pretty sure there is just one
                atype = atype.first()
                # get source content type (authority in this case)
                ctype = ContentType.objects.filter(model=type(authority).__name__.lower()).first()
                # content type of value
                vctype = atype.value_content_type
                avmodel_class = vctype.model_class()
                att_init_values = {
                    'type_controlled': atype,
                    'source_content_type': ctype,
                    'source_instance_id': subject_id,
                    'value_freeform': row[COLUMN_NAME_ATTR_DATE_FREE],
                    'administrator_notes': row[COLUMN_NAME_ATTR_NOTES]
                }
                # Build the value record from whichever columns are populated.
                val_init_values = {}
                if row[COLUMN_NAME_ATTR_VALUE]:
                    val_init_values.update({
                        'value': row[COLUMN_NAME_ATTR_VALUE]
                    })
                if row[COLUMN_NAME_ATTR_DATE_BEGIN]:
                    val_init_values.update({
                        'start': ISODateValue.convert(row[COLUMN_NAME_ATTR_DATE_BEGIN])
                    })
                if row[COLUMN_NAME_ATTR_DATE_END]:
                    val_init_values.update({
                        'end': ISODateValue.convert(row[COLUMN_NAME_ATTR_DATE_END])
                    })
                if row[COLUMN_NAME_ATTR_PLACE_NAME]:
                    val_init_values.update({
                        'name': row[COLUMN_NAME_ATTR_PLACE_NAME]
                    })
                    # A place name overrides the freeform value.
                    att_init_values['value_freeform'] = row[COLUMN_NAME_ATTR_PLACE_NAME]
                if row[COLUMN_NAME_ATTR_PLACE_LINK]:
                    # NOTE(review): bare except below swallows everything, not
                    # just Authority.DoesNotExist.
                    try:
                        place = Authority.objects.get(pk=row[COLUMN_NAME_ATTR_PLACE_LINK])
                        val_init_values.update({
                            'value': place
                        })
                    except:
                        logger.error('Authority with id %s does not exist.' % (row[COLUMN_NAME_ATTR_PLACE_LINK]))
                        results.append((ERROR, subject_id, row[COLUMN_NAME_ATTR_PLACE_LINK], 'Adding place link. Authority does not exist.'))
                        current_count = _update_count(current_count, task)
                        continue
                # Stamp provenance, then persist the attribute and its value.
                _add_creation_note(att_init_values, task_id, user_id, current_time_obj)
                attribute = Attribute(**att_init_values)
                attribute.save()
                results.append((SUCCESS, subject_id, attribute.id, 'Added'))
                val_init_values.update({
                    'attribute': attribute
                })
                value = avmodel_class(**val_init_values)
                value.save()
                current_count = _update_count(current_count, task)
        except Exception as e:
            # Any unexpected failure is recorded in the results file.
            logger.error("There was an unexpected error processing the CSV file.")
            logger.exception(e)
            results.append((ERROR, "unexpected error", "", "There was an unexpected error processing the CSV file: " + repr(e)))
        _save_results(error_path, results, ('Type', 'ATT Subj ID', 'Affected object', 'Message'))
        task.state = 'SUCCESS'
        task.save()
def _get_task(filter_params_raw, user_id, task_id, object_type):
    """Return (queryset, task) for a bulk operation.

    The task is the AsyncTask for ``task_id``, primed with the queryset size,
    or None when no task_id was supplied.
    """
    queryset, _ = _get_filtered_object_queryset(filter_params_raw, user_id, object_type)
    if not task_id:
        return queryset, None
    task = AsyncTask.objects.get(pk=task_id)
    task.max_value = queryset.count()
    _inc = max(2, math.floor(old_div(task.max_value, 200.)))
    task.save()
    return queryset, task
def _add_creation_note(properties, task_id, user_id, created_on):
    """Record provenance of a bulk-created record.

    Mutates ``properties`` in place: adds a record-history message naming the
    bulk task, the user and the timestamp, and stamps the modifying user id.
    """
    creating_user = User.objects.get(pk=user_id)
    timestamp = created_on.strftime("%m/%d/%y %r %Z")
    history_note = "This record was created as part of the bulk creation #%s by %s on %s." % (
        task_id, creating_user.username, timestamp)
    properties[RECORD_HISTORY] = history_note
    properties['modified_by_id'] = user_id
# Maps the type names accepted in bulk-change CSV files to their model classes.
ELEMENT_TYPES = {
    'Attribute': Attribute,
    'LinkedData': LinkedData,
}
# Whitelist of model fields that may be modified through a bulk change,
# keyed by model class. Double-underscore entries address a related object
# (e.g. 'value__value' is the 'value' field of an Attribute's value object).
ALLOWED_FIELDS = {
    Attribute: ['description', 'value_freeform', 'value__value', 'record_status_value', 'record_status_explanation'],
    LinkedData: ['description', 'universal_resource_name', 'resource_name', 'url', 'administrator_notes', 'record_status_value', 'record_status_explanation'],
    ACRelation: ['citation_id', 'authority_id', 'name_for_display_in_citation', 'description', 'type_controlled', 'data_display_order', 'confidence_measure','administrator_notes', 'record_status_value', 'record_status_explanation'],
    CCRelation: ['subject_id', 'object_id', 'name', 'description', 'type_controlled', 'belongs_to_id', 'data_display_order', 'administrator_notes', 'record_status_value', 'record_status_explanation']
}
# Translates the human-readable column values of the "Field" CSV column to
# model field names, per model class. Values with a 'typed:' or 'find:'
# prefix, or containing '__', receive special handling in update_elements.
FIELD_MAP = {
    # Attribute fields; 'value__*' entries live on the attribute's value object
    Attribute: {
        'ATT Description': 'description',
        'ATT Value': 'value__value',
        'ATT Value Freeform': 'value_freeform',
        'ATT Status': 'record_status_value',
        'ATT RecordStatusExplanation': 'record_status_explanation',
        'ATT DateFree': 'value_freeform',
        'ATT DateBegin': 'value__start',
        'ATT DateEnd': 'value__end',
        'ATT PlaceName' : 'value__name',
        'ATT PlaceLink' : 'value__value',
        'ATT Notes': 'administrator_notes',
    },
    # Linked-data fields; the subject may be an Authority or a Citation
    LinkedData: {
        'LED URN': 'universal_resource_name',
        'LED URL': 'url',
        'LED Resource': 'resource_name',
        'LED Notes': 'administrator_notes',
        'LED Status': 'record_status_value',
        'LED RecordStatusExplanation': 'record_status_explanation',
        'LED Subj ID': 'typed:subject',
    },
    # Authority-citation relation fields
    ACRelation: {
        'ACR ID Auth': 'authority_id',
        'ACR ID Cit': 'citation_id',
        'ACR NameDisplay': 'name_for_display_in_citation',
        'ACR Type': 'type_controlled',
        'ACR DataDisplayOrder': 'data_display_order',
        'ACR ConfidenceMeasure': 'confidence_measure',
        'ACR Notes': 'administrator_notes',
        'ACR Status': 'record_status_value',
        'ACR RecordStatusExplanation': 'record_status_explanation',
    },
    # Citation-citation relation fields
    CCRelation: {
        'CCR ID Cit Subj': 'subject_id',
        'CCR ID Cit Obj': 'object_id',
        'CCR Name': 'name',
        'CCR Description': 'description',
        'CCR Type': 'type_controlled',
        'CCR DisplayOrder': 'data_display_order',
        'CCR Dataset': 'find:Dataset:name:belongs_to',
        'CCR Notes': 'administrator_notes',
        'CCR Status': 'record_status_value',
        'CCR RecordStatusExplanation': 'record_status_explanation',
    },
    # Authority fields (personal_name_* apply to Person authorities)
    Authority: {
        'CBA Type': 'type_controlled',
        'CBA Name': 'name',
        'CBA Redirect': 'redirect_to_id',
        'CBA ClassCode': 'classification_code',
        'CBA ClassHier': 'classification_hierarchy',
        'CBA ClassSystem': 'classification_system',
        'CBA Description': 'description',
        'CBA Dataset': 'find:Dataset:name:belongs_to',
        'CBA Notes': 'administrator_notes',
        'CBA Status': 'record_status_value',
        'CBA RecordStatusExplanation': 'record_status_explanation',
        'CBA First': 'personal_name_first',
        'CBA Last': 'personal_name_last',
        'CBA Suff': 'personal_name_suffix',
        'CBA Preferred': 'personal_name_preferred',
    },
    # Citation fields; 'part_details__*' entries live on the PartDetails object
    Citation: {
        'CBB Type': 'type_controlled',
        'CBB Title': 'title',
        'CBB Abstract': 'abstract',
        'CBB Description': 'description',
        'CBB EditionDetails': 'edition_details',
        'CBB Language': 'find:Language:name:language:multi',
        'CBB PhysicalDetails': 'physical_details',
        'CBB IssueBegin':'part_details__issue_begin',
        'CBB IssueEnd': 'part_details__issue_end',
        'CBB IssueFreeText': 'part_details__issue_free_text',
        'CBB PageBegin': 'part_details__page_begin',
        'CBB PageEnd': 'part_details__page_end',
        'CBB PagesFreeText': 'part_details__pages_free_text',
        'CBB VolumeBegin': 'part_details__volume_begin',
        'CBB VolumeEnd': 'part_details__volume_end',
        'CBB VolumeFreeText': 'part_details__volume_free_text',
        'CBB Extent': 'part_details__extent',
        'CBB ExtentNote': 'part_details__extent_note',
        'CBB Dataset': 'find:Dataset:name:belongs_to',
        'CBB Notes': 'administrator_notes',
        'CBB Status': 'record_status_value',
        'CBB RecordStatusExplanation': 'record_status_explanation',
    }
}
# Column headers expected in the bulk-change CSV file.
COLUMN_NAME_TYPE = 'Table'
COLUMN_NAME_ID = "Id"
COLUMN_NAME_FIELD = "Field"
COLUMN_NAME_VALUE = "Value"
# Model field names that receive special treatment during updates.
ADMIN_NOTES = 'administrator_notes'
RECORD_HISTORY = 'record_history'
# FIELD_MAP value prefixes that trigger special resolution logic.
TYPED_PREFIX = 'typed:'
FIND_PREFIX = 'find:'
@shared_task
def update_elements(file_path, error_path, task_id, user_id):
    """Bulk-update records from a CSV file (celery task).

    Each CSV row names a model type (``Table``), a primary key (``Id``), a
    human-readable field name (``Field``, translated via ``FIELD_MAP``) and
    the new value (``Value``). One result row is collected per input row and
    written to ``error_path`` when the run finishes.

    :param file_path: path/URI of the CSV file to read (opened via smart_open).
    :param error_path: path/URI the result CSV is written to.
    :param task_id: pk of the AsyncTask used for progress reporting.
    :param user_id: pk of the user who initiated the change (for provenance).
    """
    logging.info('Updating elements from %s.' % (file_path))
    SUCCESS = 'SUCCESS'
    ERROR = 'ERROR'
    result_file_headers = ('Status', 'Type', 'Element Id', 'Message', 'Modification Date')
    # NOTE(review): mode 'rb' combined with encoding='utf-8' relies on
    # smart_open decoding the stream -- confirm against the smart_open version
    with smart_open.open(file_path, 'rb', encoding='utf-8') as f:
        # (an unused `reader = csv.reader(f)` was removed here)
        task = AsyncTask.objects.get(pk=task_id)
        results = []
        row_count = _count_rows(f, results)
        task.max_value = row_count
        task.save()
        current_count = 0
        try:
            current_time_obj = datetime.now(tzlocal())
            current_time = current_time_obj.isoformat()
            for row in csv.DictReader(f):
                # update timestamp for long running processes
                current_time = datetime.now(tzlocal()).isoformat()
                elem_type = row[COLUMN_NAME_TYPE]
                element_id = row[COLUMN_NAME_ID]
                try:
                    type_class = apps.get_model(app_label='isisdata', model_name=elem_type)
                except Exception as e:
                    results.append((ERROR, elem_type, element_id, '%s is not a valid type.'%(elem_type), current_time))
                    current_count = _update_count(current_count, task)
                    continue
                try:
                    element = type_class.objects.get(pk=element_id)
                    # we need special handling of persons, this is ugly but ahh well
                    if elem_type == "Authority" and element.type_controlled == Authority.PERSON:
                        element = Person.objects.get(pk=element_id)
                except ObjectDoesNotExist:
                    results.append((ERROR, elem_type, element_id, '%s with id %s does not exist.'%(type_class, element_id), current_time))
                    current_count = _update_count(current_count, task)
                    continue
                field_to_change = row[COLUMN_NAME_FIELD]
                new_value = row[COLUMN_NAME_VALUE]
                if field_to_change in FIELD_MAP[type_class]:
                    field_in_csv = field_to_change
                    field_to_change = FIELD_MAP[type_class][field_to_change]
                    # if we change a field that directly belongs to the class
                    if '__' not in field_to_change:
                        # if there are choices make sure they are respected
                        is_valid = _is_value_valid(element, field_to_change, new_value)
                        if not is_valid:
                            results.append((ERROR, elem_type, element_id, '%s is not a valid value.'%(new_value), current_time))
                        else:
                            try:
                                if field_to_change == ADMIN_NOTES:
                                    # BUG FIX: old_value was unbound on this
                                    # branch (NameError in _add_change_note);
                                    # record a truncated copy of the previous
                                    # notes, mirroring the related-object
                                    # branch below.
                                    old_value = (getattr(element, field_to_change) or '')[:10] + "..."
                                    _add_to_administrator_notes(element, new_value, task.id, user_id, current_time_obj)
                                else:
                                    # in some cases we have authority or citation as relation
                                    # this is in cases like subject of linkeddata
                                    # it needs to be amended if there are objects that can link to other types
                                    # than authorities/citations
                                    if field_to_change.startswith(TYPED_PREFIX):
                                        field_to_change = field_to_change[len(TYPED_PREFIX):]
                                        if new_value.startswith(Authority.ID_PREFIX):
                                            linked_element = Authority.objects.get(pk=new_value)
                                        else:
                                            linked_element = Citation.objects.get(pk=new_value)
                                        new_value = linked_element
                                    if field_to_change.startswith(FIND_PREFIX):
                                        field_to_change, new_value = _find_value(field_to_change, new_value, element)
                                    # check if field to change is a ManyToManyField (IEXP-232)
                                    # if class is a subclass, this won't work, but we only have Person so far that Subclasses
                                    # Authority, which doesn't have any many to many relationships
                                    if field_to_change in element.__class__.__dict__ and isinstance(element.__class__.__dict__[field_to_change], models.fields.related_descriptors.ManyToManyDescriptor):
                                        # all this is really ugly, but we have to store the old list for the
                                        # administrator notes
                                        old_value = element.__getattribute__(field_to_change).all()
                                        old_value_list = list(old_value)
                                        element.__getattribute__(field_to_change).add(new_value)
                                        new_value = list(element.__getattribute__(field_to_change).all())
                                        old_value = old_value_list
                                    else:
                                        old_value = getattr(element, field_to_change)
                                        setattr(element, field_to_change, new_value)
                                    # some fields need special handling
                                    _specific_post_processing(element, field_to_change, new_value, old_value)
                                _add_change_note(element, task.id, field_in_csv, field_to_change, new_value, old_value, user_id, current_time_obj)
                                setattr(element, 'modified_by_id', user_id)
                                element.save()
                                results.append((SUCCESS, element_id, field_in_csv, 'Successfully updated', element.modified_on))
                            except Exception as e:
                                logger.error(e)
                                logger.exception(e)
                                results.append((ERROR, elem_type, element_id, 'Something went wrong. %s was not changed.'%(field_to_change), current_time))
                    # otherwise: the change addresses a related object (obj__field)
                    else:
                        object, field_name = field_to_change.split('__')
                        try:
                            object_to_change = getattr(element, object)
                            object_to_update_timestamp = object_to_change
                            # if we have an attribute, we need to convert the value first
                            if type_class == Attribute:
                                object_to_change = object_to_change.get_child_class()
                                object_to_update_timestamp = element
                                if field_name in ['value', 'start', 'end']:
                                    new_value = object_to_change.__class__.convert(new_value)
                            # this is a hack, but ahh well
                            if type(object_to_change) == PartDetails:
                                object_to_update_timestamp = element
                            # if there are choices make sure they are respected
                            is_valid = _is_value_valid(object_to_change, field_name, new_value)
                            if not is_valid:
                                results.append((ERROR, elem_type, element_id, '%s is not a valid value.'%(new_value), current_time))
                            else:
                                old_value = getattr(object_to_change, field_name)
                                if field_to_change == ADMIN_NOTES:
                                    _add_to_administrator_notes(object_to_change, new_value, task.id, user_id, current_time_obj)
                                    old_value = old_value[:10] + "..."
                                else:
                                    setattr(object_to_change, field_name, new_value)
                                object_to_change.save()
                                _add_change_note(object_to_update_timestamp, task.id, field_in_csv, field_name, new_value, old_value, user_id, current_time_obj)
                                setattr(object_to_update_timestamp, 'modified_by_id', user_id)
                                object_to_update_timestamp.save()
                                results.append((SUCCESS, element_id, field_in_csv, 'Successfully updated', object_to_update_timestamp.modified_on))
                        except Exception as e:
                            logger.error(e)
                            logger.exception(e)
                            # BUG FIX: previously reported the builtin `type`
                            # instead of the row's element type string.
                            results.append((ERROR, elem_type, element_id, 'Field %s cannot be changed. %s does not exist.'%(field_to_change, object), current_time))
                else:
                    results.append((ERROR, elem_type, element_id, 'Field %s cannot be changed.'%(field_to_change), current_time))
                current_count = _update_count(current_count, task)
        except KeyError as e:
            logger.exception("There was a column error processing the CSV file.")
            results.append((ERROR, "column error", "", "There was a column error processing the CSV file. Have you provided the correct column headers? " + repr(e), current_time))
        except Exception as e:
            logger.error("There was an unexpected error processing the CSV file.")
            logger.exception(e)
            results.append((ERROR, "unexpected error", "", "There was an unexpected error processing the CSV file: " + repr(e), current_time))
    _save_csv_file(error_path, result_file_headers, results)
    task.state = 'SUCCESS'
    task.save()
def _specific_post_processing(element, field_name, new_value, old_value):
    """Apply side effects that some 'type_controlled' changes require.

    - Changing an Authority's type to PERSON creates the matching Person
      subclass record if one does not exist yet.
    - Changing a Citation's type to one of the part-bearing types ensures a
      PartDetails object is attached.
    """
    # turn authority non-person into person
    if type(element) == Authority and field_name == 'type_controlled':
        if new_value == Authority.PERSON and old_value != Authority.PERSON:
            try:
                # is object already a person
                element.person
            except Person.DoesNotExist:
                # if not make it one
                person = Person(authority_ptr_id=element.pk)
                # copy the shared field values onto the new subclass record
                person.__dict__.update(element.__dict__)
                person.save()
    if type(element) == Citation and field_name == 'type_controlled':
        if new_value in [Citation.ARTICLE, Citation.BOOK, Citation.REVIEW, Citation.CHAPTER, Citation.THESIS]:
            if not hasattr(element, 'part_details'):
                element.part_details = PartDetails()
# to specify a find operation, fields need to be in format find:type:field:linking_field (e.g. find:Dataset:name:belongs_to_id)
def _find_value(field_to_change, new_value, element):
    """Resolve a "find" field specification to (linking_field, linked object).

    ``field_to_change`` has the format ``find:Type:lookup_field:linking_field``
    with an optional trailing ``:multi`` flag for many-to-many links. The
    first ``Type`` object whose ``lookup_field`` equals ``new_value`` is
    returned together with the name of the field to link it through (or None
    if no such object exists).
    """
    field_parts = field_to_change.split(":")
    model = apps.get_model("isisdata." + field_parts[1])
    filter_params = { field_parts[2]:new_value }
    linked_element = model.objects.filter(**filter_params).first()
    # IEXP-232: for ':multi' fields we cannot replace the old list wholesale;
    # the caller adds the returned element to the existing relation instead,
    # so no special handling is needed here (a dead read of the old value and
    # commented-out replacement code were removed).
    return field_parts[3], linked_element
def _get_old_multi_value(field_to_change, element):
field_parts = field_to_change.split(":")
print(field_parts)
if len(field_parts) <= 4 or field_parts[4] != "multi":
return None
print(field_parts[3])
getattr(element, field_parts[3])
def _add_to_administrator_notes(element, value, task_nr, modified_by, modified_on):
    """Append a provenance-stamped note to the element's administrator notes.

    The element is mutated but NOT saved; the caller is responsible for
    persisting it.
    """
    # BUG FIX: guard against a None notes field -- `None += str` raised a
    # TypeError before (empty string behaves exactly as previously).
    note = getattr(element, ADMIN_NOTES) or ''
    if note:
        # separate the new entry from existing notes
        note += '\n\n'
    user = User.objects.get(pk=modified_by)
    mod_time = modified_on.strftime("%m/%d/%y %r %Z")
    note += "%s added the following in bulk change #%s on %s:"%(user.username, task_nr, mod_time)
    note += '\n'
    note += value
    setattr(element, ADMIN_NOTES, note)
def _add_change_note(element, task_nr, field, field_name, value, old_value, modified_by, modified_on):
    """Append a bulk-change entry to the element's record history.

    Also marks the acting user on the element (``_history_user``). The
    element is mutated but not saved.
    """
    user = User.objects.get(pk=modified_by)
    mod_time = modified_on.strftime("%m/%d/%y %r %Z")
    existing_history = getattr(element, RECORD_HISTORY)
    # keep a blank line between the previous history and the new entry
    note = existing_history + '\n\n' if existing_history else ''
    note += 'This record was changed as part of bulk change #%s. "%s" was changed from "%s" to "%s" by %s on %s.'%(task_nr, field, old_value, value, user.username, mod_time)
    setattr(element, RECORD_HISTORY, note)
    element._history_user = user
def _is_value_valid(element, field_to_change, new_value):
if ":" in field_to_change:
return True
choices = element._meta.get_field(field_to_change).choices
if choices:
if new_value not in dict(choices):
return False
return True
def _update_count(current_count, task):
current_count += 1
task.current_value = current_count
task.save()
return current_count
def _count_rows(f, results):
# we want to avoid loading everything in memory, in case it's a large file
# we do not count the header, so we start at -1
row_count = -1
try:
for row in csv.DictReader(f):
row_count += 1
except Exception as e:
logger.error("There was an unexpected error processing the CSV file.")
logger.exception(e)
results.append(('ERROR', "unexpected error", "", "There was an unexpected error processing the CSV file: " + repr(e)))
# reset file cursor to first data line
f.seek(0)
return row_count
def _save_csv_file(path, headers, data):
    """Write a header row followed by the given data rows to a CSV at path."""
    with smart_open.open(path, 'w') as out_file:
        csv_writer = csv.writer(out_file)
        csv_writer.writerow(headers)
        csv_writer.writerows(data)
def _save_results(path, results, headings):
    """Write bulk-operation results to a CSV file at ``path``.

    Kept as a separate name for existing callers; the CSV-writing logic is
    identical to ``_save_csv_file``, so delegate instead of duplicating it.
    """
    _save_csv_file(path, headings, results)
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
impl.py
~~~~~~~
TAP file handling implementation.
* 'range' is a tuple of two numbers. 'plan' is a string.
They both represent TAP testcase numberings.
* 'actual' in identifiers refers to the absolute number of testcases,
  which need not correspond to the number of testcases specified by the plan::
1..50
ok 1 first
ok 25 second
Actual number of testcases is 2. Number of testcases is 50.
* '1..0' exceptionally represents '0 testcases'. In general
a negative range triggers a warning if lenient is set to
False (non-default).
(c) BSD 3-clause.
"""
from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
from .exc import TapParseError, TapBailout, TapMissingPlan, TapInvalidNumbering
import re
import os
import sys
import copy
import codecs
import logging
import yamlish
import collections
__all__ = ['YamlData', 'TapTestcase', 'TapNumbering', 'TapActualNumbering',
'TapDocument', 'TapDocumentValidator', 'TapDocumentIterator',
'TapDocumentActualIterator', 'TapDocumentFailedIterator',
'TapDocumentTokenizer', 'TapDocumentParser', 'TapProtocol',
'TapWrapper', 'merge']
STR_ENC = 'utf-8'
class YamlData(object):
    """YAML data storage.

    Wraps diagnostic data attached to a testcase. Equality is delegated to
    the wrapped object, so an instance compares equal to the plain data it
    stores; iteration likewise iterates the wrapped data directly.
    """
    def __init__(self, data):
        self.data = data

    def __eq__(self, other):
        # delegate equality to the wrapped data
        return self.data == other

    def __iter__(self):
        return iter(self.data)

    def __unicode__(self):
        # serialize the stored data back to YAML text
        return yamlish.dumps(self.data)
class TapTestcase(object):
    """Object representation of an entry in a TAP file.

    Holds the ok/not-ok field, the optional testcase number, a description,
    SKIP/TODO directives and any annotated data (e.g. a YAML block or
    backtrace lines).
    """
    is_testcase = True
    is_bailout = False

    def __init__(self, field=None, number=None, description=u''):
        # test line
        self._field = field
        self._number = number
        self.description = description
        self._directives = {'skip': [], 'todo': []}
        # data
        self._data = []

    @staticmethod
    def indent(text, indent=2):
        """Indent all lines of ``text`` by ``indent`` spaces"""
        return re.sub('(^|\n)(?!\n|$)', '\\1' + (' ' * indent), text)

    @property
    def field(self):
        """A TAP field specifying whether testcase succeeded"""
        return self._field

    @field.setter
    def field(self, value):
        errmsg = "field value must be 'ok' or 'not ok', not {!r}".format(value)
        try:
            if value in [None, True, False]:
                self._field = value
            elif value.rstrip() == 'ok':
                self._field = True
            elif value.rstrip() == 'not ok':
                self._field = False
            else:
                raise ValueError(errmsg)
        except AttributeError:
            # value has no rstrip() => not a string-like object
            raise ValueError(errmsg)

    @field.deleter
    def field(self):
        self._field = None

    @property
    def number(self):
        """A TAP testcase number"""
        return self._number

    @number.setter
    def number(self, value):
        if value is None:
            self._number = value
            return
        try:
            value = int(value)
        except (TypeError, ValueError):
            # BUG FIX: int() raises ValueError for non-numeric strings;
            # previously only TypeError was translated to the uniform message.
            raise ValueError("Argument must be integer")
        if value < 0:
            raise ValueError("Testcase number must not be negative")
        self._number = value

    @number.deleter
    def number(self):
        self._number = None

    @property
    def directive(self):
        """A TAP directive like 'TODO work in progress'"""
        out = u''
        for skip_msg in self._directives['skip']:
            out += u'SKIP {} '.format(skip_msg.strip())
        for todo_msg in self._directives['todo']:
            out += u'TODO {} '.format(todo_msg.strip())
        return out and out[:-1] or u''

    @directive.setter
    def directive(self, value):
        # reset
        self._directives['skip'] = []
        self._directives['todo'] = []
        if not value:
            return
        delimiters = ['skip', 'todo']
        value = value.lstrip('#\t ')
        parts = re.split('(' + '|'.join(delimiters) + ')', value, flags=re.I)
        parts = [p for p in parts if p]
        if not parts or parts[0].lower() not in delimiters:
            raise ValueError('Directive must start with SKIP or TODO')
        key = None
        key_just_set = False
        for val in parts:
            if val.lower() in delimiters:
                key = val.lower()
                if key_just_set:
                    # NOTE(review): two consecutive delimiters replace the new
                    # key's list with u''; a message following it would then
                    # fail to append -- confirm the intended semantics
                    self._directives[key] = u''
                key_just_set = True
            else:
                if key is None:
                    msg = 'Directive must be sequence of TODOs and SKIPs'
                    raise ValueError(msg + ' but is {}'.format(value))
                self._directives[key].append(val)
                key_just_set = False

    @directive.deleter
    def directive(self):
        # BUG FIX: resetting to a plain {} used to drop the 'skip'/'todo'
        # keys, so any later use of the todo/skip properties raised KeyError.
        self._directives = {'skip': [], 'todo': []}

    @property
    def data(self):
        """Annotated data (eg. a backtrace) to the testcase"""
        return self._data

    @data.setter
    def data(self, value):
        msg = "If you set data explicitly, it has to be a list"
        assert hasattr(value, '__iter__'), msg
        self._data = copy.deepcopy(value)

    @data.deleter
    def data(self):
        self._data = []

    @property
    def todo(self):
        """Is a TODO flag annotated to this testcase?"""
        return bool(self._directives['todo'])

    @todo.setter
    def todo(self, what):
        """Add a TODO flag to this testcase.

        :param unicode what: Which work is still left?
        """
        self._directives['todo'].append(what) if what else None

    @property
    def skip(self):
        """Is a SKIP flag annotated to this testcase?"""
        return bool(self._directives['skip'])

    @skip.setter
    def skip(self, why):
        """Add a SKIP flag to this testcase.

        :param unicode why: Why shall this testcase be skipped?
        """
        self._directives['skip'].append(why) if why else None

    def copy(self):
        """Return a copy of myself"""
        tc = TapTestcase()
        tc.__setstate__(self.__getstate__())
        return tc

    def __eq__(self, other):
        """Test equality"""
        # BUG FIX: 'data' was previously compared against itself
        # (self.data == self.data), so differing data never broke equality.
        conds = [self.field == other.field, self.number == other.number,
                 self.description == other.description,
                 self.directive == other.directive, self.data == other.data]
        # if one number is None and the other not, it's fine
        is_none = [self.number is None, other.number is None]
        if is_none.count(True) == 1:
            conds[1] = True
        return all(conds)

    def __getstate__(self):
        """Return object state for external storage"""
        return {
            'field': self.field,
            'number': self.number,
            'description': self.description or u'',
            'directives': self._directives,
            'data': self.data
        }

    def __setstate__(self, obj):
        """Import data using the provided object"""
        self.field = obj['field']
        self.number = obj['number']
        self.description = obj['description']
        self._directives = obj['directives']
        self.data = obj['data']

    def __repr__(self):
        """Representation of this object"""
        field = 'ok' if self.field else 'not ok'
        num = '' if self.number is None else ' #{}'.format(self._number)
        todo_skip = ''
        if self.todo and self.skip:
            todo_skip = ' with TODO and SKIP flag'
        elif self.todo:
            todo_skip = ' with TODO flag'
        elif self.skip:
            todo_skip = ' with SKIP flag'
        return u'<TapTestcase {}{}{}>'.format(field, num, todo_skip)

    def __unicode__(self):
        """TAP testcase representation as a unicode object"""
        num, desc, directive = self.number, self.description, self.directive
        out = u'ok ' if self.field else u'not ok '
        if num is not None:
            out += unicode(num) + u' '
        if desc:
            out += u'- {} '.format(desc)
        if directive:
            out += u' # {} '.format(directive)
        out = out.rstrip()
        if self.data:
            data = [unicode(d) for d in self.data]
            out += os.linesep + (os.linesep).join(data)
        if out.endswith(os.linesep):
            return out
        else:
            return out + os.linesep

    def __str__(self):
        # Python 2: encode the unicode representation to bytes
        return unicode(self).encode(STR_ENC)
class TapNumbering(object):
    """TAP testcase numbering. In TAP documents it is called 'the plan'."""

    def __init__(self, first=None, last=None, tests=None, lenient=True):
        """Create a numbering from `first` and `last` XOR a number of `tests`.

        `first` and `last` are testcase numbers, both inclusive.
        If `lenient` is False, a decreasing range (except '1..0') raises
        TapInvalidNumbering; otherwise it is normalized to an empty range.
        """
        arg_errmsg = 'Either provide a first and last or a number of tests'
        if first and last and tests:
            raise ValueError(arg_errmsg)
        if first is not None and last is not None:
            lo, hi = int(first), int(last)
            self.first = lo
            self.length = hi - lo + 1
            if lo == 1 and hi == 0:
                # '1..0' is the conventional notation for zero testcases
                self.length = 0
            elif hi < lo:
                self.length = 0
                if not lenient:
                    raise TapInvalidNumbering(
                        'Invalid testcase numbering: '
                        'range {}..{} is decreasing'.format(first, last))
        elif tests is not None:
            self.first = 1
            self.length = int(tests)
        else:
            raise ValueError(arg_errmsg)
        assert(self.first >= 0 and self.length >= 0)

    def __len__(self):
        return self.length

    def __nonzero__(self):
        # Python 2 truthiness: a numbering is always truthy
        return True

    def __contains__(self, tc_number):
        """Is `tc_number` within this TapNumbering range?"""
        return self.first <= tc_number < self.first + self.length

    def enumeration(self):
        """Get enumeration for the actual tap plan."""
        return list(range(self.first, self.first + self.length))

    def inc(self):
        """Increase numbering for one new testcase"""
        self.length += 1

    def normalized_plan(self):
        """Return a normalized plan where first=1"""
        return '{:d}..{:d}'.format(1, self.length)

    def range(self):
        """Get range of this numbering: (min, max)"""
        return (self.first, self.first + self.length - 1)

    def __getstate__(self):
        return {'first': self.first, 'length': self.length}

    def __setstate__(self, state):
        self.first = state['first']
        self.length = state['length']

    def __iter__(self):
        return iter(range(self.first, self.first + self.length))

    def __unicode__(self):
        """Return unicode representation of plan.

        If it was initially a decreasing range, first=last now.
        """
        return '{:d}..{:d}'.format(self.first, self.first + self.length - 1)

    def __repr__(self):
        return '<TapNumbering {}>'.format((self.first, self.length))
class TapActualNumbering(list):
    """Sequence of testcase numbers as they actually occur in a document.

    A plain ``list`` subclass used as a semantic marker; it adds no behavior
    of its own.
    """
    pass
class TapDocument(object):
    """An object representing a TAP document. Also acts as context manager."""
    # TAP version assumed when no explicit version line is present
    DEFAULT_VERSION = 13

    def __init__(self, version=DEFAULT_VERSION, skip=False):
        """Initialize an empty document with the given version and skip flag."""
        # testcases and bailouts
        self.entries = []
        self.metadata = {
            # version line
            'version': version,
            'version_written': False,
            # comment lines before first testcase
            'header_comment': [],
            # TAP plan
            'numbering': None,
            'plan_at_beginning': True,
            'skip': bool(skip),
            'skip_comment': u''
        }

    def __nonzero__(self):
        # Python 2 truthiness: a document is always truthy
        return True

    @property
    def version(self):
        """Get TAP version for this document"""
        return self.metadata['version']

    @property
    def skip(self):
        """Was this document skipped in the test run?"""
        return self.metadata['skip']

    # set information
    def set_version(self, version=DEFAULT_VERSION):
        """Set TAP version of this document"""
        self.metadata['version'] = int(version)

    def set_skip(self, skip_comment=u''):
        """Set skip annotation for this document"""
        if skip_comment:
            self.metadata['skip'] = True
            self.metadata['skip_comment'] = skip_comment
        else:
            self.metadata['skip'] = False

    def add_version_line(self, version=DEFAULT_VERSION):
        """Add information of version lines like 'TAP version 13'"""
        self.set_version(version)
        self.metadata['version_written'] = True

    def add_header_line(self, line):
        """Add header comment line for TAP document"""
        if line.count(os.linesep) > 1:
            raise ValueError("Header line must only be 1 (!) line")
        line = unicode(line).rstrip() + os.linesep
        self.metadata['header_comment'] += [line]

    def add_plan(self, first, last, skip_comment=u'', at_beginning=True):
        """Add information of a plan like '1..3 # SKIP wip'"""
        self.metadata['plan_at_beginning'] = bool(at_beginning)
        self.metadata['numbering'] = TapNumbering(first=first, last=last)
        if skip_comment:
            self.set_skip(skip_comment)

    def add_testcase(self, tc):
        """Add a ``TapTestcase`` instance `tc` to this document"""
        self.entries.append(tc.copy())

    def add_bailout(self, bo):
        """Add a ``TapBailout`` instance `bo` to this document"""
        self.entries.append(bo.copy())

    # processing
    @staticmethod
    def create_plan(first, last, comment=u'', skip=False):
        """Build a plan line string such as u'1..3 # SKIP wip'."""
        plan = u'{:d}..{:d}'.format(first, last)
        if os.linesep in comment:
            raise ValueError('Plan comment must not contain newline')
        if skip:
            # ensure the comment carries exactly one SKIP marker
            if not comment.strip():
                comment = ' # SKIP'
            elif 'skip' not in comment.lower():
                comment = ' # SKIP ' + comment
            else:
                comment = ' # ' + comment.strip()
        else:
            comment = ''
        return plan + comment

    # retrieve information
    def __len__(self):
        """Return number of testcases in this document"""
        # prefer the planned count; fall back to counting actual entries
        if self.metadata['numbering']:
            return len(self.metadata['numbering'])
        return self.actual_length()

    def actual_length(self):
        """Return actual number of testcases in this document"""
        count = 0
        for entry in self.entries:
            if entry.is_testcase:
                count += 1
        return count

    def range(self):
        """Get range like ``(1, 2)`` for this document"""
        if not self.metadata['numbering']:
            return (1, 0)
        return self.metadata['numbering'].range()

    def actual_range(self):
        """Get actual range"""
        if not self.metadata['numbering'] or not self.entries:
            return (1, 0)
        validator = TapDocumentValidator(self)
        enum = validator.enumeration()
        return (min(enum), max(enum))

    def plan(self, comment=u'', skip=False):
        """Get plan for this document"""
        # NOTE(review): the `comment` and `skip` parameters are ignored; the
        # metadata values are used instead -- confirm this is intended
        options = {'comment': self.metadata['skip_comment'],
                   'skip': self.metadata['skip']}
        return self.create_plan(*self.range(), **options)

    def actual_plan(self):
        """Get actual plan for this document"""
        options = {'comment': self.metadata['skip_comment'],
                   'skip': self.metadata['skip']}
        return self.create_plan(*self.actual_range(), **options)

    def count_not_ok(self):
        """How many testcases which are 'not ok' are there?"""
        count = 0
        for entry in self.entries:
            if entry.is_testcase and not entry.field:
                count += 1
        return count

    def count_ok(self):
        """How many testcases which are 'ok' are there?"""
        count = 0
        for entry in self.entries:
            if entry.is_testcase and entry.field:
                count += 1
        return count

    def count_todo(self):
        """How many testcases are still 'todo'?"""
        count = 0
        for entry in self.entries:
            if entry.is_testcase and entry.todo:
                count += 1
        return count

    def count_skip(self):
        """How many testcases got skipped in this document?"""
        count = 0
        for entry in self.entries:
            if entry.is_testcase and entry.skip:
                count += 1
        return count

    def bailed(self):
        """Was a Bailout called at some point in time?"""
        for entry in self.entries:
            if entry.is_bailout:
                return True
        return False

    def bailout_message(self):
        """Return the first bailout message of document or None"""
        for entry in self.entries:
            if entry.is_bailout:
                return entry.message
        return None

    def valid(self):
        """Is this document valid?"""
        validator = TapDocumentValidator(self)
        return validator.valid()

    def __contains__(self, num):
        """Does testcase exist in document?

        It exists iff a testcase object with this number or number 'None'
        exists as entry in doc which corresponds to this number.
        """
        validator = TapDocumentValidator(self)
        enum = validator.enumeration()
        try:
            if self.entries[enum.index(int(num))] is None:
                return False
            else:
                return True
        except (ValueError, IndexError):
            return False

    def __getitem__(self, num):
        """Return testcase with the given number.

        - Requires validation and therefore plan beforehand
        - Returns copy of testcase or None (if range specifies existence)
        - Raises IndexError (if testcase does not exist at all)

        :param num: Testcase number to look up
        """
        try:
            num = int(num)
        except ValueError:
            raise IndexError('Indexing requires testcase number')
        validator = TapDocumentValidator(self)
        enum = validator.enumeration()
        try:
            index = enum.index(num)
        except ValueError:
            # number not assigned; None if it is at least covered by the plan
            doc_range = self.range()
            if doc_range[0] <= num <= doc_range[1]:
                return None
            msg = "Testcase with number {} does not exist"
            raise IndexError(msg.format(num))
        nr = 0
        for entry in self.entries:
            if entry.is_testcase:
                if nr == index:
                    # return a copy carrying the resolved number
                    e = copy.deepcopy(entry)
                    e.number = num
                    return e
                nr += 1

    def __iter__(self):
        """Get iterator for testcases"""
        return TapDocumentIterator(self)

    def __getstate__(self):
        """Return state of this object"""
        state = copy.copy(self.metadata)
        state['entries'] = [entry.__getstate__() for entry in self.entries]
        if state['numbering']:
            state['numbering'] = state['numbering'].__getstate__()
        return state

    def __setstate__(self, state):
        """Restore object's state from `state`"""
        self.entries = []
        self.metadata = {}
        # NOTE(review): dict.iteritems is Python 2 only (this file targets py2)
        for key, value in state.iteritems():
            if key == u'entries':
                for entry in value:
                    tc = TapTestcase()
                    tc.__setstate__(entry)
                    self.entries.append(tc)
            elif key == u'numbering':
                if value is None:
                    self.metadata[key] = None
                else:
                    self.metadata[key] = TapNumbering(tests=0)
                    self.metadata[key].__setstate__(value)
            else:
                self.metadata[key] = value
        keys_exist = ['version', 'version_written', 'header_comment',
                      'numbering', 'skip', 'skip_comment']
        for key in keys_exist:
            if key not in self.metadata:
                raise ValueError('Missing key {} in state'.format(key))

    def copy(self):
        """Return a copy of this object"""
        obj = TapDocument()
        obj.__setstate__(self.__getstate__())
        return obj

    def __enter__(self):
        """Return context for this document"""
        self.ctx = TapWrapper(self)
        return self.ctx

    def __exit__(self, exc_type, exc_value, tracebk):
        """Finalize context for this document"""
        self.ctx.finalize()

    def __str__(self):
        """String representation of TAP document"""
        return unicode(self).encode(STR_ENC)

    def __unicode__(self):
        """Unicode representation of TAP document"""
        out = u''
        # version line
        if self.metadata['version_written'] or \
                self.metadata['version'] != self.DEFAULT_VERSION:
            out += u'TAP version {:d}'.format(self.metadata['version'])
            out += os.linesep
        # header comments
        for comment in self.metadata['header_comment']:
            out += unicode(comment)
        # [possibly] plan
        if self.metadata['plan_at_beginning']:
            out += self.plan() + os.linesep
        # testcases and bailouts
        for entry in self.entries:
            out += unicode(entry)
        # [possibly] plan
        out += self.plan() if not self.metadata['plan_at_beginning'] else u''
        return out
class TapDocumentValidator(object):
    """Validator for TAP documents: checks that the testcase numbers of
    a document are consistent with its plan ('1..N' range).

    NOTE(review): the original docstring described testcase numbering
    (TapNumbering) — apparently a copy-paste leftover.
    """

    def __init__(self, doc, lenient=True):
        """Constructor.

        :param TapDocument doc: the TAP document to validate
        :param bool lenient:    fix simple numbering errors automatically
        :raises TapMissingPlan: if `doc` has no plan/numbering yet
        """
        self.lenient = lenient
        self.skip = doc.skip
        self.bailed = doc.bailed()
        if not doc.metadata['numbering']:
            raise TapMissingPlan("Plan required before document validation")
        # retrieve numbers and range
        self.numbers = []
        self.validity = True
        for entry in doc.entries:
            if entry.is_testcase:
                self.numbers.append(entry.number)
                # a failed testcase that is not skipped makes the
                # whole document invalid
                if not entry.field and not entry.skip:
                    self.validity = False
        self.range = doc.range()
        # enumeration is computed lazily by `enumeration()`
        self.enum = None

    def test_range_validity(self):
        """Is `range` valid for `numbers`? Raises TapInvalidNumbering if not."""
        # more testcases than allowed
        length = self.range[1] - self.range[0] + 1
        if length < len(self.numbers):
            msg = "More testcases provided than allowed by plan"
            raise TapInvalidNumbering(msg)
        # Is some given number outside of range?
        for nr in self.numbers:
            if nr is not None:
                if not (self.range[0] <= nr <= self.range[1]):
                    msg = "Testcase number {} is outside of plan {}..{}"
                    raise TapInvalidNumbering(msg.format(nr, *self.range))
        # duplicate numbers are not checked here; `enumerate` detects them

    @staticmethod
    def enumerate(numbers, first=1, lenient=False):
        """Take a sequence of positive numbers and assign numbers,
        where None is given::

            >>> enumerate([1, 2, None, 4])
            [1, 2, 3, 4]
            >>> enumerate([None, None, 2])
            Traceback (most recent call last):
              File "<stdin>", line 1, in <module>
            ValueError: Testcase number 2 was already used
            >>> enumerate([None, None, 2], lenient=True)
            [1, 3, 2]

        Post conditions:
        * Always the smallest possible integers are assigned (starting
          with `first`). But if a high integer is given, this one is
          used instead.
        * Returns a sequence of positive numbers or raises a ValueError.
        """
        assigned = set()   # numbers assigned by this function itself
        fixed = set()      # numbers explicitly present in the input
        sequence = []
        next_number = 1
        reuse_errmsg = "Testcase number {} was already used"

        def get_next_number(nr):
            # NOTE(review): the `nr` argument is immediately overwritten —
            # the scan for a free number always restarts at `first`.
            nr = first
            while nr in assigned or nr in fixed:
                nr += 1
            return nr

        for nr in numbers:
            if nr is None:
                # gap in the numbering: assign the smallest free number
                next_number = get_next_number(next_number)
                assigned.add(next_number)
                sequence.append(next_number)
                next_number += 1
            else:
                if nr in fixed:
                    # explicit duplicate of an explicit number: always an error
                    raise ValueError(reuse_errmsg.format(nr))
                elif nr in assigned:
                    if not lenient:
                        raise ValueError(reuse_errmsg.format(nr))
                    # leniently reshuffle: the auto-assigned occurrence of
                    # `nr` is renumbered and `nr` keeps its explicit slot
                    next_number = get_next_number(next_number)
                    # replace "nr" with "next_number" in assigned and sequence
                    assigned.remove(nr)
                    fixed.add(next_number)
                    sequence = [e == nr and next_number or e for e in sequence]
                    sequence.append(nr)
                    next_number += 1
                else:
                    fixed.add(nr)
                    sequence.append(nr)
                    if nr > next_number:
                        next_number = nr + 1
        return sequence

    def all_exist(self):
        """Do testcases for all numbers of the plan's `range` exist?"""
        self.enumeration()
        try:
            for i in range(self.range[0], self.range[1] + 1):
                self.enum.index(i)
            return True
        except ValueError:
            # some number of the range is missing in the enumeration
            return False

    def __nonzero__(self):
        # Python 2 truth-value hook: a validator is truthy iff the doc is valid
        return self.valid()

    def enumeration(self, lenient=True):
        """Get enumeration for given `self.numbers`. Enumeration is the list
        of testcase numbers like `self.numbers` but with Nones eliminated.
        Thus it maps all indices of testcase entries to testcase numbers.

        :param bool lenient: Shall I fix simple errors myself?
        """
        if not self.enum:
            self.test_range_validity()
            self.enum = self.enumerate(self.numbers, self.range[0], lenient)
        return self.enum

    def __iter__(self):
        return iter(self.enumeration())

    def __repr__(self):
        return '<TapDocumentValidator {} {}{}>'.format(self.numbers,
            self.range, self.enum and ' with enumeration' or '')

    def sanity_check(self, lenient=True):
        """Raise any errors which indicate that this document is wrong.

        This method performs a subset of checks of `valid`, but raises errors
        with meaningful messages unlike `valid` which just returns False.

        :param bool lenient: Shall I ignore more complex errors?
        """
        self.test_range_validity()
        self.enumerate(self.numbers, self.range[0], lenient)

    def valid(self, lenient=True):
        """Is the given document valid, meaning that `numbers` and
        `range` match?

        A bailed-out document is never valid; a skipped one always is.
        """
        if self.bailed:
            return False
        elif self.skip:
            return True
        elif self.enum:
            # enumeration already computed: no ValueError possible anymore
            return self.validity and self.all_exist()
        else:
            try:
                self.enumeration(lenient)
                return self.validity and self.all_exist()
            except ValueError:
                return False
class TapDocumentIterator(object):
    """Iterator over enumerated testcase entries of TAP document.

    Returns None for non-defined testcases.
    Raises Bailouts per default.
    """

    def __init__(self, doc, raise_bailout=True):
        # :param TapDocument doc: document to iterate
        # :param bool raise_bailout: raise bailout entries as exceptions
        self.skip = doc.skip
        # deep copy so numbers can be written onto the returned entries
        self.entries = copy.deepcopy(doc.entries)
        self.enum = TapDocumentValidator(doc).enumeration()
        self.current, self.end = doc.range()
        self.raise_bailout = raise_bailout

    def __iter__(self):
        return self

    def lookup(self, num):
        """Return testcase for given number or None"""
        try:
            entries_index = self.enum.index(num)
        except ValueError:
            # number not in the enumeration; with raise_bailout the walk
            # below still happens so a pending bailout can be raised
            if self.raise_bailout:
                entries_index = -1
            else:
                return None
        i = 0
        for entry in self.entries:
            if entry.is_testcase:
                if entries_index == i:
                    entry.number = num
                    return entry
                i += 1
            elif self.raise_bailout:
                # non-testcase entries are bailout exceptions
                raise entry
        if entries_index == -1:
            return None

    def next(self):
        # Python 2 iterator protocol (no __next__ here)
        if self.skip:
            raise StopIteration("Document gets skipped")
        if self.current > self.end:
            raise StopIteration("End of entries reached")
        self.current += 1
        return self.lookup(self.current - 1)
class TapDocumentActualIterator(object):
    """Iterator over actual *un*enumerated testcases. Raises Bailouts."""

    def __init__(self, doc, raise_bailout=True):
        # deep copy keeps the source document untouched
        self.skip = doc.skip
        self.entries = copy.deepcopy(doc.entries)
        self.current = 0
        self.raise_bailout = raise_bailout

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol
        if self.skip:
            raise StopIteration("Document gets skipped")
        if self.current >= len(self.entries):
            raise StopIteration("All entries iterated")
        else:
            entry = self.entries[self.current]
            self.current += 1
            if entry.is_testcase:
                return entry
            elif self.raise_bailout:
                raise entry
            # NOTE(review): a bailout entry with raise_bailout=False falls
            # through here and yields None instead of advancing to the
            # next testcase — confirm this is intended.
class TapDocumentFailedIterator(object):
    """Iterate over all failed testcases of a document (the 'not ok' ones).

    Testcase numbers stay 'None'. Bailout entries are ignored.
    """

    def __init__(self, doc):
        self.current = 0
        self.doc = doc

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol
        if self.doc.skip:
            raise StopIteration("No entries available")
        entries = self.doc.entries
        while self.current < len(entries):
            candidate = entries[self.current]
            self.current += 1
            # yield only failed testcases; passes and bailouts are skipped
            if candidate.is_testcase and not candidate.field:
                return copy.deepcopy(candidate)
        raise StopIteration("All entries iterated")
class TapDocumentTokenizer(object):
    """Lexer for TAP documents: turns raw lines into token tuples
    which are consumed via the iterator protocol."""

    # just for documentation: the set of token names this lexer emits
    TOKENS = set(['VERSION_LINE', 'DATA', 'PLAN', 'TESTCASE', 'BAILOUT',
                  'WARN_VERSION_LINE', 'WARN_PLAN', 'WARN_TESTCASE'])

    # regexes to match the TAP line types
    VERSION_REGEX = re.compile(r'TAP version (?P<version>\d+)\s*$', flags=re.I)
    PLAN_REGEX = re.compile(
        r'(?P<first>\d+)\.\.(?P<last>\d+)\s*'
        r'(?P<comment>#.*?)?$'
    )
    TESTCASE_REGEX = re.compile((
        r'(?P<field>(not )?ok)'
        r'(\s+(?P<number>\d+))?'
        r'(\s+(?P<description>[^\n]*?)'
        r'(\s+#(?P<directive>(\s+(TODO|SKIP).*?)+?))?)?\s*$'),
        flags=re.IGNORECASE
    )
    BAILOUT_REGEX = re.compile(
        r'Bail out!(?P<comment>.*)',
        flags=re.MULTILINE | re.IGNORECASE
    )

    # lookalike prefixes: lines resembling TAP constructs that fail the
    # strict regexes above trigger WARN_* tokens
    VERSION_LOOKALIKE = 'tap version'
    PLAN_LOOKALIKE = '1..'
    TESTCASE_LOOKALIKE = ['not ok ', 'ok ']

    def __init__(self):
        # FIFO of token tuples produced by `parse_line`
        self.pipeline = collections.deque()
        self.last_indentation = 0

    @classmethod
    def strip_comment(cls, cmt):
        # normalize a trailing '# ...' comment; None becomes empty string
        if cmt is None:
            return u''
        return cmt.lstrip().lstrip('#-').lstrip().rstrip()

    def parse_line(self, line):
        """Parse one line of a TAP file and push token tuple(s) onto
        `self.pipeline`."""
        match1 = self.VERSION_REGEX.match(line)
        match2 = self.PLAN_REGEX.match(line)
        match3 = self.TESTCASE_REGEX.match(line)
        match4 = self.BAILOUT_REGEX.match(line)
        add = lambda *x: self.pipeline.append(x)
        if match1:
            add('VERSION_LINE', int(match1.group('version')))
            self.last_indentation = None
        elif match2:
            add('PLAN', (int(match2.group('first')), int(match2.group('last'))),
                self.strip_comment(match2.group('comment')))
            self.last_indentation = None
        elif match3:
            # testcase number is optional
            number = match3.group('number')
            number = int(number) if number else None
            add('TESTCASE', match3.group('field').lower() == 'ok',
                number, self.strip_comment(match3.group('description')),
                match3.group('directive'))
            self.last_indentation = 0
        elif match4:
            add('BAILOUT', match4.group('comment').strip())
            self.last_indentation = None
        else:
            # no strict match: possibly warn about lookalikes, then
            # always emit the raw line as DATA
            sline = line.lower().strip()
            lookalike = 'Line "{}" looks like a {}, but does not match syntax'
            if sline.startswith(self.VERSION_LOOKALIKE):
                add('WARN_VERSION_LINE', lookalike.format(sline, 'version line'))
            elif sline.startswith(self.PLAN_LOOKALIKE):
                add('WARN_PLAN', lookalike.format(sline, 'plan'))
            elif sline.startswith(self.TESTCASE_LOOKALIKE[0]):
                add('WARN_TESTCASE', lookalike.format(sline, 'test line'))
            elif sline.startswith(self.TESTCASE_LOOKALIKE[1]):
                add('WARN_TESTCASE', lookalike.format(sline, 'test line'))
            add('DATA', line)

    def from_file(self, filepath, encoding='utf-8'):
        """Read TAP file using `filepath` as source."""
        with codecs.open(filepath, encoding=encoding) as fp:
            for line in fp.readlines():
                self.parse_line(line.rstrip('\n\r'))

    def from_string(self, string):
        """Read TAP source code from the given `string`."""
        for line in string.splitlines():
            self.parse_line(line.rstrip('\n\r'))

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: pop tokens until the pipeline is empty
        # NOTE(review): the `while True` is redundant — the return exits
        # on the first pass.
        try:
            while True:
                return self.pipeline.popleft()
        except IndexError:
            raise StopIteration("All tokens consumed.")
class TapDocumentParser(object):
    """Parser for TAP documents: consumes tokenizer tokens and builds
    a TapDocument."""

    def __init__(self, tokenizer, lenient=True, logger=None):
        # :param tokenizer:    iterable of token tuples (TapDocumentTokenizer)
        # :param bool lenient: log warnings instead of raising TapParseError
        # :param logger:       optional logger; a default one is created otherwise
        self.tokenizer = tokenizer
        self.lenient_parsing = lenient
        self.doc = None
        if logger:
            self.log = logger
        else:
            logging.basicConfig()
            self.log = logging.getLogger(self.__class__.__name__)

    @classmethod
    def parse_data(cls, lines):
        """Give me some lines and I will parse it as data.

        Lines between '---' and '...' markers are parsed as YAML (and
        wrapped in YamlData); all other lines are concatenated into
        linesep-terminated string blocks, one block per contiguous run.
        """
        data = []
        yaml_mode = False
        yaml_cache = u''
        for line in lines:
            if line.strip() == '---':
                yaml_mode = True
            elif line.strip() == '...':
                data.append(YamlData(yamlish.load(yaml_cache)))
                yaml_cache = u''
                yaml_mode = False
            else:
                if yaml_mode:
                    yaml_cache += line + os.linesep
                else:
                    line = line.rstrip('\r\n')
                    # append to the previous string block if there is one
                    if len(data) > 0 and isinstance(data[-1], basestring):
                        data[-1] += line + os.linesep
                    else:
                        data.append(line + os.linesep)
        return data

    def warn(self, msg):
        """Log warning `msg` when lenient; raise TapParseError otherwise."""
        if self.lenient_parsing:
            self.log.warn(msg)
        else:
            raise TapParseError(msg)

    def parse(self):
        """Parse the tokens provided by `self.tokenizer`."""
        self.doc = TapDocument()
        # state: 0 = nothing read yet, 1 = version line read, 2 = in body
        state = 0
        plan_written = False
        comment_cache = []

        def flush_cache(comment_cache):
            # attach cached DATA lines to the last entry, or to the
            # document header when no entry exists yet
            if comment_cache:
                if self.doc.entries:
                    self.doc.entries[-1].data += self.parse_data(comment_cache)
                else:
                    for line in self.parse_data(comment_cache):
                        self.doc.metadata['header_comment'] += [line]
                comment_cache = []
            return comment_cache

        for tok in self.tokenizer:
            if tok[0] == 'VERSION_LINE':
                if state != 0:
                    msg = ("Unexpected version line. "
                           "Must only occur as first line.")
                    raise TapParseError(msg)
                self.doc.add_version_line(tok[1])
                state = 1
            elif tok[0] == 'PLAN':
                comment_cache = flush_cache(comment_cache)
                if plan_written:
                    msg = "Plan must not occur twice in one document."
                    raise TapParseError(msg)
                # (1, 0) is the conventional "zero tests" plan, hence allowed
                if tok[1][0] > tok[1][1] and not (tok[1] == (1, 0)):
                    self.warn("Plan defines a decreasing range.")
                self.doc.add_plan(tok[1][0], tok[1][1], tok[2], state <= 1)
                state = 2
                plan_written = True
            elif tok[0] == 'TESTCASE':
                comment_cache = flush_cache(comment_cache)
                tc = TapTestcase()
                tc.field = tok[1]
                tc.number = tok[2] if tok[2] else None
                tc.description = tok[3] if tok[3] else None
                tc.directive = tok[4] if tok[4] else None
                self.doc.add_testcase(tc)
                state = 2
            elif tok[0] == 'BAILOUT':
                comment_cache = flush_cache(comment_cache)
                self.doc.add_bailout(TapBailout(tok[1]))
                state = 2
            elif tok[0] == 'DATA':
                comment_cache.append(tok[1])
                state = 2
            elif tok[0] in ['WARN_VERSION_LINE', 'WARN_PLAN', 'WARN_TESTCASE']:
                self.warn(tok[1])
                state = 2
            else:
                raise ValueError("Unknown token: {}".format(tok))
        comment_cache = flush_cache(comment_cache)

    @property
    def document(self):
        # parse lazily on first access
        if not self.doc:
            self.parse()
        return self.doc
class TapProtocol:
    """Abstract writer API for TAP documents (implemented by TapWrapper).

    NOTE(review): this is an old-style class (Python 2). `__init__`
    returning NotImplemented makes direct instantiation fail with a
    TypeError — presumably a poor-man's abstract class; confirm.
    """

    def __init__(self, version=TapDocument.DEFAULT_VERSION):
        return NotImplemented

    def plan(self, first, last, skip=u''):
        # declare how many tests will run
        raise NotImplementedError()

    def testcase(self, ok, description=u'', skip=u'', todo=u''):
        # record one test result
        raise NotImplementedError()

    def bailout(self, comment):
        # abort the whole test run
        raise NotImplementedError()

    def write(self, line):
        # add free-form data/comment output
        raise NotImplementedError()

    def finalize(self):
        # conclude the document
        return NotImplemented
class TapWrapper(object, TapProtocol):
    """One of the nice TAP APIs. See ``api`` module for others.

    Wraps a `TapDocument` and provides the nicer `TapProtocol` API.
    All methods besides `write` and `get` return self;
    thus allowing method chaining. `plan` can be called at any time
    unlike the TAP file format specification defines.
    """
    # NOTE(review): mixing `object` with the old-style `TapProtocol` base
    # only works on Python 2; Python 3 would raise an MRO TypeError here.

    def __init__(self, doc=None, version=TapDocument.DEFAULT_VERSION):
        """Take a `doc` (or create a new one)"""
        self.doc = doc or TapDocument(version)
        # the plan is buffered as (first, last, skip) until `finalize`
        self._plan = None

    def plan(self, first=None, last=None, skip=u'', tests=None):
        """Define how many tests are run. Either provide `first` & `last`
        or `tests` as integer attributes. `skip` is an optional message.
        If set, the test run was skipped because of the reason given by `skip`.

        :raises RuntimeError: if a plan was already given
        :raises ValueError:   if neither (first, last) nor tests is provided
        """
        if self._plan is not None:
            raise RuntimeError("Only one plan per document allowed")
        err_msg = "Provide either first and last params or tests param"
        if all([v is None for v in [first, last, tests]]):
            raise ValueError(err_msg)
        else:
            if tests is not None:
                # `tests` shorthand: the plan becomes 1..tests
                first = 1
                last = tests
            elif first is not None and last is not None:
                pass
            else:
                raise ValueError(err_msg)
        self._plan = (first, last, skip)
        return self

    def write(self, line):
        """Add a comment `line` at the current position."""
        if self.doc.entries:
            # attach to the most recent entry
            self.doc.entries[-1].data += [line]
        else:
            # no entries yet: the line belongs to the document header
            self.doc.add_header_line(line)
        return self

    def testcase(self, ok=True, description=u'', skip=False, todo=False):
        """Add a testcase entry to the TapDocument"""
        tc = TapTestcase()
        tc.field = ok
        tc.description = description
        if skip:
            tc.skip = skip
        if todo:
            tc.todo = todo
        self.doc.add_testcase(tc)
        return self

    def ok(self, description=u'', skip=False, todo=False):
        """Add a succeeded testcase entry to the TapDocument"""
        self.testcase(True, description, skip, todo)
        return self

    def not_ok(self, description=u'', skip=False, todo=False):
        """Add a failed testcase entry to the TapDocument"""
        self.testcase(False, description, skip, todo)
        return self

    def unwrap(self):
        """Retrieve a copy of the current document (finalizes first)"""
        self.finalize()
        return self.doc.copy()

    def bailout(self, comment=u''):
        """Trigger a bailout"""
        # NOTE(review): passes the raw comment string to add_bailout,
        # whereas the parser passes a TapBailout instance — confirm
        # add_bailout accepts both.
        self.doc.add_bailout(comment)
        return self

    def out(self, stream=sys.stderr):
        """Write the document to `stream` (stderr by default).
        Requires finalization."""
        self.finalize()
        print(unicode(self.doc), file=stream)

    def finalize(self):
        """Finalize document. Just checks whether plan has been written.
        Any operation afterwards besides `out` and `unwrap` is
        undefined behavior.
        """
        if not self._plan:
            raise TapMissingPlan("Cannot finalize document. Plan required.")
        self.doc.add_plan(first=self._plan[0], last=self._plan[1],
                          skip_comment=self._plan[2])
        return self

    def __str__(self):
        # byte string (Python 2): encoded unicode rendering of the document
        return unicode(self.doc).encode(STR_ENC)

    def __unicode__(self):
        return unicode(self.doc)
def merge(*docs):
    """Merge TAP documents provided as argument.

    Takes maximum TAP document version. Testcase numbers are
    incremented by consecutive offsets based on the TAP plan.
    Returns the merged TapDocument, or None when called without docs.
    """
    # this is a incredible complex algorithm, just sayin'
    if not docs:
        return None
    doc = TapDocument()
    doc.set_version(max([d.metadata['version'] for d in docs]))
    # collect the non-blank header comments of all documents
    for d in docs:
        if d.metadata['header_comment']:
            comments = [c for c in d.metadata['header_comment'] if c.strip()]
            doc.metadata['header_comment'] += comments
    # normalize ranges: shift each document's plan so they line up
    # consecutively, starting at 1
    ranges, offset = [], 1
    minimum, maximum, count = float('inf'), 0, 0
    for d in docs:
        r = list(d.range())
        # widen the range if the document holds more entries than planned
        r[1] = max(r[1], r[0] + len(d) - 1)
        # NOTE(review): relies on Python 2 `map` returning a list
        r = map(lambda x: x + offset - r[0], r)
        offset = r[1] + 1
        ranges.append(tuple(r))
    for d_id, d in enumerate(docs):
        # create copies and assign normalized numbers
        # NOTE(review): block nesting reconstructed from a whitespace-
        # mangled source — verify against upstream.
        numbers, count_assignments = [], 0
        for entry in d.entries:
            c = entry.copy()
            if entry.is_testcase:
                if c.number is not None:
                    c.number -= d.range()[0]
                    c.number += ranges[d_id][0]
                numbers.append(c.number)
            doc.entries.append(c)
            count_assignments += 1
        # use `enumerate` to compute assignments
        enums = TapDocumentValidator.enumerate(numbers, first=ranges[d_id][0])
        if enums:
            minimum = min(minimum, min(enums))
            maximum = max(maximum, max(enums))
        # assign numbers to the entries just appended
        index = 0
        for entry in doc.entries[-count_assignments or len(doc.entries):]:
            if not entry.is_testcase:
                continue
            number = enums[index]
            entry.number = number
            minimum, maximum = min(minimum, number), max(maximum, number)
            index += 1
            count += 1
    # merge skip comments and decide plan placement
    skip_comments = []
    for d in docs:
        if d.metadata['skip'] and d.metadata['skip_comment']:
            skip_comments.append(d.metadata['skip_comment'])
    pab = any([d.metadata['plan_at_beginning'] for d in docs])
    if count == 0:
        # no testcases at all: the conventional empty plan 1..0
        minimum, maximum = 1, 0
    elif minimum == float('inf'):
        # testcases exist but none got a number
        minimum, maximum = 1, count
    else:
        maximum = max(maximum, minimum + count - 1)
    doc.add_plan(minimum, maximum, '; '.join(skip_comments), pab)
    return doc
# ----------------------------------------------------------------------
# Wormy
# By Elvis Sun
'''
{'x': startx + 3, 'y': starty},
{'x': startx + 2, 'y': starty},
{'x': startx + 1, 'y': starty},
{'x': startx, 'y': starty},
{'x': startx, 'y': starty + 1},
{'x': startx, 'y': starty + 2},
{'x': startx, 'y': starty + 3},
{'x': startx, 'y': starty + 4},
{'x': startx, 'y': starty + 5},
{'x': startx - 1, 'y': starty + 5},
{'x': startx - 2, 'y': starty + 5},
{'x': startx - 3, 'y': starty + 5},
{'x': startx - 4, 'y': starty + 5},
{'x': startx - 5, 'y': starty + 5},
{'x': startx - 5, 'y': starty + 6},
{'x': startx - 5, 'y': starty + 7},
{'x': startx - 5, 'y': starty + 8},
{'x': startx - 5, 'y': starty + 9},
{'x': startx - 5, 'y': starty + 10},
{'x': startx - 5, 'y': starty + 11},
{'x': startx - 5, 'y': starty + 12},
{'x': startx - 5, 'y': starty + 13},
{'x': startx - 5, 'y': starty + 14},
{'x': startx - 5, 'y': starty + 15},
{'x': startx - 5, 'y': starty + 16},
{'x': startx - 5, 'y': starty + 17},
{'x': startx - 5, 'y': starty + 18},
{'x': startx - 5, 'y': starty + 19},
{'x': startx - 5, 'y': starty + 20},
{'x': startx - 4, 'y': starty + 20},
{'x': startx - 3, 'y': starty + 20},
{'x': startx - 2, 'y': starty + 20},
{'x': startx - 1, 'y': starty + 20},
{'x': startx - 0, 'y': starty + 20},
{'x': startx + 1, 'y': starty + 20},
{'x': startx + 2, 'y': starty + 20},
{'x': startx + 3, 'y': starty + 20},
{'x': startx + 4, 'y': starty + 20},
{'x': startx + 5, 'y': starty + 20},
{'x': startx + 6, 'y': starty + 20},
{'x': startx + 7, 'y': starty + 20},
{'x': startx + 8, 'y': starty + 20},
{'x': startx + 9, 'y': starty + 20},
{'x': startx + 10, 'y': starty + 20},
{'x': startx + 11, 'y': starty + 20},
{'x': startx + 12, 'y': starty + 20},
{'x': startx + 13, 'y': starty + 20},
{'x': startx + 14, 'y': starty + 20},
{'x': startx + 15, 'y': starty + 20},
{'x': startx + 16, 'y': starty + 20},
{'x': startx + 17, 'y': starty + 20},
{'x': startx + 18, 'y': starty + 20},
{'x': startx + 19, 'y': starty + 20},
{'x': startx + 19, 'y': starty + 19},
{'x': startx + 18, 'y': starty + 19},
{'x': startx + 17, 'y': starty + 19},
{'x': startx + 16, 'y': starty + 19},
{'x': startx + 15, 'y': starty + 19},
{'x': startx + 14, 'y': starty + 19},
{'x': startx + 13, 'y': starty + 19},
{'x': startx + 12, 'y': starty + 19},
{'x': startx + 11, 'y': starty + 19},
{'x': startx + 10, 'y': starty + 19},
{'x': startx + 10, 'y': starty + 18},
{'x': startx + 11, 'y': starty + 18},
{'x': startx + 12, 'y': starty + 18},
{'x': startx + 13, 'y': starty + 18},
{'x': startx + 14, 'y': starty + 18},
{'x': startx + 15, 'y': starty + 18},
{'x': startx + 16, 'y': starty + 18},
{'x': startx + 17, 'y': starty + 18},
{'x': startx + 18, 'y': starty + 18},
{'x': startx + 19, 'y': starty + 18},
{'x': startx + 19, 'y': starty + 17},
{'x': startx + 18, 'y': starty + 17},
{'x': startx + 17, 'y': starty + 17},
{'x': startx + 16, 'y': starty + 17},
{'x': startx + 15, 'y': starty + 17},
{'x': startx + 14, 'y': starty + 17},
{'x': startx + 13, 'y': starty + 17},
{'x': startx + 12, 'y': starty + 17},
{'x': startx + 11, 'y': starty + 17},
{'x': startx + 10, 'y': starty + 17}
'''
import random, pygame, sys, operator, os, time
from pygame.locals import *
FPS = 60             # frames-per-second cap for the main loop
WINDOWWIDTH = 1700   # window width in pixels
WINDOWHEIGHT = 1000  # window height in pixels
CELLSIZE = 50        # pixel size of one square grid cell
assert WINDOWWIDTH % CELLSIZE == 0, "Window width must be a multiple of cell size."
assert WINDOWHEIGHT % CELLSIZE == 0, "Window height must be a multiple of cell size."
CELLWIDTH = int(WINDOWWIDTH / CELLSIZE)    # board width, in cells
CELLHEIGHT = int(WINDOWHEIGHT / CELLSIZE)  # board height, in cells

# color palette  R    G    B
WHITE = (255, 255, 255)
GREY = (200, 200, 200)
PINK = (198, 134, 156)
BLACK = ( 17, 18, 13)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
DARKGREEN = ( 0, 155, 0)
DARKGRAY = ( 40, 40, 40)
ORANGE = (255, 155, 111)
BGCOLOR = BLACK      # playfield background color

# direction constants used throughout the AI and movement code
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'

HEAD = 0 # syntactic sugar: index of the worm's head
def main():
    """Initialize pygame and the wall layout, then run games forever."""
    global FPSCLOCK, DISPLAYSURF, BASICFONT
    global wallCoords, softWallCoords
    wallCoords = []
    softWallCoords = []
    # wall layouts come from helpers defined elsewhere in this file
    softWallCoords = findSoftWall()
    wallCoords = findWall()
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
    pygame.display.set_caption('Wormy')
    # OUTPUT PRINT TO FILE (debugging aid, disabled)
    #os.remove("test.txt")
    #sys.stdout=open("test.txt","w")
    while True:
        runGame()
        showGameOverScreen()
def runGame():
    """Run one self-playing game of Wormy until the worm dies.

    The worm is steered by a pathfinding AI: `calculatePath` plans a
    route to the apple; when no route exists the worm enters a
    "stalling" mode and follows area-maximizing heuristics until a
    path opens up again.

    NOTE(review): the nesting of the stalling logic below was
    reconstructed from a whitespace-mangled source — verify against
    upstream before relying on it.
    """
    global stalling
    stalling = False       # True while the worm waits for a path to open
    stallingCount = -1     # remaining stalling steps; -1 = not stalling
    # Set a fixed start point (random start is commented out).
    #startx = random.randint(5, CELLWIDTH - 6)
    #starty = random.randint(5, CELLHEIGHT - 6)
    startx = 5
    starty = 0
    wormCoords = [{'x': startx + 6, 'y': starty},
                  {'x': startx + 5, 'y': starty},
                  {'x': startx + 4, 'y': starty},
                  ]
    direction = RIGHT
    directionList = [RIGHT]   # queue of upcoming moves
    PATH = []
    # Start the apple at a fixed place (random start is commented out).
    #apple = getRandomLocation(wormCoords)
    apple = {'x': startx+8, 'y': starty}
    #apple = {'x': startx-1, 'y': starty-1}
    lastApple = {'x':startx-1, 'y': starty -1}
    PATH = calculatePath(wormCoords,apple,True)
    directionList = calcDirection(PATH)
    lastWall = 0   # wall the worm last touched (for wrap-around logic)
    while True: # main game loop
        for event in pygame.event.get(): # event handling loop
            if event.type == QUIT:
                terminate()
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    terminate()
        # check if the worm has hit itself or the edge
        if wormCoords[HEAD]['x'] == -1 or wormCoords[HEAD]['x'] == CELLWIDTH or wormCoords[HEAD]['y'] == -1 or wormCoords[HEAD]['y'] == CELLHEIGHT:
            terminate()
            return # game over
        for wormBody in wormCoords[1:]:
            if wormBody['x'] == wormCoords[HEAD]['x'] and wormBody['y'] == wormCoords[HEAD]['y']:
                terminate()
                return # game over
        # check if worm has eaten an apple
        if wormCoords[HEAD]['x'] == apple['x'] and wormCoords[HEAD]['y'] == apple['y']:
            # don't remove worm's tail segment (the worm grows)
            lastApple = apple
            apple = getRandomLocation(wormCoords) # set a new apple somewhere
            drawApple(apple,lastApple) #for testing purposes
            #drawApple(lastApple)
            #sectionBreak() #just print some crap
            PATH = calculatePath(wormCoords,apple,True) #calculate path to go
            if not PATH:
                # no path at all: stall effectively forever
                stalling = True
                stallingCount = 10000
            elif PATH == 'stall':
                # short stall, proportional to the worm's length
                stalling = True
                stallingCount = int(len(wormCoords)/2)
            else:
                directionList = calcDirection(PATH)
        else:
            del wormCoords[-1] # remove worm's tail segment
        lastDirection = direction
        '''finding next direction'''
        #if stalling and len(directionList) == 1 and wormCoords[0] in softWallCoords:
            #print('special case')
            #directionList.extend(findBetterDirection(wormCoords,direction,0))
        if stalling and not directionList:
            # no queued moves while stalling: pick a survival move
            onlyDirection = calcOnlyDirection(wormCoords)
            if onlyDirection and onlyDirection == lastDirection:
                directionList.append(onlyDirection)
                print('only direction:', direction)
            else:
                if safeToGo(wormCoords,direction,lastWall):
                    #print('safe')
                    directionList.append(direction) #continue the previous direction
                elif (not findNewHead(direction,wormCoords) in wormCoords) or (findNewHead(direction,wormCoords) in wallCoords):
                    directionList.append(direction)
                else:
                    lastDirection = direction
                    #check if path can be found, if yes override previous calcualtion
                    PATH = calculatePath(wormCoords,apple,False)
                    if PATH != [] and PATH != 'stall':
                        stalling = False
                        stallingCount = -1
                        directionList = calcDirection(PATH)
                    else:
                        if checkLastWall(wormCoords):
                            lastWall = checkLastWall(wormCoords)
                        # fall back to the largest-area heuristic
                        directionList.extend(findBetterDirection(wormCoords,direction,lastWall))
                        if calcArea(findNewHead(directionList[0],wormCoords), wormCoords, lastWall)<3:
                            directionList = [lastDirection]
            #print(directionList)
            stallingCount = stallingCount - 1
            #print ('stalling Count:', stallingCount)
            if stallingCount < 1:
                # stalling budget exhausted: try to resume normal pathing
                #print('stalling Count',stallingCount)
                stalling = False
                prevLastWall = lastWall
                lastWall = 0
                directionList.append(lastDirection)
                PATH = calculatePath(wormCoords,apple,True) #calculate path to go
                if not PATH:
                    stalling = True
                    stallingCount = 10000
                    lastWall = prevLastWall
                elif PATH == 'stall':
                    stalling = True
                    stallingCount = int(len(wormCoords)/2)
                    lastWall = prevLastWall
                else:
                    directionList = calcDirection(PATH)
        nextHead = findNewHead(directionList[0],wormCoords)
        '''
        if nextHead in wormCoords or nextHead in wallCoords or nextHead in getNextWallCoords(lastWall): #if gonig to die go into tunnel
            lastWall = 0
            directionList = findNextDirection(wormCoords, directionList[0],0)
            print('going into tunnel')
        '''
        if stalling:
            if AreaIsTooSmall(CELLWIDTH,nextHead, wormCoords, lastWall): #return true if the area going in is too small
                lastWall = 0
                directionList = findNextDirection(wormCoords, directionList[0],0)
                print('almost died, recalcualting...',wormCoords[0],directionList)
        # perform the move and redraw the frame
        direction = directionList.pop(0)
        newHead = findNewHead(direction, wormCoords)
        wormCoords.insert(0, newHead)
        DISPLAYSURF.fill(BGCOLOR)
        drawGrid()
        drawWorm(wormCoords)
        drawApple(apple,lastApple)
        drawScore(len(wormCoords) - 3)
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def calcOnlyDirection(worm):
    """Return the single direction the worm can go from its head, or 0.

    Counts the head's neighbor cells blocked by the worm body; if all
    but one are blocked, the remaining free cell determines the
    direction. Otherwise 0 is returned.
    """
    remaining = 4
    escape_cell = 0
    for neighbor in getNeighborhood(worm[0]):
        if neighbor in worm:
            remaining -= 1
        else:
            escape_cell = neighbor
    if remaining == 1:
        return calcDirection([worm[0], escape_cell])
    return 0
def getNextWallCoords(lastWall):
    """Return the cells of the board edge opposite to `lastWall`.

    `lastWall` is one of UP/DOWN/LEFT/RIGHT (the wall the worm last
    touched) or any other value (e.g. 0) for "no wall". After touching
    e.g. the RIGHT wall, the opposite column at x == 0 is treated as
    blocked, so that column is returned.

    :param lastWall: direction constant or 0
    :return: list of {'x': ..., 'y': ...} cell dicts (empty when no
             wall applies)
    """
    # The original looped over every row/column and re-tested `lastWall`
    # each iteration with a manual counter; the condition is loop-
    # invariant, so test once and build the matching edge directly.
    if lastWall == RIGHT:
        return [{'x': 0, 'y': y} for y in range(CELLHEIGHT)]
    if lastWall == LEFT:
        return [{'x': CELLWIDTH - 1, 'y': y} for y in range(CELLHEIGHT)]
    if lastWall == DOWN:
        return [{'x': x, 'y': 0} for x in range(CELLWIDTH)]
    if lastWall == UP:
        return [{'x': x, 'y': CELLHEIGHT - 1} for x in range(CELLWIDTH)]
    return []
def safeToGo(worm, direction, lastWall):
    """Heuristic check whether continuing in `direction` is safe.

    Builds the list of forbidden cells (static walls, worm body, and
    the wrap-around wall implied by `lastWall`), then probes the cells
    around the prospective new head. Returns False when:
    * a forward-diagonal cell is blocked while the adjacent side cell
      is free (a "squeeze" that tends to trap the worm),
    * the new head cell itself is blocked,
    * the new head would have no free neighbor, or only one free
      neighbor while the cell two steps ahead is free (heuristic).

    :param worm:      list of worm cells, head first
    :param direction: UP/DOWN/LEFT/RIGHT
    :param lastWall:  wall constant forwarded to getNextWallCoords (or 0)
    :return: True if the move looks safe
    """
    listOfNo = wallCoords + worm
    listOfNo.extend(getNextWallCoords(lastWall))
    # probe cells default to the head and are overwritten per direction
    # NOTE(review): `head` is assigned but never used afterwards.
    head = worm[0]
    forward = worm[0]
    forwardLeft = worm[0]
    forwardRight = worm[0]
    left = worm[0]
    right = worm[0]
    if direction == UP:
        newHead = {'x': worm[HEAD]['x'], 'y': worm[HEAD]['y'] - 1}
        forward = {'x': worm[HEAD]['x'], 'y': worm[HEAD]['y'] - 2}
        forwardLeft = {'x': worm[HEAD]['x']-1, 'y': worm[HEAD]['y'] - 1}
        forwardRight = {'x': worm[HEAD]['x']+1, 'y': worm[HEAD]['y'] - 1}
        left = {'x': worm[HEAD]['x']-1, 'y': worm[HEAD]['y']}
        right = {'x': worm[HEAD]['x']+1, 'y': worm[HEAD]['y']}
    elif direction == DOWN:
        newHead = {'x': worm[HEAD]['x'], 'y': worm[HEAD]['y'] + 1}
        forward = {'x': worm[HEAD]['x'], 'y': worm[HEAD]['y'] + 2}
        forwardLeft = {'x': worm[HEAD]['x']-1, 'y': worm[HEAD]['y'] + 1}
        forwardRight = {'x': worm[HEAD]['x']+1, 'y': worm[HEAD]['y'] + 1}
        left = {'x': worm[HEAD]['x']-1, 'y': worm[HEAD]['y']}
        right = {'x': worm[HEAD]['x']+1, 'y': worm[HEAD]['y']}
    elif direction == LEFT:
        newHead = {'x': worm[HEAD]['x'] - 1, 'y': worm[HEAD]['y']}
        forward = {'x': worm[HEAD]['x'] - 2, 'y': worm[HEAD]['y']}
        forwardLeft = {'x': worm[HEAD]['x']-1, 'y': worm[HEAD]['y'] + 1}
        forwardRight = {'x': worm[HEAD]['x']-1, 'y': worm[HEAD]['y'] - 1}
        left = {'x': worm[HEAD]['x'], 'y': worm[HEAD]['y']+1}
        right = {'x': worm[HEAD]['x'], 'y': worm[HEAD]['y']-1}
    elif direction == RIGHT:
        newHead = {'x': worm[HEAD]['x'] + 1, 'y': worm[HEAD]['y']}
        forward = {'x': worm[HEAD]['x'] + 2, 'y': worm[HEAD]['y']}
        forwardLeft = {'x': worm[HEAD]['x']+1, 'y': worm[HEAD]['y'] - 1}
        forwardRight = {'x': worm[HEAD]['x']+1, 'y': worm[HEAD]['y'] + 1}
        left = {'x': worm[HEAD]['x'], 'y': worm[HEAD]['y']-1}
        right = {'x': worm[HEAD]['x'], 'y': worm[HEAD]['y']+1}
    #print ('newhead',newHead,'no go:',listOfNo)
    # squeeze check: blocked forward-diagonal next to a free side cell
    if (forwardLeft in listOfNo and not left in listOfNo) or (forwardRight in listOfNo and not right in listOfNo):
        #print ('forwardleft left detected',forwardLeft,left,'right:',forwardRight,right)
        return False
    if newHead in listOfNo:
        return False
    # count free neighbors of the prospective head
    waysToGo = []
    waysToGo = getNeighborhood(newHead)
    count = len(waysToGo)
    for each in waysToGo:
        if each in listOfNo:
            count = count - 1
    #print (waysToGo,count)
    if count < 1:
        return False
    elif count < 2 and not (forward in listOfNo):
        return False
    else:
        return True
def checkLastWall(worm):
    """Return which wall the worm's head currently touches, or 0.

    LEFT for x == 0, RIGHT for the last column, UP for y == 0, DOWN
    for the last row; 0 when the head is not on any edge cell.
    """
    head_x = worm[0]['x']
    head_y = worm[0]['y']
    if head_x == 0:
        return LEFT
    if head_x == CELLWIDTH - 1:
        return RIGHT
    if head_y == 0:
        return UP
    if head_y == CELLHEIGHT - 1:
        return DOWN
    return 0
def checkSmartTurn(worm, listOfNo, direction1, direction2):
    """Decide whether turning from `direction1` into `direction2` allows
    a tight double turn ("smart turn") that hugs an obstacle.

    Looks three cells ahead of the head in the turn direction: if that
    cell is blocked while the cell two ahead is free, the worm can make
    a double turn without dying in the corner.

    :param worm:       list of worm cells, head first
    :param listOfNo:   cells that must not be entered
    :param direction1: current travel direction (UP/DOWN/LEFT/RIGHT)
    :param direction2: intended turn direction
    :return: True if the smart turn applies, else False.
             FIX: direction pairs on the same axis previously fell
             through and returned None implicitly; they now return
             False explicitly (truthiness-compatible for all callers).
    """
    head = worm[HEAD]
    if direction1 == UP or direction1 == DOWN:
        if direction2 == RIGHT:
            return ({'x': head['x'] + 3, 'y': head['y']} in listOfNo
                    and {'x': head['x'] + 2, 'y': head['y']} not in listOfNo)
        if direction2 == LEFT:
            return ({'x': head['x'] - 3, 'y': head['y']} in listOfNo
                    and {'x': head['x'] - 2, 'y': head['y']} not in listOfNo)
    if direction1 == LEFT or direction1 == RIGHT:
        if direction2 == UP:
            return ({'x': head['x'], 'y': head['y'] - 3} in listOfNo
                    and {'x': head['x'], 'y': head['y'] - 2} not in listOfNo)
        if direction2 == DOWN:
            return ({'x': head['x'], 'y': head['y'] + 3} in listOfNo
                    and {'x': head['x'], 'y': head['y'] + 2} not in listOfNo)
    return False
def findBetterDirection(worm, direction, lastWall):
    """Pick the move(s) leading into the largest free area while stalling.

    For the current `direction`, compares the flood-fill areas
    (`calcArea`) straight ahead and to either side of the head. Returns
    a list of one or two direction constants: straight ahead when it
    ties for the largest area, otherwise a turn — doubled into a
    "smart turn" when checkSmartTurn approves.

    :param worm:      worm cells, head first
    :param direction: current travel direction (UP/DOWN/LEFT/RIGHT)
    :param lastWall:  wall constant forwarded to calcArea
    :return: list of 1-2 direction constants
    """
    listOfNo = list(worm)
    smartTurn = False #dont kill yourself in the corner
    # NOTE(review): `smartTurn` is assigned but never used afterwards.
    if direction == UP:
        areaLeft = calcArea({'x': worm[HEAD]['x']-1, 'y': worm[HEAD]['y']},worm,lastWall)
        areaRight = calcArea({'x': worm[HEAD]['x']+1, 'y': worm[HEAD]['y']},worm,lastWall)
        if areaLeft == 0 and areaRight == 0:
            # both sides blocked: keep going straight
            return [direction]
        areaStraight = calcArea({'x': worm[HEAD]['x'], 'y': worm[HEAD]['y']-1},worm,lastWall)
        maxArea = max(areaLeft,areaRight,areaStraight)
        print ('Options:', 'left:',areaLeft,'right:',areaRight,'straight:',areaStraight)
        if maxArea == areaStraight:
            return [direction]
        elif maxArea == areaLeft:
            if checkSmartTurn(worm,listOfNo,direction,LEFT):
                print('Smart Turn Enabled')
                return [LEFT, LEFT]
            else:
                return [LEFT, DOWN]
        else:
            if checkSmartTurn(worm,listOfNo,direction,RIGHT):
                print('Smart Turn Enabled')
                return [RIGHT, RIGHT]
            else:
                return [RIGHT,DOWN]
    if direction == DOWN:
        areaLeft = calcArea({'x': worm[HEAD]['x']-1, 'y': worm[HEAD]['y']},worm,lastWall)
        areaRight = calcArea({'x': worm[HEAD]['x']+1, 'y': worm[HEAD]['y']},worm,lastWall)
        if areaLeft == 0 and areaRight == 0:
            return [direction]
        areaStraight = calcArea({'x': worm[HEAD]['x'], 'y': worm[HEAD]['y']+1},worm,lastWall)
        maxArea = max(areaLeft,areaRight,areaStraight)
        print ('Options:','left:',areaLeft,'right:',areaRight,'straight:',areaStraight)
        if maxArea == areaStraight:
            return [direction]
        elif areaLeft == maxArea:
            if checkSmartTurn(worm,listOfNo,direction,LEFT):
                print('Smart Turn Enabled')
                return [LEFT, LEFT]
            else:
                return [LEFT, UP]
        else:
            if checkSmartTurn(worm,listOfNo,direction,RIGHT):
                print('Smart Turn Enabled')
                return [RIGHT, RIGHT]
            else:
                return [RIGHT,UP]
    elif direction == LEFT:
        areaUp = calcArea({'x': worm[HEAD]['x'], 'y': worm[HEAD]['y'] - 1},worm,lastWall)
        areaDown = calcArea({'x': worm[HEAD]['x'], 'y': worm[HEAD]['y'] + 1},worm,lastWall)
        if areaUp == 0 and areaDown == 0:
            return [direction]
        areaStraight = calcArea({'x': worm[HEAD]['x']-1, 'y': worm[HEAD]['y']},worm,lastWall)
        maxArea = max(areaStraight,areaUp,areaDown)
        print ('Options:','up:',areaUp,'down:',areaDown,'straight:',areaStraight)
        if maxArea == areaStraight:
            return [direction]
        elif maxArea == areaUp:
            if checkSmartTurn(worm,listOfNo,direction,UP):
                print('Smart Turn Enabled')
                return [UP, UP]
            else:
                return [UP,RIGHT]
        else:
            if checkSmartTurn(worm,listOfNo,direction,DOWN):
                print('Smart Turn Enabled')
                return [DOWN, DOWN]
            else:
                return [DOWN,RIGHT]
    elif direction == RIGHT:
        areaUp = calcArea({'x': worm[HEAD]['x'], 'y': worm[HEAD]['y'] - 1},worm,lastWall)
        areaDown = calcArea({'x': worm[HEAD]['x'], 'y': worm[HEAD]['y'] + 1},worm,lastWall)
        if areaUp == 0 and areaDown == 0:
            return [direction]
        areaStraight = calcArea({'x': worm[HEAD]['x']+1, 'y': worm[HEAD]['y']},worm,lastWall)
        maxArea = max(areaStraight,areaUp,areaDown)
        print ('Options:','up:',areaUp,'down:',areaDown,'straight:',areaStraight)
        if maxArea == areaStraight:
            return [direction]
        elif areaUp ==maxArea:
            if checkSmartTurn(worm,listOfNo,direction,UP):
                print('Smart Turn Enabled')
                return [UP, UP]
            else:
                return [UP,LEFT]
        else:
            if checkSmartTurn(worm,listOfNo,direction,DOWN):
                print('Smart Turn Enabled')
                return [DOWN, DOWN]
            else:
                return [DOWN,LEFT]
def findNextDirection(worm, direction, lastWall):
    """Pick the direction whose reachable free area is largest.

    Flood-fills (via calcArea) from each of the four cells adjacent to the
    worm's head and returns a one-element list holding the winning
    direction.  Ties resolve in the order UP, DOWN, LEFT, RIGHT.
    `direction` is kept for interface compatibility but is not used.
    FIX: removed the unused local `listOfNo = list(worm)`.
    """
    head = worm[HEAD]
    areaLeft = calcArea({'x': head['x'] - 1, 'y': head['y']}, worm, lastWall)
    areaRight = calcArea({'x': head['x'] + 1, 'y': head['y']}, worm, lastWall)
    areaUp = calcArea({'x': head['x'], 'y': head['y'] - 1}, worm, lastWall)
    areaDown = calcArea({'x': head['x'], 'y': head['y'] + 1}, worm, lastWall)
    maxArea = max(areaLeft, areaRight, areaUp, areaDown)
    if maxArea == areaUp:
        return [UP]
    elif maxArea == areaDown:
        return [DOWN]
    elif maxArea == areaLeft:
        return [LEFT]
    else:
        return [RIGHT]
def calcArea(point, worm, lastWall):
    """Flood-fill from `point` and return the count of reachable free cells.

    Cells occupied by the worm, the current walls (module-level `wallCoords`)
    or the upcoming wall layout (`getNextWallCoords(lastWall)`) are blocked.
    Reaching the worm's tail adds a 200-point bonus, since the tail cell
    frees up as the worm advances.  Returns 0 when `point` itself is blocked.
    """
    nextWall = getNextWallCoords(lastWall)
    if point in worm or point in wallCoords or point in nextWall:
        return 0
    tailBonus = 0
    visited = []
    searchPoints = [point]
    while searchPoints:
        current = searchPoints.pop()
        # FIX: skip cells already counted — the stack can hold duplicates,
        # which previously inflated the area count.
        if current in visited:
            continue
        for neighbor in getNeighborhood(current):
            if neighbor not in visited:
                # BUG FIX: the original tested `point in nextWall` here,
                # re-checking the start cell instead of the neighbor, so the
                # fill walked straight through the upcoming wall.
                if not (neighbor in worm or neighbor in wallCoords
                        or neighbor in nextWall):
                    searchPoints.append(neighbor)
                if neighbor == worm[-1]:
                    tailBonus = 200
        visited.append(current)
    return len(visited) + tailBonus
def AreaIsTooSmall(bound, point, worm, lastWall):
    """Return True when the area reachable from `point` never exceeds `bound`.

    Same flood fill as calcArea (worm tail adds a 200 bonus), but bails out
    with False as soon as the running count exceeds `bound` instead of
    completing the whole fill — that early exit is the point of having this
    function alongside calcArea.
    """
    nextWall = getNextWallCoords(lastWall)
    if point in worm or point in wallCoords or point in nextWall:
        return True
    tailBonus = 0
    visited = []
    searchPoints = [point]
    while searchPoints:
        current = searchPoints.pop()
        # Skip duplicates so each cell is counted once.
        if current in visited:
            continue
        for neighbor in getNeighborhood(current):
            if neighbor not in visited:
                # BUG FIX: original tested `point in nextWall` instead of the
                # neighbor, ignoring the upcoming wall during expansion.
                if not (neighbor in worm or neighbor in wallCoords
                        or neighbor in nextWall):
                    searchPoints.append(neighbor)
                if neighbor == worm[-1]:
                    tailBonus = 200
        visited.append(current)
        # FIX: check the bound incrementally — the count only grows, so we
        # can stop as soon as it is exceeded (the original filled everything
        # first, defeating the purpose of a bound check).
        if len(visited) + tailBonus > bound:
            return False
    return True
def calcCost(point, worm):
    """Cost of `point`: body index of the first adjacent worm segment.

    Returns 999 when no neighbour of `point` touches the worm's body
    (the head segment is ignored).
    """
    print('calculating cost of point', point)
    for neighbor in getNeighborhood(point):
        if neighbor in worm[1:]:
            return worm.index(neighbor)
    return 999
def calcDirection(path):
    """Convert a point path into a step-by-step list of direction tokens.

    Each consecutive pair of differing points yields one UP/DOWN/LEFT/RIGHT
    entry; repeated points (e.g. the starting cell) are skipped.
    """
    previous = path[0]
    directions = []
    heading = ''
    for current in path:
        if current['x'] > previous['x']:
            heading = RIGHT
        elif current['x'] < previous['x']:
            heading = LEFT
        elif current['y'] > previous['y']:
            heading = DOWN
        elif current['y'] < previous['y']:
            heading = UP
        else:
            # Same cell as the previous anchor — emit nothing and keep the
            # anchor where it was.
            continue
        previous = current
        directions.append(heading)
    return directions
def calculatePath(worm, apple, softCalculation):
    """Plan a path to the apple, committing only if an exit remains.

    Returns the path (list of points), [] when the apple is unreachable, or
    the string 'stall' when eating the apple would trap the worm.
    """
    path = mainCalculation(worm, apple, softCalculation)
    if not path:
        return []
    # Simulate the worm after traversing the path, then verify a route from
    # the projected head back to the tail still exists.
    projectedWorm = list(reversed(path)) + list(worm)
    escape = mainCalculation(projectedWorm, projectedWorm[-1], False)
    if not escape:
        print('No path out, dont go for apple')
        return 'stall'
    return path
def mainCalculation(worm,apple,softCalculation):
    """Depth-first search from the worm's head toward `apple`.

    Pass 1 (only when `softCalculation` is True) avoids the "soft" no-go
    cells (worm halo + soft wall ring); if it fails, pass 2 reruns the
    search against the hard no-go list (reachable worm cells + walls).
    Returns the recorded point list ending at the apple, or [] when the
    apple is unreachable.

    NOTE(review): pointsToPath records DFS expansion anchors, so it can
    contain repeats; calcDirection appears to tolerate those — verify.
    """
    pointsToPath= []
    discoverEdge = []
    newPoints = []
    exhaustedPoints = []
    numberOfPoints = 1 #if all point tested go back one point
    findingPath = True #false
    listOfNo = getListOfNo(worm)
    softListOfNo = getSoftListOfNo(worm)
    softListOfNo.extend(softWallCoords)
    discoverEdge.append(worm[0])
    exhaustedPoints.append(worm[0])
    lastPoint = discoverEdge[-1]
    pointsToPath.append(lastPoint)
    # If the apple itself sits in a soft no-go cell, the soft pass can never
    # reach it — skip straight to the hard pass.
    if (apple in softWallCoords) or (apple in softListOfNo):
        softCalculation = False
    #calculate available path (soft pass)
    while(findingPath and softCalculation):
        lastPoint = discoverEdge[-1]
        newPoints = getNeighborhood(lastPoint)
        # Sort farthest-first so the closest neighbour is pushed last and
        # popped first (DFS biased toward the apple).
        newPoints = sorted(newPoints, key = lambda k: calcDistance(k,apple), reverse = True) #sort newPoints
        numberOfPoints = len(newPoints)
        for point in newPoints:
            if point in softListOfNo:
                #print ('No Go Point:', point)
                numberOfPoints = numberOfPoints -1
            elif point in exhaustedPoints:
                #print ('considered already:', point)
                numberOfPoints = numberOfPoints -1
            else:
                discoverEdge.append(point) #new points --> discoverEdge, closest one last in
                pointsToPath.append(lastPoint)
                exhaustedPoints.append(lastPoint)
                #print (point)
                #exhaustedPoints.append(point)
        # Every neighbour rejected: backtrack one step.
        if numberOfPoints == 0:
            #backtrack
            exhaustedPoints.append(discoverEdge.pop())
            exhaustedPoints.append(pointsToPath.pop())
        if apple in discoverEdge:
            findingPath = 0
        # Soft search exhausted without success — fall back to hard pass.
        if not discoverEdge:
            softCalculation = False
            break
    #print ('softCalculation: ', softCalculation)
    if not softCalculation:
        # Hard pass: reset all search state, use the hard no-go list.
        pointsToPath= []
        discoverEdge = []
        newPoints = []
        exhaustedPoints = []
        numberOfPoints = 1 #if all point tested go back one point
        findingPath = True #false
        listOfNo = getListOfNo(worm)
        discoverEdge.append(worm[0])
        exhaustedPoints.append(worm[0])
        lastPoint = discoverEdge[-1]
        pointsToPath.append(lastPoint)
        #calculate available path (hard pass)
        while(findingPath):
            lastPoint = discoverEdge[-1]
            newPoints = getNeighborhood(lastPoint)
            newPoints = sorted(newPoints, key = lambda k: calcDistance(k,apple), reverse = True) #sort newPoints
            numberOfPoints = len(newPoints)
            for point in newPoints:
                if point in listOfNo:
                    #print ('No Go Point:', point)
                    numberOfPoints = numberOfPoints -1
                elif point in exhaustedPoints:
                    #print ('considered already:', point)
                    numberOfPoints = numberOfPoints -1
                else:
                    discoverEdge.append(point) #new points --> discoverEdge, closest one last in
                    pointsToPath.append(lastPoint)
                    exhaustedPoints.append(lastPoint)
                    #print (point)
                    #exhaustedPoints.append(point)
            if numberOfPoints == 0:
                #backtrack
                exhaustedPoints.append(discoverEdge.pop())
                exhaustedPoints.append(pointsToPath.pop())
            if apple in discoverEdge:
                findingPath = 0
            if not discoverEdge:
                #print ('stalling...') #should start stalling since no path found
                return []
    '''
    #Debugging................
    #Draw path found
    DISPLAYSURF.fill(BGCOLOR)
    drawGrid()
    drawWorm(worm)
    #drawEdgeOfDiscovery(discoverEdge)
    drawEdgeOfDiscovery(pointsToPath)
    drawEdgeOfDiscovery(listOfNo)
    drawApple(apple)
    pygame.display.update()
    pauseGame()
    print ('points to path')
    print (pointsToPath)
    '''
    ##WHEN DISCOVER EDGE IS EMPTY, TRY FIND TAIL
    pointsToPath.append(apple) #adding in the last point
    return pointsToPath
def getNeighborhood(point):
    """Orthogonally adjacent cells of `point`, never with negative coords.

    Candidate order: right, left, down, up.  A candidate is skipped when it
    would go below 0; steps up to CELLWIDTH/CELLHEIGHT are still included
    (those coincide with wall cells, which callers filter out themselves).
    """
    x, y = point['x'], point['y']
    neighborhood = []
    if x < CELLWIDTH:
        neighborhood.append({'x': x + 1, 'y': y})
    if x > 0:
        neighborhood.append({'x': x - 1, 'y': y})
    if y < CELLHEIGHT:
        neighborhood.append({'x': x, 'y': y + 1})
    if y > 0:
        neighborhood.append({'x': x, 'y': y - 1})
    return neighborhood
def calcDistance(point, apple):
    """Manhattan (taxicab) distance between two grid points."""
    return abs(point['x'] - apple['x']) + abs(point['y'] - apple['y'])
def getSoftListOfNo(worm):
    """Soft no-go list: the worm's reachable body cells plus their halo.

    Currently just delegates to getWormSurroundings (the soft-wall extension
    is commented out in the original and handled by callers instead).
    """
    return list(getWormSurroundings(worm))
def getWormSurroundings(worm):
    """Head plus each 'reachable' body segment and its 8-cell halo.

    A segment counts as reachable when its Manhattan distance from the head
    is smaller than the number of moves until the tail vacates it, plus one.
    Duplicates are removed while preserving first-seen order.
    """
    headX = worm[0]['x']
    headY = worm[0]['y']
    cells = []
    for index, segment in enumerate(worm):
        if index == 0:
            cells.append(segment)
            continue
        distance = abs(segment['x'] - headX) + abs(segment['y'] - headY)
        movesUntilFree = len(worm) - index
        if distance < movesUntilFree + 1:
            cells.append(segment)
            # Same halo order as the original: E, W, S, N, SE, NW, SW?, NE?
            for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1),
                           (1, 1), (-1, -1), (-1, 1), (1, -1)):
                cells.append({'x': segment['x'] + dx, 'y': segment['y'] + dy})
    unique = []
    seen = set()
    for cell in cells:
        key = (cell['x'], cell['y'])
        if key not in seen:
            seen.add(key)
            unique.append(cell)
    return unique
def getListOfNo(worm):
    """Hard no-go list: reachable worm segments plus every wall cell.

    A segment is included when its Manhattan distance from the head is less
    than the number of moves before the tail vacates it, plus one.
    """
    headX = worm[0]['x']
    headY = worm[0]['y']
    listOfNo = []
    for index, segment in enumerate(worm):
        distance = abs(segment['x'] - headX) + abs(segment['y'] - headY)
        movesUntilFree = len(worm) - index
        if distance < movesUntilFree + 1:
            listOfNo.append(segment)
    listOfNo.extend(wallCoords)
    return listOfNo
def findWall():
    """Build the hard wall: the ring of cells one step outside the board."""
    walls = []
    # Left/right columns sit just outside the playable x range.
    for y in range(CELLHEIGHT):
        walls.append({'x': -1, 'y': y})
        walls.append({'x': CELLWIDTH, 'y': y})
    # Top/bottom rows sit just outside the playable y range.
    for x in range(CELLWIDTH):
        walls.append({'x': x, 'y': -1})
        walls.append({'x': x, 'y': CELLHEIGHT})
    return walls
def findSoftWall():
    """Build the soft wall: the outermost ring of playable board cells."""
    walls = []
    # First and last playable columns.
    for y in range(CELLHEIGHT):
        walls.append({'x': 0, 'y': y})
        walls.append({'x': CELLWIDTH - 1, 'y': y})
    # First and last playable rows.
    for x in range(CELLWIDTH):
        walls.append({'x': x, 'y': 0})
        walls.append({'x': x, 'y': CELLHEIGHT - 1})
    return walls
def drawEdgeOfDiscovery(points):
    """Paint every cell in `points` orange, highlighting the last in white.

    FIX: the original built `lastPointRect` but then drew the stale loop
    variable `wormSegmentRect` (same rect only by accident of loop order);
    it also crashed on an empty list via `points[-1]`.
    """
    if not points:
        return
    for point in points:
        x = point['x'] * CELLSIZE
        y = point['y'] * CELLSIZE
        cellRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
        pygame.draw.rect(DISPLAYSURF, ORANGE, cellRect)
    lastPointRect = pygame.Rect(points[-1]['x'] * CELLSIZE,
                                points[-1]['y'] * CELLSIZE,
                                CELLSIZE, CELLSIZE)
    pygame.draw.rect(DISPLAYSURF, (255, 255, 255), lastPointRect)
def sectionBreak():
    """Print a highly visible separator into the console log."""
    for _ in range(7):
        print('AAAAAAAAAAAAAAAAAAAA')
def pauseGame():
    """Block until the player presses SPACE.

    FIX: the original assigned `pauseGame = True`, shadowing the function's
    own name inside its body; renamed the flag to `paused`.
    NOTE(review): this busy-waits on the event queue with no frame delay —
    consider pygame.event.wait() if CPU usage matters.
    """
    paused = True
    while paused:
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                if event.key == K_SPACE:
                    paused = False
def oppositeDirection(direction):
    """Return the reverse of `direction` (None for an unknown value)."""
    opposites = {UP: DOWN, DOWN: UP, LEFT: RIGHT, RIGHT: LEFT}
    return opposites.get(direction)
def findNewHead(direction, wormCoords):
    """Compute the head cell after one step in `direction`."""
    headX = wormCoords[HEAD]['x']
    headY = wormCoords[HEAD]['y']
    if direction == UP:
        newHead = {'x': headX, 'y': headY - 1}
    elif direction == DOWN:
        newHead = {'x': headX, 'y': headY + 1}
    elif direction == LEFT:
        newHead = {'x': headX - 1, 'y': headY}
    elif direction == RIGHT:
        newHead = {'x': headX + 1, 'y': headY}
    return newHead
"""
////////////////////////////////////////////////////////////////////////////
"""
def drawPressKeyMsg():
    """Render the 'Press a key to play.' hint in the bottom-right corner."""
    pressKeySurf = BASICFONT.render('Press a key to play.', True, DARKGRAY)
    pressKeyRect = pressKeySurf.get_rect()
    pressKeyRect.topleft = (WINDOWWIDTH - 200, WINDOWHEIGHT - 30)
    DISPLAYSURF.blit(pressKeySurf, pressKeyRect)
def checkForKeyPress():
    """Return the first queued KEYUP key, or None when none is pending.

    A queued QUIT event or an ESC key press terminates the program.
    """
    if len(pygame.event.get(QUIT)) > 0:
        terminate()
    keyUpEvents = pygame.event.get(KEYUP)
    if len(keyUpEvents) == 0:
        return None
    if keyUpEvents[0].key == K_ESCAPE:
        terminate()
    return keyUpEvents[0].key
def showStartScreen():
    """Animate the rotating 'Wormy!' title until any key is pressed."""
    titleFont = pygame.font.Font('freesansbold.ttf', 100)
    titleSurf1 = titleFont.render('Wormy!', True, WHITE, DARKGREEN)
    titleSurf2 = titleFont.render('Wormy!', True, GREEN)
    degrees1 = 0
    degrees2 = 0
    while True:
        DISPLAYSURF.fill(BGCOLOR)
        # Two copies of the title spin at different rates for the effect.
        rotatedSurf1 = pygame.transform.rotate(titleSurf1, degrees1)
        rotatedRect1 = rotatedSurf1.get_rect()
        rotatedRect1.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
        DISPLAYSURF.blit(rotatedSurf1, rotatedRect1)
        rotatedSurf2 = pygame.transform.rotate(titleSurf2, degrees2)
        rotatedRect2 = rotatedSurf2.get_rect()
        rotatedRect2.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)
        DISPLAYSURF.blit(rotatedSurf2, rotatedRect2)
        drawPressKeyMsg()
        if checkForKeyPress():
            pygame.event.get() # clear event queue
            return
        pygame.display.update()
        FPSCLOCK.tick(FPS)
        degrees1 += 3 # rotate by 3 degrees each frame
        degrees2 += 7 # rotate by 7 degrees each frame
def terminate():
    """Announce death, wait for SPACE (via pauseGame), then exit the process."""
    print('YOU DIED!')
    pauseGame()
    pygame.quit()
    sys.exit()
def getRandomLocation(worm):
    """Return a uniformly random board cell not occupied by the worm.

    Rejection-samples; assumes the worm does not fill the whole board.
    """
    while True:
        location = {'x': random.randint(0, CELLWIDTH - 1),
                    'y': random.randint(0, CELLHEIGHT - 1)}
        if location not in worm:
            return location
def showGameOverScreen():
    """Display 'Game Over' and wait for a key press to continue."""
    gameOverFont = pygame.font.Font('freesansbold.ttf', 150)
    gameSurf = gameOverFont.render('Game', True, WHITE)
    overSurf = gameOverFont.render('Over', True, WHITE)
    gameRect = gameSurf.get_rect()
    overRect = overSurf.get_rect()
    gameRect.midtop = (WINDOWWIDTH / 2, 10)
    overRect.midtop = (WINDOWWIDTH / 2, gameRect.height + 10 + 25)
    DISPLAYSURF.blit(gameSurf, gameRect)
    DISPLAYSURF.blit(overSurf, overRect)
    drawPressKeyMsg()
    pygame.display.update()
    # Brief delay so a key held at the moment of death isn't consumed.
    pygame.time.wait(500)
    checkForKeyPress() # clear out any key presses in the event queue
    while True:
        if checkForKeyPress():
            pygame.event.get() # clear event queue
            return
def drawScore(score):
    """Render the current score in the top-right corner."""
    scoreSurf = BASICFONT.render('Score: %s' % (score), True, WHITE)
    scoreRect = scoreSurf.get_rect()
    scoreRect.topleft = (WINDOWWIDTH - 120, 10)
    DISPLAYSURF.blit(scoreSurf, scoreRect)
def drawWorm(wormCoords):
    """Draw every worm segment as a white cell (with a 1px gap border)."""
    for coord in wormCoords:
        x = coord['x'] * CELLSIZE
        y = coord['y'] * CELLSIZE
        #wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
        #pygame.draw.rect(DISPLAYSURF, WHITE, wormSegmentRect)
        wormInnerSegmentRect = pygame.Rect(x + 1, y + 1, CELLSIZE - 2, CELLSIZE - 2)
        pygame.draw.rect(DISPLAYSURF, WHITE, wormInnerSegmentRect)
    '''
    x = wormCoords[0]['x'] * CELLSIZE
    y = wormCoords[0]['y'] * CELLSIZE
    wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
    pygame.draw.rect(DISPLAYSURF, WHITE, wormSegmentRect)
    x = wormCoords[-1]['x'] * CELLSIZE
    y = wormCoords[-1]['y'] * CELLSIZE
    wormSegmentRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
    pygame.draw.rect(DISPLAYSURF, WHITE, wormSegmentRect)
    '''
def drawApple(coord,lastApple):
    """Draw the apple cell in red.

    `lastApple` is currently unused — the erase-previous-apple code below is
    commented out.
    """
    x = coord['x'] * CELLSIZE
    y = coord['y'] * CELLSIZE
    appleRect = pygame.Rect(x, y, CELLSIZE, CELLSIZE)
    pygame.draw.rect(DISPLAYSURF, RED, appleRect)
    #x1 = lastApple['x'] * CELLSIZE
    #y1 = lastApple['y'] * CELLSIZE
    #appleRect = pygame.Rect(x1, y1, CELLSIZE, CELLSIZE)
    #pygame.draw.rect(DISPLAYSURF, RED, appleRect)
def drawGrid():
    """Draw the background grid lines.

    Currently disabled: the early return deliberately skips the drawing
    code, leaving the loops below as intentionally dead code.
    """
    return #do nothing
    for x in range(0, WINDOWWIDTH, CELLSIZE): # draw vertical lines
        pygame.draw.line(DISPLAYSURF, DARKGRAY, (x, 0), (x, WINDOWHEIGHT))
    for y in range(0, WINDOWHEIGHT, CELLSIZE): # draw horizontal lines
        pygame.draw.line(DISPLAYSURF, DARKGRAY, (0, y), (WINDOWWIDTH, y))
# Script entry point: start the game only when executed directly.
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
"""
Defines the fields that can be added to redisco models.
"""
import sys
from datetime import datetime, date
from dateutil.tz import tzutc, tzlocal
from calendar import timegm
from redisco.containers import List
from exceptions import FieldValidationError, MissingID
__all__ = ['Attribute', 'CharField', 'ListField', 'DateTimeField',
'DateField', 'ReferenceField', 'Collection', 'IntegerField',
'FloatField', 'BooleanField', 'Counter']
class Attribute(object):
    """Defines an attribute of the model.

    The attribute accepts strings and they are stored in Redis as
    they are - strings.

    Options:
        name      -- alternate name of the attribute. This will be used
                     as the key to use when interacting with Redis.
        indexed   -- Index this attribute. Unindexed attributes cannot
                     be used in queries. Default: True.
        required  -- validate() rejects None/blank values when True.
        unique    -- validates the uniqueness of the value of the
                     attribute.
        validator -- a callable that can validate the value of the
                     attribute.
        default   -- Initial value of the attribute. A callable is
                     invoked lazily on first read.
    """
    def __init__(self,
                 name=None,
                 indexed=True,
                 required=False,
                 validator=None,
                 unique=False,
                 default=None):
        self.name = name
        self.indexed = indexed
        self.required = required
        self.validator = validator
        self.default = default
        self.unique = unique
        # Numeric subclasses flip this on: their values can be used as
        # sorted-set scores in Redis.
        self.zindexable = False
    def __get__(self, instance, owner):
        # Descriptor read: the value is cached on the instance as `_<name>`;
        # on first access, materialize (and cache) the default.
        try:
            return getattr(instance, '_' + self.name)
        except AttributeError:
            if callable(self.default):
                default = self.default()
            else:
                default = self.default
            self.__set__(instance, default)
            return default
    def __set__(self, instance, value):
        setattr(instance, '_' + self.name, value)
    def typecast_for_read(self, value):
        """Typecasts the value for reading from Redis."""
        # The redis client encodes all unicode data to utf-8 by default.
        return value.decode('utf-8')
    def typecast_for_storage(self, value):
        """Typecasts the value for storing to Redis."""
        try:
            return unicode(value)
        except UnicodeError:
            return value.decode('utf-8')
    def value_type(self):
        return unicode
    def acceptable_types(self):
        return basestring
    def validate(self, instance):
        """Collect (name, reason) tuples; raise FieldValidationError if any."""
        val = getattr(instance, self.name)
        errors = []
        # type_validation
        if val is not None and not isinstance(val, self.acceptable_types()):
            errors.append((self.name, 'bad type',))
        # validate first standard stuff
        if self.required:
            if val is None or not unicode(val).strip():
                errors.append((self.name, 'required'))
        # validate uniqueness
        if val and self.unique:
            error = self.validate_uniqueness(instance, val)
            if error:
                errors.append(error)
        # validate using validator
        if self.validator:
            r = self.validator(self.name, val)
            if r:
                errors.extend(r)
        if errors:
            raise FieldValidationError(errors)
    def validate_uniqueness(self, instance, val):
        """Return an error tuple when another saved instance holds `val`."""
        encoded = self.typecast_for_storage(val)
        matches = instance.__class__.objects.filter(**{self.name: encoded})
        if len(matches) > 0:
            try:
                instance_id = instance.id
                no_id = False
            except MissingID:
                no_id = True
            # A single match that is this very instance is fine; any other
            # combination means the value is already taken.
            if (len(matches) != 1) or no_id or (
                    matches.first().id != instance.id):
                return (self.name, 'not unique',)
class CharField(Attribute):
    """An Attribute whose value may not exceed `max_length` characters."""
    def __init__(self, max_length=255, **kwargs):
        super(CharField, self).__init__(**kwargs)
        self.max_length = max_length
    def validate(self, instance):
        """Run the base validations, then enforce the length limit."""
        errors = []
        super(CharField, self).validate(instance)
        val = getattr(instance, self.name)
        if val and len(val) > self.max_length:
            errors.append((self.name, 'exceeds max length'))
        if errors:
            raise FieldValidationError(errors)
class BooleanField(Attribute):
    """Attribute stored in Redis as "1"/"0" and read back as a bool."""
    def typecast_for_read(self, value):
        # Redis hands back the raw string: "0" -> False, anything else True.
        return int(value) != 0
    def typecast_for_storage(self, value):
        # None collapses to "0"; otherwise truthiness decides.
        return "1" if value else "0"
    def value_type(self):
        return bool
    def acceptable_types(self):
        return self.value_type()
class IntegerField(Attribute):
    """Attribute stored as a numeric string and read back as int."""
    def __init__(self, **kwargs):
        super(IntegerField, self).__init__(**kwargs)
        # Integers can serve as sorted-set scores.
        self.zindexable = True
    def typecast_for_read(self, value):
        return int(value)
    def typecast_for_storage(self, value):
        # None is stored as "0".
        if value is None:
            return "0"
        return unicode(value)
    def value_type(self):
        return int
    def acceptable_types(self):
        # `long` exists because this module targets Python 2.
        return (int, long)
class FloatField(Attribute):
    """Attribute stored via "%f" formatting and read back as float."""
    def __init__(self, **kwargs):
        super(FloatField, self).__init__(**kwargs)
        # Floats can serve as sorted-set scores.
        self.zindexable = True
    def typecast_for_read(self, value):
        return float(value)
    def typecast_for_storage(self, value):
        # None collapses to "0"; everything else keeps six decimal places.
        return "0" if value is None else "%f" % value
    def value_type(self):
        return float
    def acceptable_types(self):
        return self.value_type()
class DateTimeField(Attribute):
    """Attribute storing a datetime as a "<epoch>.<microseconds>" string.

    auto_now / auto_now_add mirror the Django options of the same name and
    are consumed by the model layer, not by this field itself.
    """
    def __init__(self, auto_now=False, auto_now_add=False, **kwargs):
        super(DateTimeField, self).__init__(**kwargs)
        self.auto_now = auto_now
        self.auto_now_add = auto_now_add
        # Timestamps are numeric, so they can serve as sorted-set scores.
        self.zindexable = True
    def typecast_for_read(self, value):
        """Parse the stored epoch string into an aware UTC datetime."""
        try:
            # The value was written via timegm(), i.e. it is already UTC,
            # so attach tzutc() without converting.
            dt = datetime.fromtimestamp(float(value), tzutc())
            return dt
        # BUG FIX: the original `except TypeError, ValueError:` is Python 2
        # syntax for catching ONLY TypeError (binding it to the name
        # ValueError); a tuple catches both as intended.
        except (TypeError, ValueError):
            return None
    def typecast_for_storage(self, value):
        """Render `value` as "<seconds>.<microseconds>" in UTC."""
        if not isinstance(value, datetime):
            raise TypeError("%s should be datetime object, and not a %s" %
                            (self.name, type(value)))
        if value is None:
            # NOTE(review): unreachable — None fails the isinstance check
            # above and raises TypeError first.  Kept for interface parity.
            return None
        # Are we timezone aware ? If no, make it TimeZone Local
        if value.tzinfo is None:
            value = value.replace(tzinfo=tzlocal())
        return "%d.%06d" % (float(timegm(value.utctimetuple())),
                            value.microsecond)
    def value_type(self):
        return datetime
    def acceptable_types(self):
        return self.value_type()
class DateField(Attribute):
    """Attribute storing a date as its proleptic-Gregorian ordinal string."""
    def __init__(self, auto_now=False, auto_now_add=False, **kwargs):
        super(DateField, self).__init__(**kwargs)
        self.auto_now = auto_now
        self.auto_now_add = auto_now_add
        # Ordinals are numeric, so they can serve as sorted-set scores.
        self.zindexable = True
    def typecast_for_read(self, value):
        """Parse the stored ordinal back into a datetime.date."""
        try:
            dt = date.fromordinal(int(value))
            return dt
        # BUG FIX: the original `except TypeError, ValueError:` caught only
        # TypeError (binding it to the name ValueError); a tuple catches both.
        except (TypeError, ValueError):
            return None
    def typecast_for_storage(self, value):
        """Render `value` as its ordinal day number."""
        if not isinstance(value, date):
            raise TypeError("%s should be date object, and not a %s" %
                            (self.name, type(value)))
        if value is None:
            # NOTE(review): unreachable — None already raised TypeError above.
            return None
        return "%d" % value.toordinal()
    def value_type(self):
        return date
    def acceptable_types(self):
        return self.value_type()
class ListField(object):
    """Stores a list of objects.

    target_type -- can be a Python object or a redisco model class.
    If target_type is not a redisco model class, the target_type should
    also be a callable that casts the (string) value of a list element into
    target_type. E.g. str, unicode, int, float.
    ListField also accepts a string that refers to a redisco model.
    """
    def __init__(self, target_type,
                 name=None,
                 indexed=True,
                 required=False,
                 validator=None,
                 default=None):
        self._target_type = target_type
        self.name = name
        self.indexed = indexed
        self.required = required
        self.validator = validator
        self.default = default or []
        # Imported here to avoid a circular import at module load time.
        from base import Model
        # A string target is assumed to name a redisco model (resolved
        # lazily in value_type()).
        self._redisco_model = (isinstance(target_type, basestring) or
                               issubclass(target_type, Model))
    def __get__(self, instance, owner):
        # Descriptor read: cached as `_<name>`; otherwise load from the
        # Redis list and cast each element.
        try:
            return getattr(instance, '_' + self.name)
        except AttributeError:
            if instance.is_new():
                val = self.default
            else:
                key = instance.key(att=self.name)
                val = List(key).members
            if val is not None:
                klass = self.value_type()
                if self._redisco_model:
                    # Elements are model ids; drop those that no longer exist.
                    val = filter(lambda o: o is not None,
                                 [klass.objects.get_by_id(v) for v in val])
                else:
                    val = [klass(v) for v in val]
            self.__set__(instance, val)
            return val
    def __set__(self, instance, value):
        setattr(instance, '_' + self.name, value)
    def value_type(self):
        # A string target is resolved to the actual model class on first
        # use, then cached in place of the string.
        if isinstance(self._target_type, basestring):
            t = self._target_type
            from base import get_model_from_key
            self._target_type = get_model_from_key(self._target_type)
            if self._target_type is None:
                raise ValueError("Unknown Redisco class %s" % t)
        return self._target_type
    def validate(self, instance):
        """Check container type, element types, requiredness, custom rules."""
        val = getattr(instance, self.name)
        errors = []
        if val:
            if not isinstance(val, list):
                errors.append((self.name, 'bad type'))
            else:
                for item in val:
                    if not isinstance(item, self.value_type()):
                        errors.append((self.name, 'bad type in list'))
        # validate first standard stuff
        if self.required:
            if not val:
                errors.append((self.name, 'required'))
        # validate using validator
        if self.validator:
            r = self.validator(val)
            if r:
                errors.extend(r)
        if errors:
            raise FieldValidationError(errors)
class Collection(object):
    """
    A simple container that will be replaced by the good imports
    and the good filter query.
    """
    def __init__(self, target_type):
        self.target_type = target_type
    def __get__(self, instance, owner):
        if not isinstance(self.target_type, str):
            raise TypeError("A collection only accepts a string "
                            "representing the Class")
        # Split "pkg.mod.Class" into module path and class name; import the
        # module when a path is present, otherwise resolve locally — a bare
        # class name means the class already lives in this module.
        module_path, _, class_name = self.target_type.rpartition(".")
        if module_path:
            module = __import__(module_path, fromlist=[class_name])
        else:
            module = sys.modules[__name__]
        klass = getattr(module, class_name)
        # e.g. a Book collection on an Author yields
        # Book.objects.filter(author_id=<author id>).
        return klass.objects.filter(
            **{instance.__class__.__name__.lower() + '_id': instance.id})
    def __set__(self, instance, value):
        """
        Prevent the argument to be overriden
        """
        raise AttributeError("can't override a collection of object")
class ReferenceField(object):
    """Descriptor referencing another redisco model by storing its id.

    The referenced object is exposed at `<name>` and its raw id at
    `<name>_id` (overridable via `attname`).
    """
    def __init__(self,
                 target_type,
                 name=None,
                 attname=None,
                 indexed=True,
                 required=False,
                 related_name=None,
                 default=None,
                 validator=None):
        self._target_type = target_type
        self.name = name
        self.indexed = indexed
        self.required = required
        self._attname = attname
        self._related_name = related_name
        self.validator = validator
        self.default = default
    def __set__(self, instance, value):
        """
        Will set the referenced object unless None is provided
        which will simply remove the reference
        """
        if not isinstance(value, self.value_type()) and \
                value is not None:
            raise TypeError
        # remove the cached value from the instance
        if hasattr(instance, '_' + self.name):
            delattr(instance, '_' + self.name)
        # Remove the attribute_id reference
        setattr(instance, self.attname, None)
        # Set it to the new value if any.
        if value is not None:
            setattr(instance, self.attname, value.id)
    def __get__(self, instance, owner):
        # Lazily fetch and cache the referenced object by its stored id;
        # any AttributeError during the lookup falls back to the default.
        try:
            if not hasattr(instance, '_' + self.name):
                o = self.value_type().objects.get_by_id(
                    getattr(instance, self.attname))
                setattr(instance, '_' + self.name, o)
            return getattr(instance, '_' + self.name)
        except AttributeError:
            setattr(instance, '_' + self.name, self.default)
            return self.default
    def value_type(self):
        return self._target_type
    @property
    def attname(self):
        # Defaults to "<name>_id" when not given explicitly.
        if self._attname is None:
            self._attname = self.name + '_id'
        return self._attname
    @property
    def related_name(self):
        return self._related_name
    def validate(self, instance):
        """Check reference type, requiredness and any custom validator."""
        val = getattr(instance, self.name)
        errors = []
        if val:
            if not isinstance(val, self.value_type()):
                errors.append((self.name, 'bad type for reference'))
        # validate first standard stuff
        if self.required:
            if not val:
                errors.append((self.name, 'required'))
        # validate using validator
        if self.validator:
            r = self.validator(val)
            if r:
                errors.extend(r)
        if errors:
            raise FieldValidationError(errors)
class Counter(IntegerField):
    """Read-only integer field whose live value sits in the Redis hash.

    Assignment is forbidden; counters are meant to be changed with atomic
    Redis increments, never by setting the attribute.
    """
    def __init__(self, **kwargs):
        super(Counter, self).__init__(**kwargs)
        # A counter always has a concrete numeric default.
        if kwargs.get('default') is None:
            self.default = 0
        self.zindexable = True
    def __set__(self, instance, value):
        raise AttributeError("can't set a counter.")
    def __get__(self, instance, owner):
        if instance.is_new():
            # Unsaved instances have no hash entry yet.
            return 0
        raw = instance.db.hget(instance.key(), self.name)
        return 0 if raw is None else int(raw)
| |
from datetime import datetime
from decimal import Decimal, ROUND_DOWN
import uuid
import base64
import random
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils import timezone
import boto3
from botocore.client import Config
from graphene import relay, Int, ObjectType
from events.models import Event
class TotalCountMixin(ObjectType):
    """Mixin adding a `totalCount` field to a graphene relay connection."""
    @classmethod
    def get_connection(cls):
        # Builds a relay Connection subclass named "<Type>Connection" whose
        # total_count resolves to the length of the connection's result set.
        class CountableConnection(relay.Connection):
            total_count = Int()
            class Meta:
                name = '{}Connection'.format(cls._meta.name)
                node = cls
            @staticmethod
            def resolve_total_count(root, args, context, info):
                return root.length
        return CountableConnection
def uuid_from_b64(encoded):
    """Decode a base64 relay id of the form "SomeNode:<uuid>" into a UUID.

    Returns None when `encoded` is None.
    """
    if encoded is None:
        return None
    decoded = base64.b64decode(encoded)
    raw_uuid = decoded.split(b":")[1]
    return uuid.UUID(raw_uuid.decode("utf-8"))
def int_from_b64(encoded):
    """Decode a base64 relay id of the form "SomeNode:<int>" into an int.

    Returns None when `encoded` is None.
    """
    if encoded is None:
        return None
    decoded = base64.b64decode(encoded)
    return int(decoded.split(b":")[1])
def b64_from_uuid(the_uuid, node):
    """Encode "<node>:<uuid>" into the base64 form used for relay ids."""
    payload = "{}:{}".format(node, the_uuid)
    return base64.b64encode(payload.encode("utf-8"))
def parse_schema_non_fk_fields(mutation_data, non_fk_fields, input):
    """Copy each plain (non-FK) field present in `input` into `mutation_data`.

    Mutates and returns `mutation_data`.  (`input` shadows the builtin, but
    the parameter name is part of the public signature and is kept.)
    """
    for field in (f for f in non_fk_fields if f in input):
        mutation_data[field] = input[field]
    return mutation_data
def parse_schema_fk_fields(mutation_data, fk_fields, input):
    """Resolve foreign-key fields from GraphQL input into model objects.

    Only `related_model` / `source_model` (ContentType ids encoded as base64
    relay ids) are handled here; `updated_by` is deliberately skipped since
    it is stamped in schema_get_mutation_data.  Mutates and returns
    `mutation_data`.
    """
    for field in fk_fields:
        if field == 'updated_by':
            pass # handled in get_mutation_data
        if field == 'related_model' and 'related_model' in input:
            related_model = ContentType.objects.get(
                id=int_from_b64(input.get('related_model', None)))
            mutation_data["related_model"] = related_model
        if field == 'source_model' and 'source_model' in input:
            source_model = ContentType.objects.get(
                id=int_from_b64(input.get('source_model', None)))
            mutation_data["source_model"] = source_model
    # Template for wiring up additional FK fields:
    # if field == 'modelname' and 'modelname' in input:
    #     modelname = ModelName.objects.get(
    #         id=uuid_from_b64(input.get('modelname', None)))
    #     mutation_data['modelname'] = modelname
    return mutation_data
def schema_get_mutation_data(fk_fields, non_fk_fields, input, context, update):
    """Assemble model create/update kwargs from GraphQL input.

    Resolves FK fields first, then plain fields, then stamps the acting
    user as updated_by (updates) or created_by (creates) when a request
    context is available.
    """
    mutation_data = parse_schema_fk_fields({}, fk_fields, input)
    mutation_data = parse_schema_non_fk_fields(mutation_data, non_fk_fields, input)  # noqa
    if context is not None:
        audit_field = "updated_by" if update else "created_by"
        mutation_data[audit_field] = context.user
    return mutation_data
def schema_update_model(model_instance, mutation_data, fk_fields):
    """Apply `mutation_data` to the model, save it, and return a
    JSON-serializable dict describing the change (for event logging)."""
    def serializable(field, value):
        # FK values are logged by primary key; datetimes in ISO-8601 form.
        if field in fk_fields:
            return str(value.id)
        if isinstance(value, datetime):
            return value.isoformat()
        return value

    event_data = {}
    for field, value in mutation_data.items():
        setattr(model_instance, field, value)
        event_data[field] = serializable(field, value)
    model_instance.save()
    return event_data
def schema_define_user(context, username):
    """Resolve the acting user.

    Uses the request user when a context exists; otherwise (scripted or
    anonymous calls) get-or-creates a User by `username`.
    """
    if context is not None:
        return context.user
    user, _ = User.objects.get_or_create(username=username)
    return user
def schema_create_updated_event(source_model, id, event_data, user):
    """Record a "model.updated" Event row describing a mutation.

    `source_model` is a ContentType, `id` the mutated instance's pk, and
    `event_data` the JSON-serializable change dict from schema_update_model.
    """
    Event.objects.create(**{
        "source_model": source_model,
        "source_id": id,
        "event_at": timezone.now(),
        "event_type": "model.updated",
        "event_data": event_data,
        "created_by": user
    })
def float_to_decimal(val):
    """
    Function that turns a Float number into a Decimal (rounded to two
    decimal points).
    By nature, this is an approximation, so use with caution.

    BUG FIX: the original built the result by slicing the stringified
    integer, which produced invalid strings such as ".-5" (raising
    decimal.InvalidOperation) for negative values in (-0.10, -0.005].
    """
    # Round to whole cents first; int() only for python 2 compatibility.
    cents = int(round(val * 100))
    # scaleb(-2) shifts the exponent by 10**-2 while keeping two decimal
    # digits in the representation (e.g. 150 -> Decimal('1.50')).
    return Decimal(cents).scaleb(-2)
def load_s3_client():
    """Build a boto3 S3 client using SigV4 and credentials from settings."""
    client = boto3.client(
        's3',
        config=Config(signature_version='s3v4'),
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
        region_name=settings.AWS_S3_REGION_NAME,
    )
    return client
def objects_as_json(d):
    """Recursively make dict `d` JSON-serializable, in place (and return it).

    Decimals become strings truncated (not rounded) to two decimal places;
    UUIDs become their canonical string form; nested dicts recurse.
    """
    for key, value in d.items():
        if isinstance(value, dict):
            d[key] = objects_as_json(value)
        elif isinstance(value, Decimal):
            # ROUND_DOWN truncates toward zero at two decimal places.
            d[key] = str(value.quantize(Decimal('.01'), rounding=ROUND_DOWN))
        elif isinstance(value, uuid.UUID):
            d[key] = str(value)
    return d
def random_digits(digits):
    """Return a uniformly random integer with exactly `digits` digits."""
    return random.randint(10 ** (digits - 1), 10 ** digits - 1)
def digits_of(number):
    """Return the decimal digits of `number` as a list of ints."""
    return list(map(int, str(number)))
def luhn_checksum(the_number):
    """Luhn mod-10 checksum of `the_number` (0 means the number is valid)."""
    # Inline digit extraction (the sibling digits_of helper, folded in).
    digits = [int(ch) for ch in str(the_number)]
    odd_digits = digits[-1::-2]     # rightmost digit, then every other one
    even_digits = digits[-2::-2]
    total = sum(odd_digits)
    for d in even_digits:
        # Double the digit and add its digit sum (2*d is at most 18).
        doubled = 2 * d
        total += doubled // 10 + doubled % 10
    return total % 10
def calculate_luhn(partial_number):
    """Compute the Luhn check digit to append to `partial_number`."""
    checksum = luhn_checksum(int(partial_number) * 10)
    # (10 - 0) % 10 == 0, so this collapses the original two-way branch.
    return (10 - checksum) % 10
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests specific to Feature Columns and Keras integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.feature_column import dense_features_v2
from tensorflow.python.feature_column import feature_column_lib as fc
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.premade import linear
from tensorflow.python.keras.premade import wide_deep
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.platform import test
class TestDNNModel(keras.models.Model):
  """Minimal subclassed model: a DenseFeatures input layer feeding one Dense layer."""

  def __init__(self, feature_columns, units, name=None, **kwargs):
    super(TestDNNModel, self).__init__(name=name, **kwargs)
    self._input_layer = fc.DenseFeatures(feature_columns, name='input_layer')
    self._dense_layer = keras.layers.Dense(units, name='dense_layer')

  def call(self, features):
    """Convert the feature dict to a dense tensor and project it to `units`."""
    dense_input = self._input_layer(features)
    return self._dense_layer(dense_input)
class FeatureColumnsIntegrationTest(keras_parameterized.TestCase):
  """Most Sequential model API tests are covered in `training_test.py`.
  """
  # Single numeric feature column through DenseFeatures in a Sequential
  # model; exercises fit/evaluate/predict with dict-of-arrays input.
  @keras_parameterized.run_all_keras_modes
  def test_sequential_model(self):
    columns = [fc.numeric_column('a')]
    model = keras.models.Sequential([
        fc.DenseFeatures(columns),
        keras.layers.Dense(64, activation='relu'),
        keras.layers.Dense(20, activation='softmax')
    ])
    model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly())
    x = {'a': np.random.random((10, 1))}
    y = np.random.randint(20, size=(10, 1))
    y = np_utils.to_categorical(y, num_classes=20)
    # fit is called twice to cover the re-entry path after a first epoch.
    model.fit(x, y, epochs=1, batch_size=5)
    model.fit(x, y, epochs=1, batch_size=5)
    model.evaluate(x, y, batch_size=5)
    model.predict(x, batch_size=5)
  # Same model as above, but fed from a tf.data Dataset of (x, y) pairs.
  @keras_parameterized.run_all_keras_modes
  def test_sequential_model_with_ds_input(self):
    columns = [fc.numeric_column('a')]
    model = keras.models.Sequential([
        fc.DenseFeatures(columns),
        keras.layers.Dense(64, activation='relu'),
        keras.layers.Dense(20, activation='softmax')
    ])
    model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly())
    y = np.random.randint(20, size=(100, 1))
    y = np_utils.to_categorical(y, num_classes=20)
    x = {'a': np.random.random((100, 1))}
    ds1 = dataset_ops.Dataset.from_tensor_slices(x)
    ds2 = dataset_ops.Dataset.from_tensor_slices(y)
    ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
    model.fit(ds, steps_per_epoch=1)
    model.fit(ds, steps_per_epoch=1)
    model.evaluate(ds, steps=1)
    model.predict(ds, steps=1)
  # Bucketized + crossed (age x thal) indicator columns in a Sequential
  # binary classifier; v2-only because crossed columns need v2 behavior.
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_sequential_model_with_crossed_column(self):
    feature_columns = []
    age_buckets = fc.bucketized_column(
        fc.numeric_column('age'),
        boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
    feature_columns.append(age_buckets)
    # indicator cols
    thal = fc.categorical_column_with_vocabulary_list(
        'thal', ['fixed', 'normal', 'reversible'])
    crossed_feature = fc.crossed_column([age_buckets, thal],
                                        hash_bucket_size=1000)
    crossed_feature = fc.indicator_column(crossed_feature)
    feature_columns.append(crossed_feature)
    feature_layer = fc.DenseFeatures(feature_columns)
    model = keras.models.Sequential([
        feature_layer,
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(1, activation='sigmoid')
    ])
    age_data = np.random.randint(10, 100, size=100)
    thal_data = np.random.choice(['fixed', 'normal', 'reversible'], size=100)
    inp_x = {'age': age_data, 'thal': thal_data}
    # NOTE(review): np.random.randint(0, 1) always yields 0 (high is
    # exclusive), so every label is zero; likely intended randint(0, 2) —
    # confirm whether all-zero labels are deliberate here.
    inp_y = np.random.randint(0, 1, size=100)
    ds = dataset_ops.Dataset.from_tensor_slices((inp_x, inp_y)).batch(5)
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'],)
    model.fit(ds, epochs=1)
    model.fit(ds, epochs=1)
    model.evaluate(ds)
    model.predict(ds)
  # Subclassed model (TestDNNModel) with two numeric feature columns,
  # trained from dict-of-arrays input.
  @keras_parameterized.run_all_keras_modes
  def test_subclassed_model_with_feature_columns(self):
    col_a = fc.numeric_column('a')
    col_b = fc.numeric_column('b')
    dnn_model = TestDNNModel([col_a, col_b], 20)
    dnn_model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly())
    x = {'a': np.random.random((10, 1)), 'b': np.random.random((10, 1))}
    y = np.random.randint(20, size=(10, 1))
    y = np_utils.to_categorical(y, num_classes=20)
    dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
    dnn_model.fit(x=x, y=y, epochs=1, batch_size=5)
    dnn_model.evaluate(x=x, y=y, batch_size=5)
    dnn_model.predict(x=x, batch_size=5)
  # Subclassed model variant fed from a tf.data Dataset.
  @keras_parameterized.run_all_keras_modes
  def test_subclassed_model_with_feature_columns_with_ds_input(self):
    col_a = fc.numeric_column('a')
    col_b = fc.numeric_column('b')
    dnn_model = TestDNNModel([col_a, col_b], 20)
    dnn_model.compile(
        optimizer='rmsprop',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
        run_eagerly=testing_utils.should_run_eagerly())
    y = np.random.randint(20, size=(100, 1))
    y = np_utils.to_categorical(y, num_classes=20)
    x = {'a': np.random.random((100, 1)), 'b': np.random.random((100, 1))}
    ds1 = dataset_ops.Dataset.from_tensor_slices(x)
    ds2 = dataset_ops.Dataset.from_tensor_slices(y)
    ds = dataset_ops.Dataset.zip((ds1, ds2)).batch(5)
    dnn_model.fit(ds, steps_per_epoch=1)
    dnn_model.fit(ds, steps_per_epoch=1)
    dnn_model.evaluate(ds, steps=1)
    dnn_model.predict(ds, steps=1)
  # TODO(kaftan) seems to throw an error when enabled.
  # Disabled by the DISABLED_ name prefix (not collected as a test).
  @keras_parameterized.run_all_keras_modes
  def DISABLED_test_function_model_feature_layer_input(self):
    col_a = fc.numeric_column('a')
    col_b = fc.numeric_column('b')
    feature_layer = fc.DenseFeatures([col_a, col_b], name='fc')
    dense = keras.layers.Dense(4)
    # This seems problematic.... We probably need something for DenseFeatures
    # the way Input is for InputLayer.
    output = dense(feature_layer)
    model = keras.models.Model([feature_layer], [output])
    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(
        optimizer,
        loss,
        metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
        loss_weights=loss_weights)
    data = ({'a': np.arange(10), 'b': np.arange(10)}, np.arange(10, 20))
    model.fit(*data, epochs=1)
  # TODO(kaftan) seems to throw an error when enabled.
  # Disabled by the DISABLED_ name prefix (not collected as a test).
  @keras_parameterized.run_all_keras_modes
  def DISABLED_test_function_model_multiple_feature_layer_inputs(self):
    col_a = fc.numeric_column('a')
    col_b = fc.numeric_column('b')
    col_c = fc.numeric_column('c')
    fc1 = fc.DenseFeatures([col_a, col_b], name='fc1')
    fc2 = fc.DenseFeatures([col_b, col_c], name='fc2')
    dense = keras.layers.Dense(4)
    # This seems problematic.... We probably need something for DenseFeatures
    # the way Input is for InputLayer.
    output = dense(fc1) + dense(fc2)
    model = keras.models.Model([fc1, fc2], [output])
    optimizer = 'rmsprop'
    loss = 'mse'
    loss_weights = [1., 0.5]
    model.compile(
        optimizer,
        loss,
        metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
        loss_weights=loss_weights)
    # NOTE(review): np.arange(10, 100) yields 90 labels for 10-row feature
    # dicts — shape mismatch; presumably part of why this test is disabled.
    data_list = ([{
        'a': np.arange(10),
        'b': np.arange(10)
    }, {
        'b': np.arange(10),
        'c': np.arange(10)
    }], np.arange(10, 100))
    model.fit(*data_list, epochs=1)
    data_bloated_list = ([{
        'a': np.arange(10),
        'b': np.arange(10),
        'c': np.arange(10)
    }, {
        'a': np.arange(10),
        'b': np.arange(10),
        'c': np.arange(10)
    }], np.arange(10, 100))
    model.fit(*data_bloated_list, epochs=1)
    data_dict = ({
        'fc1': {
            'a': np.arange(10),
            'b': np.arange(10)
        },
        'fc2': {
            'b': np.arange(10),
            'c': np.arange(10)
        }
    }, np.arange(10, 100))
    model.fit(*data_dict, epochs=1)
    data_bloated_dict = ({
        'fc1': {
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        },
        'fc2': {
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        }
    }, np.arange(10, 100))
    model.fit(*data_bloated_dict, epochs=1)
  # Mixed numeric + hashed-string categorical input through DenseFeatures.
  @keras_parameterized.run_all_keras_modes
  def test_string_input(self):
    x = {'age': np.random.random((1024, 1)),
         'cabin': np.array(['a'] * 1024)}
    y = np.random.randint(2, size=(1024, 1))
    ds1 = dataset_ops.Dataset.from_tensor_slices(x)
    ds2 = dataset_ops.Dataset.from_tensor_slices(y)
    dataset = dataset_ops.Dataset.zip((ds1, ds2)).batch(4)
    categorical_cols = [fc.categorical_column_with_hash_bucket('cabin', 10)]
    feature_cols = ([fc.numeric_column('age')]
                    + [fc.indicator_column(cc) for cc in categorical_cols])
    layers = [fc.DenseFeatures(feature_cols),
              keras.layers.Dense(128),
              keras.layers.Dense(1)]
    model = keras.models.Sequential(layers)
    model.compile(optimizer='sgd',
                  loss=keras.losses.BinaryCrossentropy())
    model.fit(dataset)
  # Round-trips a DenseFeatures layer through serialize/deserialize and
  # checks the revived class matches the active TF1/TF2 behavior.
  def test_serialization_dense_features(self):
    dense_feature = fc.DenseFeatures([fc.numeric_column('a')])
    config = keras.layers.serialize(dense_feature)
    self.assertEqual(config['class_name'], 'DenseFeatures')
    revived = keras.layers.deserialize(config)
    if tf2.enabled():
      self.assertIsInstance(revived, dense_features_v2.DenseFeatures)
    else:
      self.assertIsInstance(revived, fc.DenseFeatures)
      self.assertNotIsInstance(revived, dense_features_v2.DenseFeatures)
  # Round-trips a SequenceFeatures layer through serialize/deserialize.
  def test_serialization_sequence_features(self):
    rating = fc.sequence_numeric_column('rating')
    sequence_feature = fc.SequenceFeatures([rating])
    config = keras.layers.serialize(sequence_feature)
    revived = keras.layers.deserialize(config)
    self.assertIsInstance(revived, fc.SequenceFeatures)
  # This test is an example for a regression on categorical inputs, i.e.,
  # the output is 0.4, 0.6, 0.9 when input is 'alpha', 'beta', 'gamma'
  # separately.
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_linear_model_with_feature_column(self):
    vocab_list = ['alpha', 'beta', 'gamma']
    vocab_val = [0.4, 0.6, 0.9]
    data = np.random.choice(vocab_list, size=256)
    y = np.zeros_like(data, dtype=np.float32)
    # Targets are the per-vocab value plus small noise, so SGD should
    # recover vocab_val in the linear kernel.
    for vocab, val in zip(vocab_list, vocab_val):
      indices = np.where(data == vocab)
      y[indices] = val + np.random.uniform(
          low=-0.01, high=0.01, size=indices[0].shape)
    cat_column = feature_column_v2.categorical_column_with_vocabulary_list(
        key='symbol', vocabulary_list=vocab_list)
    ind_column = feature_column_v2.indicator_column(cat_column)
    dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
    linear_model = linear.LinearModel(
        use_bias=False, kernel_initializer='zeros')
    combined = keras.Sequential([dense_feature_layer, linear_model])
    opt = gradient_descent.SGD(learning_rate=0.1)
    combined.compile(opt, 'mse', [])
    combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10)
    self.assertAllClose([[0.4], [0.6], [0.9]],
                        combined.layers[1].dense_layers[0].kernel.numpy(),
                        atol=0.01)
  # This test is an example for cases where linear and dnn model accepts
  # same raw input and same transformed inputs, i.e., the raw input is
  # categorical, and both linear and dnn model accept one hot encoding.
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_wide_deep_model_with_single_feature_column(self):
    vocab_list = ['alpha', 'beta', 'gamma']
    vocab_val = [0.4, 0.6, 0.9]
    data = np.random.choice(vocab_list, size=256)
    y = np.zeros_like(data, dtype=np.float32)
    for vocab, val in zip(vocab_list, vocab_val):
      indices = np.where(data == vocab)
      y[indices] = val + np.random.uniform(
          low=-0.01, high=0.01, size=indices[0].shape)
    cat_column = feature_column_v2.categorical_column_with_vocabulary_list(
        key='symbol', vocabulary_list=vocab_list)
    ind_column = feature_column_v2.indicator_column(cat_column)
    dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
    linear_model = linear.LinearModel(
        use_bias=False, kernel_initializer='zeros')
    dnn_model = keras.Sequential([keras.layers.Dense(units=1)])
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    # Both branches share the same one-hot transformed input.
    combined = keras.Sequential([dense_feature_layer, wide_deep_model])
    opt = gradient_descent.SGD(learning_rate=0.1)
    combined.compile(
        opt,
        'mse', [],
        run_eagerly=testing_utils.should_run_eagerly())
    combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10)
  # This test is an example for cases where linear and dnn model accepts
  # same raw input but different transformed inputs, i.e,. the raw input is
  # categorical, and linear model accepts one hot encoding, while dnn model
  # accepts embedding encoding.
  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
  def test_wide_deep_model_with_two_feature_columns(self):
    vocab_list = ['alpha', 'beta', 'gamma']
    vocab_val = [0.4, 0.6, 0.9]
    data = np.random.choice(vocab_list, size=256)
    y = np.zeros_like(data, dtype=np.float32)
    for vocab, val in zip(vocab_list, vocab_val):
      indices = np.where(data == vocab)
      y[indices] = val + np.random.uniform(
          low=-0.01, high=0.01, size=indices[0].shape)
    cat_column = feature_column_v2.categorical_column_with_vocabulary_list(
        key='symbol', vocabulary_list=vocab_list)
    ind_column = feature_column_v2.indicator_column(cat_column)
    emb_column = feature_column_v2.embedding_column(cat_column, dimension=5)
    # Wide branch: one-hot; deep branch: embedding of the same column.
    linear_feature_layer = dense_features_v2.DenseFeatures([ind_column])
    linear_model = linear.LinearModel(
        use_bias=False, kernel_initializer='zeros')
    combined_linear = keras.Sequential(
        [linear_feature_layer, linear_model])
    dnn_model = keras.Sequential([keras.layers.Dense(units=1)])
    dnn_feature_layer = dense_features_v2.DenseFeatures([emb_column])
    combined_dnn = keras.Sequential([dnn_feature_layer, dnn_model])
    wide_deep_model = wide_deep.WideDeepModel(combined_linear, combined_dnn)
    opt = gradient_descent.SGD(learning_rate=0.1)
    wide_deep_model.compile(
        opt,
        'mse', [],
        run_eagerly=testing_utils.should_run_eagerly())
    wide_deep_model.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10)
# Run the test cases via TensorFlow's test runner when executed directly.
if __name__ == '__main__':
  test.main()
| |
# BEGIN_COPYRIGHT
#
# Copyright 2009-2021 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""\
Client side of the Hadoop pipes protocol.
Ref: ``org.apache.hadoop.mapred.pipes.BinaryProtocol``.
"""
import os
try:
from cPickle import loads
except ImportError:
from pickle import loads
from itertools import groupby
from operator import itemgetter
import pydoop.config as config
from .api import AVRO_IO_MODES, JobConf
# Version of the binary pipes protocol; checked against the START command.
PROTOCOL_VERSION = 0
# We can use an enum.IntEnum after dropping Python2 compatibility
# Commands received from upstream (the Java pipes runner).
START = 0
SET_JOB_CONF = 1
SET_INPUT_TYPES = 2
RUN_MAP = 3
MAP_ITEM = 4
RUN_REDUCE = 5
REDUCE_KEY = 6
REDUCE_VALUE = 7
CLOSE = 8
ABORT = 9
AUTHENTICATION_REQ = 10
# Commands sent back upstream by the Python task.
OUTPUT = 50
PARTITIONED_OUTPUT = 51
STATUS = 52
PROGRESS = 53
DONE = 54
REGISTER_COUNTER = 55
INCREMENT_COUNTER = 56
AUTHENTICATION_RESP = 57
# Human-readable names for the command codes above (debugging / logging).
CMD_REPR = {
    START: "START",
    SET_JOB_CONF: "SET_JOB_CONF",
    SET_INPUT_TYPES: "SET_INPUT_TYPES",
    RUN_MAP: "RUN_MAP",
    MAP_ITEM: "MAP_ITEM",
    RUN_REDUCE: "RUN_REDUCE",
    REDUCE_KEY: "REDUCE_KEY",
    REDUCE_VALUE: "REDUCE_VALUE",
    CLOSE: "CLOSE",
    ABORT: "ABORT",
    AUTHENTICATION_REQ: "AUTHENTICATION_REQ",
    OUTPUT: "OUTPUT",
    PARTITIONED_OUTPUT: "PARTITIONED_OUTPUT",
    STATUS: "STATUS",
    PROGRESS: "PROGRESS",
    DONE: "DONE",
    REGISTER_COUNTER: "REGISTER_COUNTER",
    INCREMENT_COUNTER: "INCREMENT_COUNTER",
    AUTHENTICATION_RESP: "AUTHENTICATION_RESP",
}
# Job conf key: true when the record writer is implemented on the Java side.
IS_JAVA_RW = "mapreduce.pipes.isjavarecordwriter"
def get_password():
    """Return the shared pipes secret as bytes, or None if not configured.

    The location of the secret file is published by Hadoop through the
    ``hadoop.pipes.shared.secret.location`` environment variable.
    """
    pass_fn = os.environ.get("hadoop.pipes.shared.secret.location")
    if pass_fn is None:
        return None
    with open(pass_fn, "rb") as f:
        return f.read()
# _get_* functions to patch the downlink according to the chosen
# deserialization policy (see below)
def _get_LongWritable(downlink):
assert downlink.stream.read_vint() == 8
return downlink.stream.read_long_writable()
def _get_Text(downlink):
return downlink.stream.read_string()
# Maps a Java Writable class name to the reader used to deserialize
# keys/values of that type (see Downlink.setup_deser).
DESERIALIZERS = {
    "org.apache.hadoop.io.LongWritable": _get_LongWritable,
    "org.apache.hadoop.io.Text": _get_Text,
}
def _get_avro_key(downlink):
raw = downlink.stream.read_bytes()
return downlink.avro_key_deserializer.deserialize(raw)
def _get_avro_value(downlink):
raw = downlink.stream.read_bytes()
return downlink.avro_value_deserializer.deserialize(raw)
def _get_pickled(downlink):
return loads(downlink.stream.read_bytes())
class Downlink(object):
    """\
    Reads and executes pipes commands as directed by upstream.

    The downlink drives the entire MapReduce task, plugging in user
    components and calling their methods as necessary. A task can be
    either a **map** task or a **reduce** task, but this is not known
    until after a few initial commands, as shown below.

    All tasks start with the following commands::

      AUTHENTICATION_REQ
      START
      SET_JOB_CONF

    Map tasks follow up with::

      RUN_MAP
      if java_reader:
          SET_INPUT_TYPES
      for k, v in input:
          MAP_ITEM
      CLOSE

    Reduce tasks follow up with::

      RUN_REDUCE
      for k in input:
          REDUCE_KEY
          for v in values_for(k):
              REDUCE_VALUE
      CLOSE

    In both cases, the inner loop consists of handling the key/value
    stream. All the code involved in this process, namely:

    * reading and optionally deserializing input keys and values
    * calling user methods
    * emitting output keys and values back to upstream

    must be as efficient as possible. For this reason, rather than having
    the ``get_{k,v}`` methods go through a complex ``if`` tree at every
    call, we patch the class itself by replacing each method with the one
    appropriate for the current scenario. Note that we can do this because:

    * the deserialization policy (including no deserialization) is the same
      for all items of a given kind (key or value), meaning that an ``if``
      tree would pick the same branch for the entire process
    * there is only one Downlink object in the process, so we don't risk
      altering the behavior of other instances
    * the Downlink object is not part of the client API (it's not passed
      to user code at all)

    Job conf deserialization also needs to be somewhat efficient, since it
    involves reading thousands of strings.
    """
    def __init__(self, istream, context, **kwargs):
        self.stream = istream
        self.context = context
        # raw_keys / raw_values: skip deserialization and hand raw bytes
        # to user code.
        self.raw_k = kwargs.get("raw_keys", False)
        self.raw_v = kwargs.get("raw_values", False)
        self.password = get_password()
        # Set to True once AUTHENTICATION_REQ has been handled; all other
        # commands are rejected until then.
        self.auth_done = False
        # Populated lazily by setup_avro_deser when avro input is enabled.
        self.avro_key_deserializer = None
        self.avro_value_deserializer = None
    def close(self):
        # Close the underlying input stream.
        self.stream.close()
    def read_job_conf(self):
        # The job conf is a flat string list of alternating keys and
        # values, preceded by its length, hence the evenness check.
        n = self.stream.read_vint()
        if n & 1:
            raise RuntimeError("number of items is not even")
        t = self.stream.read_tuple(n * 's')
        return JobConf(t[i: i + 2] for i in range(0, n, 2))
    def verify_digest(self, digest, challenge):
        # Authenticate upstream via the shared secret, when one is set.
        if self.password is not None:
            self.context._authenticate(self.password, digest, challenge)
        # self.password is None: assume reading from cmd file
        self.auth_done = True
    def setup_record_writer(self, piped_output):
        # A Python record writer and Java-side (piped) output are mutually
        # exclusive: exactly one of them must be present.
        writer = self.context.create_record_writer()
        if writer and piped_output:
            raise RuntimeError("record writer defined when not needed")
        if not writer and not piped_output:
            raise RuntimeError("record writer not defined")
    def get_k(self):
        # Default key reader: raw bytes. May be replaced on the class by
        # setup_deser / setup_avro_deser / RUN_REDUCE (see class docstring).
        return self.stream.read_bytes()
    def get_v(self):
        # Default value reader: raw bytes (see get_k).
        return self.stream.read_bytes()
    def setup_avro_deser(self):
        # Install Avro deserializers for keys and/or values according to
        # the avro input mode ('K', 'V' or 'KV') from the job conf.
        try:
            from pydoop.avrolib import AvroDeserializer
        except ImportError as e:
            raise RuntimeError("cannot handle avro input: %s" % e)
        jc = self.context.job_conf
        avro_input = jc.get(config.AVRO_INPUT).upper()
        if avro_input not in AVRO_IO_MODES:
            raise RuntimeError('invalid avro input mode: %s' % avro_input)
        # NOTE(review): precedence parses this as
        # (avro_input == 'K') or (avro_input == 'KV' and not self.raw_k),
        # so raw_k is ignored in 'K' mode — confirm this is intended.
        if avro_input == 'K' or avro_input == 'KV' and not self.raw_k:
            schema = jc.get(config.AVRO_KEY_INPUT_SCHEMA)
            self.avro_key_deserializer = AvroDeserializer(schema)
            self.__class__.get_k = _get_avro_key
        if avro_input == 'V' or avro_input == 'KV' and not self.raw_v:
            schema = jc.get(config.AVRO_VALUE_INPUT_SCHEMA)
            self.avro_value_deserializer = AvroDeserializer(schema)
            self.__class__.get_v = _get_avro_value
    def setup_deser(self, key_type, value_type):
        # Patch get_k / get_v with type-specific readers for known
        # Writable types (see DESERIALIZERS); unknown types stay raw.
        if not self.raw_k:
            d = DESERIALIZERS.get(key_type)
            if d is not None:
                self.__class__.get_k = d
        if not self.raw_v:
            d = DESERIALIZERS.get(value_type)
            if d is not None:
                self.__class__.get_v = d
    def __next__(self):
        # Read and dispatch one upstream command. Returns a (cmd, payload)
        # pair only for commands consumed by the RUN_REDUCE inner iterator;
        # raises StopIteration when the task is over.
        cmd = self.stream.read_vint()
        if cmd != AUTHENTICATION_REQ and not self.auth_done:
            raise RuntimeError("%d received before authentication" % cmd)
        if cmd == AUTHENTICATION_REQ:
            digest, challenge = self.stream.read_tuple('bb')
            self.verify_digest(digest, challenge)
        elif cmd == START:
            v = self.stream.read_vint()
            if (v != PROTOCOL_VERSION):
                raise RuntimeError("Unknown protocol id: %d" % v)
        elif cmd == SET_JOB_CONF:
            self.context._job_conf = self.read_job_conf()
            if config.AVRO_OUTPUT in self.context.job_conf:
                self.context._setup_avro_ser()
        elif cmd == RUN_MAP:
            # Switch into map mode; if a Python record reader exists, it
            # drives the whole input and no further commands are read.
            self.context.task_type = "m"
            split, nred, piped_input = self.stream.read_tuple('bii')
            self.context._raw_split = split
            reader = self.context.create_record_reader()
            if reader and piped_input:
                raise RuntimeError("record reader defined when not needed")
            if not reader and not piped_input:
                raise RuntimeError("record reader not defined")
            combiner = self.context.create_combiner()
            if nred < 1:  # map-only job
                if combiner:
                    raise RuntimeError("combiner defined in map-only job")
                self.context._private_encoding = False
            piped_output = self.context.job_conf.get_bool(IS_JAVA_RW)
            self.setup_record_writer(piped_output)
            self.context.nred = nred
            self.context.create_mapper()
            self.context.create_partitioner()
            if reader:
                for self.context._key, self.context._value in reader:
                    self.context.mapper.map(self.context)
                    self.context.progress_value = reader.get_progress()
                    self.context.progress()
                # no more commands from upstream, not even CLOSE
                try:
                    self.context.close()
                finally:
                    raise StopIteration
        elif cmd == SET_INPUT_TYPES:
            key_type, value_type = self.stream.read_tuple('ss')
            if config.AVRO_INPUT in self.context.job_conf:
                self.setup_avro_deser()
            else:
                self.setup_deser(key_type, value_type)
        elif cmd == MAP_ITEM:
            self.context._key = self.get_k()
            self.context._value = self.get_v()
            self.context.mapper.map(self.context)
        elif cmd == RUN_REDUCE:
            # Switch into reduce mode and consume the rest of the command
            # stream here, grouping REDUCE_KEY / REDUCE_VALUE / CLOSE
            # (yielded back by this same __next__) by command code.
            self.context.task_type = "r"
            part, piped_output = self.stream.read_tuple('ii')
            # for some reason, part is always 0
            self.context.create_reducer()
            self.setup_record_writer(piped_output)
            if self.context._private_encoding:
                self.__class__.get_k = _get_pickled
                self.__class__.get_v = _get_pickled
            for cmd, subs in groupby(self, itemgetter(0)):
                if cmd == REDUCE_KEY:
                    _, self.context._key = next(subs)
                if cmd == REDUCE_VALUE:
                    # Lazily unwrap the (cmd, value) pairs for the reducer.
                    self.context._values = (v for _, v in subs)
                    self.context.reducer.reduce(self.context)
                if cmd == CLOSE:
                    try:
                        self.context.close()
                    finally:
                        raise StopIteration
        elif cmd == REDUCE_KEY:
            k = self.get_k()
            return cmd, k  # pass on to RUN_REDUCE iterator
        elif cmd == REDUCE_VALUE:
            v = self.get_v()
            return cmd, v  # pass on to RUN_REDUCE iterator
        elif cmd == ABORT:
            raise RuntimeError("received ABORT command")
        elif cmd == CLOSE:
            # In map mode CLOSE ends the task; in reduce mode it is passed
            # to the RUN_REDUCE grouping loop, which performs the shutdown.
            if self.context.mapper:
                try:
                    self.context.close()
                finally:
                    raise StopIteration
            else:
                return cmd, None  # pass on to RUN_REDUCE iterator
        else:
            raise RuntimeError("unknown command: %d" % cmd)
    def __iter__(self):
        return self
    # py2 compat
    def next(self):
        return self.__next__()
class Uplink(object):
    """Writes all information that needs to be sent upstream."""

    def __init__(self, stream):
        self.stream = stream

    def flush(self):
        """Flush the underlying output stream."""
        self.stream.flush()

    def close(self):
        """Close the underlying output stream."""
        self.stream.close()

    # --- pipes commands ---

    def authenticate(self, response_digest):
        """Answer an AUTHENTICATION_REQ with the computed digest."""
        self.stream.write_tuple("ib", (AUTHENTICATION_RESP, response_digest))

    def output(self, k, v):
        """Emit an output key/value pair."""
        self.stream.write_output(k, v)

    def partitioned_output(self, part, k, v):
        """Emit an output key/value pair destined for partition *part*."""
        self.stream.write_output(k, v, part)

    def status(self, msg):
        """Report a task status message."""
        self.stream.write_tuple("is", (STATUS, msg))

    def progress(self, p):
        """Report task progress *p* as a float."""
        self.stream.write_tuple("if", (PROGRESS, p))

    def done(self):
        """Signal that the task has completed."""
        self.stream.write_vint(DONE)

    def register_counter(self, id, group, name):
        """Register a counter under *group*/*name* and bind it to *id*."""
        self.stream.write_tuple("iiss", (REGISTER_COUNTER, id, group, name))

    def increment_counter(self, id, amount):
        """Increment a previously registered counter by *amount*."""
        self.stream.write_tuple("iil", (INCREMENT_COUNTER, id, amount))
| |
from abc import ABC, abstractmethod
from math import cos, sin, pi, sqrt
class Interpolation(ABC):
    """Abstract base for easing functions mapping progress ``a`` in [0, 1]."""

    @abstractmethod
    def apply(self, a):
        """Return the interpolated progress for *a*."""
class Linear(Interpolation):
    """Identity easing: progress maps straight to itself."""

    def apply(self, a):
        return a
class Smooth(Interpolation):
    """Hermite smoothstep easing: 3a^2 - 2a^3."""

    def apply(self, a):
        return a * a * (3 - 2 * a)
class Smooth2(Interpolation):
    """Smoothstep applied twice, for an even gentler start and end."""

    def apply(self, a):
        a = a * a * (3 - 2 * a)
        return a * a * (3 - 2 * a)
class Smoother(Interpolation):
    """Perlin's smootherstep: 6a^5 - 15a^4 + 10a^3, clamped to [0, 1]."""

    def apply(self, a):
        value = a * a * a * (a * (a * 6 - 15) + 10)
        # Clamp to [0, 1] to guard against out-of-range inputs.
        return max(0, min(value, 1))
class Sine(Interpolation):
    """Sinusoidal ease-in-out."""

    def apply(self, a):
        return (1 - cos(a * pi)) / 2
class SineIn(Interpolation):
    """Sinusoidal ease-in: slow start, full speed at the end."""

    def apply(self, a):
        return 1 - cos(a * pi / 2)
class SineOut(Interpolation):
    """Sinusoidal ease-out: fast start, slow finish."""

    def apply(self, a):
        return sin(a * pi / 2)
class Circle(Interpolation):
    """Circular ease-in-out (quarter-circle arcs on each half)."""

    def apply(self, a):
        if a > 0.5:
            a = (a - 1) * 2
            return (sqrt(1 - a * a) + 1) / 2
        a *= 2
        return (1 - sqrt(1 - a * a)) / 2
class CircleIn(Interpolation):
    """Circular ease-in."""

    def apply(self, a):
        return 1 - sqrt(1 - a * a)
class CircleOut(Interpolation):
    """Circular ease-out."""

    def apply(self, a):
        return sqrt(1 - (a - 1) ** 2)
class Pow(Interpolation):
    """Polynomial ease-in-out of the given power."""

    def __init__(self, power):
        self.power = power

    def apply(self, a):
        if a <= 0.5:
            return (a * 2) ** self.power / 2
        # Odd powers keep the sign of (a - 1) * 2 negative, hence the
        # sign-dependent divisor.
        denom = 2 if self.power % 2 else -2
        return ((a - 1) * 2) ** self.power / denom + 1
class PowIn(Pow):
    """Polynomial ease-in."""

    def apply(self, a):
        return a ** self.power
class PowOut(Pow):
    """Polynomial ease-out."""

    def apply(self, a):
        sign = 1 if self.power % 2 else -1
        return (a - 1) ** self.power * sign + 1
class Exp(Interpolation):
    """Exponential ease-in-out based on value ** (power * t)."""

    def __init__(self, value, power):
        self.value = value
        self.power = power
        # Offset/scale normalize the curve so it spans exactly [0, 1].
        self.min = value ** -power
        self.scale = 1 / (1 - self.min)

    def apply(self, a):
        if a <= 0.5:
            exponent = self.power * (a * 2 - 1)
            return (self.value ** exponent - self.min) * self.scale / 2
        exponent = -self.power * (a * 2 - 1)
        return (2 - (self.value ** exponent - self.min) * self.scale) / 2
class ExpIn(Exp):
    """Exponential ease-in."""

    def apply(self, a):
        return (self.value ** (self.power * (a - 1)) - self.min) * self.scale
class ExpOut(Exp):
    """Exponential ease-out."""

    def apply(self, a):
        return 1 - (self.value ** (-self.power * a) - self.min) * self.scale
class Elastic(Interpolation):
    """Elastic ease-in-out: damped sinusoidal oscillation toward the target."""

    def __init__(self, value, power, bounces, scale):
        self.value = value
        self.power = power
        # Odd bounce counts flip the sine phase so the curve ends at 1.
        self.bounces = bounces * pi * (-1 if bounces % 2 else 1)
        self.scale = scale

    def apply(self, a):
        if a <= 0.5:
            a *= 2
            damping = self.power * (a - 1)
            a *= self.bounces
            return self.value ** damping * sin(a) * self.scale / 2
        a = (1 - a) * 2
        damping = self.power * (a - 1)
        a *= self.bounces
        return 1 - self.value ** damping * sin(a) * self.scale / 2
class ElasticIn(Elastic):
    """Elastic ease-in."""

    def apply(self, a):
        # Snap to 1 near the end to avoid a visible residual oscillation.
        if a >= 0.99:
            return 1
        damping = self.power * (a - 1)
        return self.value ** damping * sin(a * self.bounces) * self.scale
class ElasticOut(Elastic):
    """Elastic ease-out."""

    def apply(self, a):
        if a == 0:
            return 0
        a = 1 - a
        damping = self.power * (a - 1)
        return 1 - self.value ** damping * sin(a * self.bounces) * self.scale
class BounceOut(Interpolation):
    """Bounce ease-out with a configurable number of bounces (2 to 5).

    Each bounce has a width (its share of the time axis) and a height
    (its amplitude); the per-count tables below follow the libGDX
    Interpolation.BounceOut tuning.
    """

    # bounce count -> (widths, heights); heights[0] is always 1 (the
    # first, full-height bounce).
    _TABLES = {
        2: ((0.6, 0.4), (1, 0.33)),
        3: ((0.4, 0.4, 0.2), (1, 0.33, 0.1)),
        4: ((0.34, 0.34, 0.2, 0.15), (1, 0.26, 0.11, 0.03)),
        5: ((0.3, 0.3, 0.2, 0.1, 0.1), (1, 0.45, 0.3, 0.15, 0.06)),
    }

    def __init__(self, bounces):
        # BUG FIX: the original condition was `bounces > 2 or bounces > 5`,
        # which rejected 3, 4 and 5 and let counts below 2 fall through to
        # a KeyError; the intended valid range is [2, 5].
        if bounces < 2 or bounces > 5:
            raise AttributeError("Bounces must be between [2,5]")
        widths, heights = self._TABLES[bounces]
        self.widths = list(widths)
        self.heights = list(heights)
        # The first bounce starts half off-screen, so it spans twice its
        # nominal width.
        self.widths[0] *= 2

    def apply(self, a):
        """Return the bounce-eased progress for *a* in [0, 1]."""
        if a == 1:
            return 1
        # Shift into the (double-width) first bounce.
        a += self.widths[0] / 2
        width = 0
        height = 0
        # Locate the bounce interval containing the shifted progress.
        for i, w in enumerate(self.widths):
            if a <= w:
                width = w
                height = self.heights[i]
                break
            a -= w
        # Evaluate an inverted parabola over the bounce interval.
        a /= width
        z = 4 / width * height * a
        return 1 - (z - z * a) * width
class Bounce(BounceOut):
    """Bounce ease-in-out built by mirroring BounceOut around the midpoint."""

    def _out(self, a):
        shifted = a + self.widths[0] / 2
        # Inside the initial half-bounce, interpolate linearly up to 0.
        if shifted < self.widths[0]:
            return shifted / (self.widths[0] / 2) - 1
        return super().apply(a)

    def apply(self, a):
        if a > 0.5:
            return self._out(a * 2 - 1) / 2 + 0.5
        return (1 - self._out(1 - a * 2)) / 2
class BounceIn(BounceOut):
    """Bounce ease-in: the time-reversed mirror of BounceOut."""

    def apply(self, a):
        return 1 - super().apply(1 - a)
class Swing(Interpolation):
    """Swing ease-in-out: overshoots slightly at both ends."""

    def __init__(self, scale):
        self.scale = scale * 2

    def apply(self, a):
        if a > 0.5:
            a = (a - 1) * 2
            return a ** 2 * ((self.scale + 1) * a + self.scale) / 2 + 1
        a *= 2
        return a ** 2 * ((self.scale + 1) * a - self.scale) / 2
class SwingOut(Swing):
    """Swing ease-out: overshoots past 1 before settling."""

    def apply(self, a):
        a -= 1
        return a ** 2 * ((self.scale + 1) * a + self.scale) + 1
class SwingIn(Swing):
    """Swing ease-in: dips below 0 before accelerating."""

    def apply(self, a):
        return a ** 2 * ((self.scale + 1) * a - self.scale)
| |
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import sys
from telemetry.core import util
from telemetry.core import platform as platform_module
from telemetry import decorators
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_finder_exceptions
from telemetry.internal.browser import browser_options
from telemetry.internal.platform import device_finder
from telemetry.internal.util import binary_manager
from telemetry.internal.util import command_line
from telemetry.internal.util import ps_util
from telemetry.testing import browser_test_case
from telemetry.testing import options_for_unittests
import typ
class RunTestsCommand(command_line.OptparseCommand):
"""Run unit tests"""
usage = '[test_name ...] [<options>]'
  def __init__(self):
    super(RunTestsCommand, self).__init__()
    # Where test output is written; main() may replace this with a
    # caller-supplied stream.
    self.stream = sys.stdout
@classmethod
def CreateParser(cls):
options = browser_options.BrowserFinderOptions()
options.browser_type = 'any'
parser = options.CreateParser('%%prog %s' % cls.usage)
return parser
  @classmethod
  def AddCommandLineArgs(cls, parser, _):
    """Register run_tests-specific flags plus typ's runner/reporting options."""
    parser.add_option('--repeat-count', type='int', default=1,
                      help='Repeats each a provided number of times.')
    parser.add_option('--no-browser', action='store_true', default=False,
                      help='Don\'t require an actual browser to run the tests.')
    parser.add_option('-d', '--also-run-disabled-tests',
                      dest='run_disabled_tests',
                      action='store_true', default=False,
                      help='Ignore @Disabled and @Enabled restrictions.')
    parser.add_option('--exact-test-filter', action='store_true', default=False,
                      help='Treat test filter as exact matches (default is '
                           'substring matches).')
    parser.add_option('--client-config', dest='client_config', default=None)
    # NOTE(review): help text reads oddly for a "disable" flag — confirm
    # the intended wording.
    parser.add_option('--disable-logging-config', action='store_true',
                      default=False, help='Configure logging (default on)')
    # '-d', '-v' and '--verbose' are skipped because they clash with the
    # options registered above / by the base command.
    typ.ArgumentParser.add_option_group(parser,
                                        "Options for running the tests",
                                        running=True,
                                        skip=['-d', '-v', '--verbose'])
    typ.ArgumentParser.add_option_group(parser,
                                        "Options for reporting the results",
                                        reporting=True)
  @classmethod
  def ProcessCommandLineArgs(cls, parser, args, _):
    """Validate parsed args; errors out via the parser on bad browser setup."""
    # We retry failures by default unless we're running a list of tests
    # explicitly.
    if not args.retry_limit and not args.positional_args:
      args.retry_limit = 3
    if args.no_browser:
      return
    # NOTE: Python 2 'except ..., ex' syntax — this module predates the
    # py3 migration.
    try:
      possible_browser = browser_finder.FindBrowser(args)
    except browser_finder_exceptions.BrowserFinderException, ex:
      parser.error(ex)
    if not possible_browser:
      parser.error('No browser found of type %s. Cannot run tests.\n'
                   'Re-run with --browser=list to see '
                   'available browser types.' % args.browser_type)
  @classmethod
  def main(cls, args=None, stream=None): # pylint: disable=arguments-differ
    # We override the superclass so that we can hook in the 'stream' arg.
    parser = cls.CreateParser()
    cls.AddCommandLineArgs(parser, None)
    options, positional_args = parser.parse_args(args)
    options.positional_args = positional_args
    # Must initialize the DependencyManager before calling
    # browser_finder.FindBrowser(args)
    binary_manager.InitDependencyManager(options.client_config)
    cls.ProcessCommandLineArgs(parser, options, None)
    obj = cls()
    # Redirect output to the caller's stream when one is provided.
    if stream is not None:
      obj.stream = stream
    return obj.Run(options)
def Run(self, args):
  """Configure a typ runner from the parsed options and execute the tests.

  Args:
    args: The parsed options object produced by main().

  Returns:
    int: typ's exit code, or 130 when interrupted with Ctrl-C.
  """
  runner = typ.Runner()
  if self.stream:
    runner.host.stdout = self.stream
  if args.no_browser:
    possible_browser = None
    platform = platform_module.GetHostPlatform()
  else:
    possible_browser = browser_finder.FindBrowser(args)
    platform = possible_browser.platform
  # Telemetry seems to overload the system if we run one test per core,
  # so we scale things back a fair amount. Many of the telemetry tests
  # are long-running, so there's a limit to how much parallelism we
  # can effectively use for now anyway.
  #
  # It should be possible to handle multiple devices if we adjust the
  # browser_finder code properly, but for now we only handle one on ChromeOS.
  if platform.GetOSName() == 'chromeos':
    runner.args.jobs = 1
  elif platform.GetOSName() == 'android':
    # One worker per attached Android device.
    runner.args.jobs = len(device_finder.GetDevicesMatchingOptions(args))
    print 'Running tests with %d Android device(s).' % runner.args.jobs
  elif platform.GetOSVersionName() == 'xp':
    # For an undiagnosed reason, XP falls over with more parallelism.
    # See crbug.com/388256
    runner.args.jobs = max(int(args.jobs) // 4, 1)
  else:
    runner.args.jobs = max(int(args.jobs) // 2, 1)
  # Forward the remaining options onto typ's own argument object.
  runner.args.metadata = args.metadata
  runner.args.passthrough = args.passthrough
  runner.args.path = args.path
  runner.args.retry_limit = args.retry_limit
  runner.args.test_results_server = args.test_results_server
  runner.args.test_type = args.test_type
  runner.args.top_level_dir = args.top_level_dir
  runner.args.write_full_results_to = args.write_full_results_to
  runner.args.write_trace_to = args.write_trace_to
  runner.args.list_only = args.list_only
  runner.args.path.append(util.GetUnittestDataDir())
  # Always print out these info for the ease of debugging.
  runner.args.timing = True
  runner.args.verbose = 3
  runner.classifier = GetClassifier(args, possible_browser)
  runner.context = args
  runner.setup_fn = _SetUpProcess
  runner.teardown_fn = _TearDownProcess
  runner.win_multiprocessing = typ.WinMultiprocessing.importable
  try:
    ret, _, _ = runner.run()
  except KeyboardInterrupt:
    print >> sys.stderr, "interrupted, exiting"
    ret = 130
  return ret
def GetClassifier(args, possible_browser):
  """Return a typ classifier function appropriate for this run.

  Args:
    args: Parsed options; positional_args/exact_test_filter select tests and
      run_disabled_tests bypasses @Disabled checks.
    possible_browser: The browser to run against, or None to classify for a
      browserless run.

  Returns:
    A callable(test_set, test) that files each selected test into the
    test_set's parallel, isolated or skipped buckets.
  """
  def ClassifyTestWithoutBrowser(test_set, test):
    # Without a browser, any browser-based test is skipped outright.
    name = test.id()
    if (not args.positional_args
        or _MatchesSelectedTest(name, args.positional_args,
                                args.exact_test_filter)):
      # TODO(telemetry-team): Make sure that all telemetry unittest that invokes
      # actual browser are subclasses of browser_test_case.BrowserTestCase
      # (crbug.com/537428)
      if issubclass(test.__class__, browser_test_case.BrowserTestCase):
        test_set.tests_to_skip.append(typ.TestInput(
            name, msg='Skip the test because it requires a browser.'))
      else:
        test_set.parallel_tests.append(typ.TestInput(name))

  def ClassifyTestWithBrowser(test_set, test):
    # With a browser, honor @Disabled/@Enabled and isolation decorators.
    name = test.id()
    if (not args.positional_args
        or _MatchesSelectedTest(name, args.positional_args,
                                args.exact_test_filter)):
      assert hasattr(test, '_testMethodName')
      method = getattr(
          test, test._testMethodName)  # pylint: disable=protected-access
      should_skip, reason = decorators.ShouldSkip(method, possible_browser)
      if should_skip and not args.run_disabled_tests:
        test_set.tests_to_skip.append(typ.TestInput(name, msg=reason))
      elif decorators.ShouldBeIsolated(method, possible_browser):
        test_set.isolated_tests.append(typ.TestInput(name))
      else:
        test_set.parallel_tests.append(typ.TestInput(name))

  if possible_browser:
    return ClassifyTestWithBrowser
  else:
    return ClassifyTestWithoutBrowser
def _MatchesSelectedTest(name, selected_tests, selected_tests_are_exact):
if not selected_tests:
return False
if selected_tests_are_exact:
return any(name in selected_tests)
else:
return any(test in name for test in selected_tests)
def _SetUpProcess(child, context):  # pylint: disable=unused-argument
  """Per-worker-process setup hook for typ.

  Args:
    child: The typ child-process wrapper; worker_num is used to assign an
      Android device to this worker.
    context: The parsed options object passed through runner.context.
  """
  ps_util.EnableListingStrayProcessesUponExitHook()
  if binary_manager.NeedsInit():
    # Typ doesn't keep the DependencyManager initialization in the child
    # processes.
    binary_manager.InitDependencyManager(context.client_config)
  # We need to reset the handlers in case some other parts of telemetry already
  # set it to make this work.
  # NOTE(review): logging is configured unconditionally here AND again below
  # when disable_logging_config is unset — the duplication looks like a merge
  # artifact; confirm which block is intended.
  logging.getLogger().handlers = []
  logging.basicConfig(
      level=logging.INFO,
      format='(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
             '%(message)s')
  args = context
  if not args.disable_logging_config:
    logging.getLogger().handlers = []
    logging.basicConfig(
        level=logging.INFO,
        format='(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d'
               ' %(message)s')
  if args.device and args.device == 'android':
    # Map this worker to one of the attached Android devices (1-based
    # worker_num indexes into the device list).
    android_devices = device_finder.GetDevicesMatchingOptions(args)
    args.device = android_devices[child.worker_num-1].guid
  options_for_unittests.Push(args)
def _TearDownProcess(child, context):  # pylint: disable=unused-argument
  """Per-worker-process teardown hook for typ.

  Args:
    child: The typ child-process wrapper (unused).
    context: The options object previously pushed by _SetUpProcess.
  """
  # It's safe to call teardown_browser even if we did not start any browser
  # in any of the tests.
  browser_test_case.teardown_browser()
  options_for_unittests.Pop()
if __name__ == '__main__':
  # Run the command and propagate its exit code to the shell.
  sys.exit(RunTestsCommand.main())
| |
import os
import shutil
from datetime import timedelta
from functools import partial
from itertools import chain
from operator import is_not
import numpy as np
import pandas as pd
import pytz
from catalyst import get_calendar
from catalyst.assets._assets import TradingPair
from catalyst.constants import DATE_TIME_FORMAT, AUTO_INGEST
from catalyst.constants import LOG_LEVEL
from catalyst.data.minute_bars import BcolzMinuteOverlappingData, \
BcolzMinuteBarMetadata
from catalyst.exchange.exchange_bcolz import BcolzExchangeBarReader, \
BcolzExchangeBarWriter
from catalyst.exchange.exchange_errors import EmptyValuesInBundleError, \
TempBundleNotFoundError, \
NoDataAvailableOnExchange, \
PricingDataNotLoadedError, DataCorruptionError, PricingDataValueError
from catalyst.exchange.utils.bundle_utils import range_in_bundle, \
get_bcolz_chunk, get_df_from_arrays, get_assets
from catalyst.exchange.utils.datetime_utils import get_start_dt, \
get_period_label, get_month_start_end, get_year_start_end
from catalyst.exchange.utils.exchange_utils import get_exchange_folder, \
save_exchange_symbols, mixin_market_params, get_catalyst_symbol
from catalyst.utils.cli import maybe_show_progress
from catalyst.utils.paths import ensure_directory
from logbook import Logger
from pytz import UTC
from six import itervalues
# Module-level logger for exchange bundle operations.
log = Logger('exchange_bundle', level=LOG_LEVEL)

# Layout of a bundle folder inside an exchange's data directory,
# e.g. <root>/minute_bundle or <root>/daily_bundle.
BUNDLE_NAME_TEMPLATE = os.path.join('{root}', '{frequency}_bundle')
def _cachpath(symbol, type_):
return '-'.join([symbol, type_])
class ExchangeBundle:
def __init__(self, exchange_name):
    """Create a bundle manager for the given exchange.

    Parameters
    ----------
    exchange_name: str
        Name of the exchange whose data bundles are managed.
    """
    self.exchange_name = exchange_name
    self.minutes_per_day = 1440
    self.default_ohlc_ratio = 1000000
    # Reader/writer caches, keyed by bundle folder path.
    self._writers = {}
    self._readers = {}
    self.calendar = get_calendar('OPEN')
    # Resolved lazily to avoid circular imports.
    self.exchange = None
def get_reader(self, data_frequency, path=None):
    """Return a bar reader for a bundle, creating and caching it.

    Parameters
    ----------
    data_frequency: str
    path: str, optional
        Bundle folder; defaults to this exchange's main bundle.

    Returns
    -------
    BcolzExchangeBarReader or None
        None when the bundle folder cannot be opened.
    """
    if path is None:
        bundle_root = get_exchange_folder(self.exchange_name)
        path = BUNDLE_NAME_TEMPLATE.format(
            root=bundle_root,
            frequency=data_frequency
        )
    # A cached None means a previous open failed; retry in that case.
    cached = self._readers.get(path)
    if cached is not None:
        return cached
    try:
        reader = BcolzExchangeBarReader(
            rootdir=path,
            data_frequency=data_frequency
        )
    except IOError:
        reader = None
    self._readers[path] = reader
    return reader
def update_metadata(self, writer, start_dt, end_dt):
    """Placeholder; currently a no-op.

    Metadata start/end session widening is instead performed by
    get_writer() when it re-opens an existing bundle.
    """
    pass
def get_writer(self, start_dt, end_dt, data_frequency):
    """
    Get a data writer object, either a new object or from cache.

    When the bundle already exists on disk, its stored metadata
    start/end sessions are widened to include [start_dt, end_dt].

    Parameters
    ----------
    start_dt: pd.Timestamp
    end_dt: pd.Timestamp
    data_frequency: str

    Returns
    -------
    BcolzMinuteBarWriter | BcolzDailyBarWriter
    """
    root = get_exchange_folder(self.exchange_name)
    path = BUNDLE_NAME_TEMPLATE.format(
        root=root,
        frequency=data_frequency
    )
    if path in self._writers:
        return self._writers[path]
    ensure_directory(path)
    if len(os.listdir(path)) > 0:
        # Existing bundle: merge the requested range into the metadata and
        # only rewrite it when the range actually widened.
        metadata = BcolzMinuteBarMetadata.read(path)
        write_metadata = False
        if start_dt < metadata.start_session:
            write_metadata = True
            start_session = start_dt
        else:
            start_session = metadata.start_session
        if end_dt > metadata.end_session:
            write_metadata = True
            end_session = end_dt
        else:
            end_session = metadata.end_session
        self._writers[path] = \
            BcolzExchangeBarWriter(
                rootdir=path,
                start_session=start_session,
                end_session=end_session,
                write_metadata=write_metadata,
                data_frequency=data_frequency
            )
    else:
        # Fresh bundle: metadata is always written.
        self._writers[path] = BcolzExchangeBarWriter(
            rootdir=path,
            start_session=start_dt,
            end_session=end_dt,
            write_metadata=True,
            data_frequency=data_frequency
        )
    return self._writers[path]
def filter_existing_assets(self, assets, start_dt, end_dt, data_frequency):
    """
    For each asset, check whether the bundle already covers the chunk's
    start and end dates; assets with any data missing are returned so
    they can be (re-)ingested.

    Parameters
    ----------
    assets: list[TradingPair]
        The assets in scope.
    start_dt: pd.Timestamp
        The chunk start date.
    end_dt: pd.Timestamp
        The chunk end date.
    data_frequency: str

    Returns
    -------
    list[TradingPair]
        The assets missing from the bundle.
    """
    reader = self.get_reader(data_frequency)
    return [
        asset for asset in assets
        if not range_in_bundle(asset, start_dt, end_dt, reader)
    ]
def _write(self, data, writer, data_frequency):
    """Write (sid, DataFrame) pairs with the given writer.

    Overlapping chunks are treated as already-ingested and ignored; on any
    other failure the writer is rebuilt once and the write is retried.

    Parameters
    ----------
    data: list[(int, pd.DataFrame)]
    writer: BcolzExchangeBarWriter
    data_frequency: str
    """
    try:
        writer.write(
            data=data,
            show_progress=False,
            invalid_data_behavior='raise'
        )
    except BcolzMinuteOverlappingData as e:
        # The chunk was ingested previously; not an error.
        log.debug('chunk already exists: {}'.format(e))
    except Exception as e:
        log.warn('error when writing data: {}, trying again'.format(e))
        # This is workaround, there is an issue with empty
        # session_label when using a newly created writer
        del self._writers[writer._rootdir]
        writer = self.get_writer(writer._start_session,
                                 writer._end_session, data_frequency)
        writer.write(
            data=data,
            show_progress=False,
            invalid_data_behavior='raise'
        )
def get_calendar_periods_range(self, start_dt, end_dt, data_frequency):
"""
Get a list of dates for the specified range.
Parameters
----------
start_dt: pd.Timestamp
end_dt: pd.Timestamp
data_frequency: str
Returns
-------
list[datetime]
"""
return self.calendar.minutes_in_range(start_dt, end_dt) \
if data_frequency == 'minute' \
else self.calendar.sessions_in_range(start_dt, end_dt)
def _spot_empty_periods(self, ohlcv_df, asset, data_frequency,
empty_rows_behavior):
problems = []
nan_rows = ohlcv_df[ohlcv_df.isnull().T.any().T].index
if len(nan_rows) > 0:
dates = []
for row_date in nan_rows.values:
row_date = pd.to_datetime(row_date, utc=True)
if row_date > asset.start_date:
dates.append(row_date)
if len(dates) > 0:
end_dt = asset.end_minute if data_frequency == 'minute' \
else asset.end_daily
problem = '{name} ({start_dt} to {end_dt}) has empty ' \
'periods: {dates}'.format(
name=asset.symbol,
start_dt=asset.start_date.strftime(
DATE_TIME_FORMAT),
end_dt=end_dt.strftime(DATE_TIME_FORMAT),
dates=[date.strftime(
DATE_TIME_FORMAT) for date in dates])
if empty_rows_behavior == 'warn':
log.warn(problem)
elif empty_rows_behavior == 'raise':
raise EmptyValuesInBundleError(
name=asset.symbol,
end_minute=end_dt,
dates=dates, )
else:
ohlcv_df.dropna(inplace=True)
else:
problem = None
problems.append(problem)
return problems
def _spot_duplicates(self, ohlcv_df, asset, data_frequency, threshold):
    """Detect suspicious runs of identical close values (work in progress).

    NOTE(review): unfinished diagnostic — the first loop only logs, and the
    detection uses a hard-coded 10 rather than 'threshold', which is only
    interpolated into the message. Currently not called (see ingest_df).

    Parameters
    ----------
    ohlcv_df: pd.DataFrame
    asset: TradingPair
    data_frequency: str
    threshold: int

    Returns
    -------
    list[str]
    """
    # TODO: work in progress
    # Group row indexes by close price; large groups are candidates.
    series = ohlcv_df.reset_index().groupby('close')['index'].apply(
        np.array
    )
    ref_delta = timedelta(minutes=1) if data_frequency == 'minute' \
        else timedelta(days=1)
    dups = series.loc[lambda values: [len(x) > 10 for x in values]]
    for index, dates in dups.iteritems():
        prev_date = None
        for date in dates:
            if prev_date is not None:
                # np.datetime64 differences are in nanoseconds.
                delta = (date - prev_date) / 1e9
                if delta == ref_delta.seconds:
                    log.info('pex')
            prev_date = date
    problems = []
    for index, dates in dups.iteritems():
        end_dt = asset.end_minute if data_frequency == 'minute' \
            else asset.end_daily
        problem = '{name} ({start_dt} to {end_dt}) has {threshold} ' \
                  'identical close values on: {dates}'.format(
                      name=asset.symbol,
                      start_dt=asset.start_date.strftime(DATE_TIME_FORMAT),
                      end_dt=end_dt.strftime(DATE_TIME_FORMAT),
                      threshold=threshold,
                      dates=[pd.to_datetime(date).strftime(DATE_TIME_FORMAT)
                             for date in dates])
        problems.append(problem)
    return problems
def ingest_df(self, ohlcv_df, data_frequency, asset, writer,
empty_rows_behavior='warn', duplicates_threshold=None):
"""
Ingest a DataFrame of OHLCV data for a given market.
Parameters
----------
ohlcv_df: DataFrame
data_frequency: str
asset: TradingPair
writer:
empty_rows_behavior: str
"""
problems = []
if empty_rows_behavior is not 'ignore':
problems += self._spot_empty_periods(
ohlcv_df, asset, data_frequency, empty_rows_behavior
)
# if duplicates_threshold is not None:
# problems += self._spot_duplicates(
# ohlcv_df, asset, data_frequency, duplicates_threshold
# )
data = []
if not ohlcv_df.empty:
ohlcv_df.sort_index(inplace=True)
data.append((asset.sid, ohlcv_df))
self._write(data, writer, data_frequency)
return problems
def ingest_ctable(self, asset, data_frequency, period,
                  writer, empty_rows_behavior='strip',
                  duplicates_threshold=100, cleanup=False):
    """
    Merge a ctable bundle chunk into the main bundle for the exchange.

    Parameters
    ----------
    asset: TradingPair
    data_frequency: str
    period: str
        Label of the chunk period (month for minute data, year for daily).
    writer:
    empty_rows_behavior: str
        Ensure that the bundle does not have any missing data.
    duplicates_threshold: int
    cleanup: bool
        Remove the temp bundle directory after ingestion.

    Returns
    -------
    list[str]
        A list of problems which occurred during ingestion.
        NOTE(review): when the raw read yields no arrays this returns
        reader._rootdir (a str) instead — confirm callers tolerate this.
    """
    problems = []
    # Download and extract the bundle
    path = get_bcolz_chunk(
        exchange_name=self.exchange_name,
        symbol=asset.symbol,
        data_frequency=data_frequency,
        period=period
    )
    reader = self.get_reader(data_frequency, path=path)
    if reader is None:
        # The downloaded chunk is unreadable; delete it so a later run
        # fetches a fresh copy, then report the failure.
        try:
            log.warn('the reader is unable to use bundle: {}, '
                     'deleting it.'.format(path))
            shutil.rmtree(path)
        except Exception as e:
            log.warn('unable to remove temp bundle: {}'.format(e))
        raise TempBundleNotFoundError(path=path)
    start_dt = reader.first_trading_day
    end_dt = reader.last_available_dt
    if data_frequency == 'daily':
        # Daily readers report the last minute of the final day; snap the
        # end back to the session start.
        end_dt = end_dt - pd.Timedelta(hours=23, minutes=59)
    arrays = None
    try:
        arrays = reader.load_raw_arrays(
            sids=[asset.sid],
            fields=['open', 'high', 'low', 'close', 'volume'],
            start_dt=start_dt,
            end_dt=end_dt
        )
    except Exception as e:
        log.warn('skipping ctable for {} from {} to {}: {}'.format(
            asset.symbol, start_dt, end_dt, e
        ))
    if not arrays:
        return reader._rootdir
    periods = self.get_calendar_periods_range(
        start_dt, end_dt, data_frequency
    )
    df = get_df_from_arrays(arrays, periods)
    problems += self.ingest_df(
        ohlcv_df=df,
        data_frequency=data_frequency,
        asset=asset,
        writer=writer,
        empty_rows_behavior=empty_rows_behavior,
        duplicates_threshold=duplicates_threshold
    )
    if cleanup:
        log.debug(
            'removing bundle folder following ingestion: {}'.format(
                reader._rootdir)
        )
        shutil.rmtree(reader._rootdir)
    # Drop None entries (no problem found) from the report.
    return filter(partial(is_not, None), problems)
def get_adj_dates(self, start, end, assets, data_frequency):
    """
    Constrain a date range to the trading availability of the specified
    markets.

    Parameters
    ----------
    start: pd.Timestamp
    end: pd.Timestamp
    assets: list[TradingPair]
    data_frequency: str

    Returns
    -------
    pd.Timestamp, pd.Timestamp
        The adjusted (start, end) pair.

    Raises
    ------
    NoDataAvailableOnExchange
        If the adjusted range is empty or inverted.
    """
    earliest_trade = None
    last_entry = None
    for asset in assets:
        # Track the earliest trade date across assets, clamped to the
        # calendar's first session.
        if earliest_trade is None or earliest_trade > asset.start_date:
            if asset.start_date >= self.calendar.first_session:
                earliest_trade = asset.start_date
            else:
                earliest_trade = self.calendar.first_session
        end_asset = asset.end_minute if data_frequency == 'minute' else \
            asset.end_daily
        if end_asset is not None:
            if last_entry is None or end_asset > last_entry:
                last_entry = end_asset
        else:
            # An asset without a known end date invalidates any upper bound.
            end = None
            last_entry = None
    if start is None or \
            (earliest_trade is not None and earliest_trade > start):
        start = earliest_trade
    if last_entry is not None and (end is None or end > last_entry):
        # For minute data, extend to the final minute of the closing day.
        end = last_entry.replace(minute=59, hour=23) \
            if data_frequency == 'minute' else last_entry
    if end is None or start is None or start > end:
        raise NoDataAvailableOnExchange(
            exchange=[asset.exchange for asset in assets],
            symbol=[asset.symbol for asset in assets],
            data_frequency=data_frequency,
        )
    return start, end
def prepare_chunks(self, assets, data_frequency, start_dt, end_dt):
    """
    Split a price data request into chunks corresponding to individual
    bundles. Minute data is chunked by month, daily data by year.

    Parameters
    ----------
    assets: list[TradingPair]
    data_frequency: str
    start_dt: pd.Timestamp
    end_dt: pd.Timestamp

    Returns
    -------
    dict[TradingPair, list[dict(str, Object]]]
        For each asset, the chunks still missing from the main bundle,
        sorted by period label.
    """
    get_start_end = get_month_start_end \
        if data_frequency == 'minute' else get_year_start_end
    # Get a reader for the main bundle to verify if data exists
    reader = self.get_reader(data_frequency)
    chunks = dict()
    for asset in assets:
        try:
            # Checking if the the asset has price data in the specified
            # date range
            adj_start, adj_end = self.get_adj_dates(
                start_dt, end_dt, [asset], data_frequency
            )
        except NoDataAvailableOnExchange as e:
            # If not, we continue to the next asset
            log.debug('skipping {}: {}'.format(asset.symbol, e))
            continue
        # One label per month ('MS') or per year ('AS') in the range.
        dates = pd.date_range(
            start=get_period_label(adj_start, data_frequency),
            end=get_period_label(adj_end, data_frequency),
            freq='MS' if data_frequency == 'minute' else 'AS',
            tz=UTC
        )
        # Adjusting the last date of the range to avoid
        # going over the asset's trading bounds
        # NOTE(review): writing into DatetimeIndex.values relies on pandas
        # internals — confirm this still works on newer pandas versions.
        dates.values[0] = adj_start
        dates.values[-1] = adj_end
        chunks[asset] = []
        for index, dt in enumerate(dates):
            period_start, period_end = get_start_end(
                dt=dt,
                first_day=dt if index == 0 else None,
                last_day=dt if index == len(dates) - 1 else None
            )
            # Currencies don't always start trading at midnight.
            # Checking the last minute of the day instead.
            range_start = period_start.replace(hour=23, minute=59) \
                if data_frequency == 'minute' else period_start
            # Checking if the data already exists in the bundle
            # for the date range of the chunk. If not, we create
            # a chunk for ingestion.
            has_data = range_in_bundle(
                asset, range_start, period_end, reader
            )
            if not has_data:
                period = get_period_label(dt, data_frequency)
                chunk = dict(
                    asset=asset,
                    period=period,
                )
                chunks[asset].append(chunk)
        # We sort the chunks by end date to ingest most recent data first
        chunks[asset].sort(
            key=lambda chunk: pd.to_datetime(chunk['period'])
        )
    return chunks
def ingest_assets(self, assets, data_frequency, start_dt=None, end_dt=None,
                  show_progress=False, show_breakdown=False,
                  show_report=False):
    """
    Determine if data is missing from the bundle and attempt to ingest it.

    Parameters
    ----------
    assets: list[TradingPair]
    data_frequency: str
    start_dt: pd.Timestamp
        Defaults to the calendar's first session.
    end_dt: pd.Timestamp
        Defaults to the current UTC time.
    show_progress: bool
    show_breakdown: bool
        If True, shows one progress bar per asset instead of a single
        bar over all chunks.
    show_report: bool
        If True, logs the problems found during ingestion.
    """
    if start_dt is None:
        start_dt = self.calendar.first_session
    if end_dt is None:
        end_dt = pd.Timestamp.utcnow()
    get_start_end = get_month_start_end \
        if data_frequency == 'minute' else get_year_start_end
    # Assign the first and last day of the period
    start_dt, _ = get_start_end(start_dt)
    _, end_dt = get_start_end(end_dt)
    chunks = self.prepare_chunks(
        assets=assets,
        data_frequency=data_frequency,
        start_dt=start_dt,
        end_dt=end_dt
    )
    problems = []
    # This is the common writer for the entire exchange bundle
    # we want to give an end_date far in time
    writer = self.get_writer(start_dt, end_dt, data_frequency)
    if show_breakdown:
        # Per-asset progress bars.
        if chunks:
            for asset in chunks:
                with maybe_show_progress(
                        chunks[asset],
                        show_progress,
                        label='Ingesting {frequency} price data for '
                              '{symbol} on {exchange}'.format(
                                  exchange=self.exchange_name,
                                  frequency=data_frequency,
                                  symbol=asset.symbol
                              )) as it:
                    for chunk in it:
                        problems += self.ingest_ctable(
                            asset=chunk['asset'],
                            data_frequency=data_frequency,
                            period=chunk['period'],
                            writer=writer,
                            empty_rows_behavior='strip',
                            cleanup=True
                        )
    else:
        # Single progress bar over all chunks of all assets.
        all_chunks = list(chain.from_iterable(itervalues(chunks)))
        # We sort the chunks by end date to ingest most recent data first
        if all_chunks:
            all_chunks.sort(
                key=lambda chunk: pd.to_datetime(chunk['period'])
            )
            with maybe_show_progress(
                    all_chunks,
                    show_progress,
                    label='Ingesting {frequency} price data on '
                          '{exchange}'.format(
                              exchange=self.exchange_name,
                              frequency=data_frequency,
                          )) as it:
                for chunk in it:
                    problems += self.ingest_ctable(
                        asset=chunk['asset'],
                        data_frequency=data_frequency,
                        period=chunk['period'],
                        writer=writer,
                        empty_rows_behavior='strip',
                        cleanup=True
                    )
    if show_report and len(problems) > 0:
        log.info('problems during ingestion:{}\n'.format(
            '\n'.join(problems)
        ))
def ingest_csv(self, path, data_frequency, empty_rows_behavior='strip',
               duplicates_threshold=100):
    """
    Ingest price data from a CSV file.

    Parameters
    ----------
    path: str
    data_frequency: str
    empty_rows_behavior: str
    duplicates_threshold: int

    Returns
    -------
    list[str]
        A list of potential problems detected during ingestion.
    """
    log.info('ingesting csv file: {}'.format(path))
    if self.exchange is None:
        # Avoid circular dependencies
        from catalyst.exchange.utils.factory import get_exchange
        self.exchange = get_exchange(self.exchange_name)
    problems = []
    df = pd.read_csv(
        path,
        header=0,
        sep=',',
        dtype=dict(
            symbol=np.object_,
            last_traded=np.object_,
            open=np.float64,
            high=np.float64,
            low=np.float64,
            close=np.float64,
            volume=np.float64
        ),
        parse_dates=['last_traded'],
        index_col=None
    )
    min_start_dt = None
    max_end_dt = None
    symbols = df['symbol'].unique()
    # Apply the timezone before creating an index for simplicity
    df['last_traded'] = df['last_traded'].dt.tz_localize(pytz.UTC)
    df.set_index(['symbol', 'last_traded'], drop=True, inplace=True)
    assets = dict()
    for symbol in symbols:
        # NOTE(review): start_dt/end_dt are computed over the whole frame,
        # not per symbol — confirm this is intended for multi-symbol files.
        start_dt = df.index.get_level_values(1).min()
        end_dt = df.index.get_level_values(1).max()
        end_dt_key = 'end_{}'.format(data_frequency)
        market = self.exchange.get_market(symbol)
        if market is None:
            raise ValueError('symbol not available in the exchange.')
        params = dict(
            exchange=self.exchange.name,
            data_source='local',
            exchange_symbol=market['id'],
        )
        mixin_market_params(self.exchange_name, params, market)
        asset_def = self.exchange.get_asset_def(market, True)
        if asset_def is not None:
            # Widen the known trading range with the csv's observed range.
            params['symbol'] = asset_def['symbol']
            params['start_date'] = asset_def['start_date'] \
                if asset_def['start_date'] < start_dt else start_dt
            params['end_date'] = asset_def[end_dt_key] \
                if asset_def[end_dt_key] > end_dt else end_dt
            params['end_daily'] = end_dt \
                if data_frequency == 'daily' else asset_def['end_daily']
            params['end_minute'] = end_dt \
                if data_frequency == 'minute' else asset_def['end_minute']
        else:
            params['symbol'] = get_catalyst_symbol(market)
            params['end_daily'] = end_dt \
                if data_frequency == 'daily' else 'N/A'
            params['end_minute'] = end_dt \
                if data_frequency == 'minute' else 'N/A'
        if min_start_dt is None or start_dt < min_start_dt:
            min_start_dt = start_dt
        if max_end_dt is None or end_dt > max_end_dt:
            max_end_dt = end_dt
        asset = TradingPair(**params)
        assets[market['id']] = asset
    save_exchange_symbols(self.exchange_name, assets, True)
    # One writer spanning the full observed date range.
    writer = self.get_writer(
        start_dt=min_start_dt.replace(hour=00, minute=00),
        end_dt=max_end_dt.replace(hour=23, minute=59),
        data_frequency=data_frequency
    )
    for symbol in assets:
        # here the symbol is the market['id']
        asset = assets[symbol]
        ohlcv_df = df.loc[
            (df.index.get_level_values(0) == asset.symbol)
        ]  # type: pd.DataFrame
        ohlcv_df.index = ohlcv_df.index.droplevel(0)
        period_start = start_dt.replace(hour=00, minute=00)
        period_end = end_dt.replace(hour=23, minute=59)
        periods = self.get_calendar_periods_range(
            period_start, period_end, data_frequency
        )
        # We're not really resampling but ensuring that each frame
        # contains data
        ohlcv_df = ohlcv_df.reindex(periods, method='ffill')
        ohlcv_df['volume'] = ohlcv_df['volume'].fillna(0)
        problems += self.ingest_df(
            ohlcv_df=ohlcv_df,
            data_frequency=data_frequency,
            asset=asset,
            writer=writer,
            empty_rows_behavior=empty_rows_behavior,
            duplicates_threshold=duplicates_threshold
        )
    # Drop None entries (no problem found) from the report.
    return filter(partial(is_not, None), problems)
def ingest(self, data_frequency, include_symbols=None,
           exclude_symbols=None, start=None, end=None, csv=None,
           show_progress=True, show_breakdown=True, show_report=True):
    """
    Ingest price data based on the specified parameters.

    Parameters
    ----------
    data_frequency: str
        Comma-separated list of frequencies, e.g. 'daily,minute'.
    include_symbols: str
    exclude_symbols: str
    start: pd.Timestamp
    end: pd.Timestamp
    csv: str
        Optional path to a csv file; when given it is ingested instead
        of downloading data from the exchange.
    show_progress: bool
    show_breakdown: bool
    show_report: bool
    """
    if csv is not None:
        self.ingest_csv(csv, data_frequency)
        return
    if self.exchange is None:
        # Deferred import to avoid a circular dependency.
        from catalyst.exchange.utils.factory import get_exchange
        self.exchange = get_exchange(self.exchange_name)
    markets = get_assets(
        self.exchange, include_symbols, exclude_symbols
    )
    for freq in data_frequency.split(','):
        self.ingest_assets(
            assets=markets,
            data_frequency=freq,
            start_dt=start,
            end_dt=end,
            show_progress=show_progress,
            show_breakdown=show_breakdown,
            show_report=show_report
        )
def get_history_window_series_and_load(self,
                                       assets,
                                       end_dt,
                                       bar_count,
                                       field,
                                       data_frequency,
                                       algo_end_dt=None,
                                       force_auto_ingest=False
                                       ):
    """
    Retrieve price data history, ingest missing data.

    Parameters
    ----------
    assets: list[TradingPair]
    end_dt: pd.Timestamp
    bar_count: int
    field: str
    data_frequency: str
    algo_end_dt: pd.Timestamp
        Upper bound used when auto-ingesting missing data.
    force_auto_ingest: bool
        Ingest missing data even when AUTO_INGEST is disabled.

    Returns
    -------
    pd.DataFrame or dict[TradingPair, pd.Series]
        A DataFrame normally; the raw series dict after an auto-ingest.
    """
    if AUTO_INGEST or force_auto_ingest:
        try:
            series = self.get_history_window_series(
                assets=assets,
                end_dt=end_dt,
                bar_count=bar_count,
                field=field,
                data_frequency=data_frequency,
            )
            return pd.DataFrame(series)
        except PricingDataNotLoadedError:
            # Data is missing: ingest it, then retry with a fresh reader.
            start_dt = get_start_dt(end_dt, bar_count, data_frequency)
            log.info(
                'pricing data for {symbol} not found in range '
                '{start} to {end}, updating the bundles.'.format(
                    symbol=[asset.symbol for asset in assets],
                    start=start_dt,
                    end=end_dt
                )
            )
            self.ingest_assets(
                assets=assets,
                start_dt=start_dt,
                end_dt=algo_end_dt,  # TODO: apply trailing bars
                data_frequency=data_frequency,
                show_progress=True,
                show_breakdown=True
            )
            series = self.get_history_window_series(
                assets=assets,
                end_dt=end_dt,
                bar_count=bar_count,
                field=field,
                data_frequency=data_frequency,
                reset_reader=True,
            )
            return series
    else:
        series = self.get_history_window_series(
            assets=assets,
            end_dt=end_dt,
            bar_count=bar_count,
            field=field,
            data_frequency=data_frequency,
        )
        return pd.DataFrame(series)
def get_spot_values(self,
                    assets,
                    field,
                    dt,
                    data_frequency,
                    reset_reader=False
                    ):
    """
    The spot values for the given assets, field and date. Reads from
    the exchange data bundle.

    Parameters
    ----------
    assets: list[TradingPair]
    field: str
    dt: pd.Timestamp
    data_frequency: str
    reset_reader: bool
        If True, discard the cached reader and re-open the bundle.

    Returns
    -------
    list[float]

    Raises
    ------
    PricingDataNotLoadedError
        On any failure while reading the values.
    """
    values = []
    try:
        reader = self.get_reader(data_frequency)
        if reset_reader:
            del self._readers[reader._rootdir]
            reader = self.get_reader(data_frequency)
        for asset in assets:
            value = reader.get_value(
                sid=asset.sid,
                dt=dt,
                field=field
            )
            values.append(value)
        return values
    except Exception:
        # NOTE(review): any failure (including programming errors) is
        # reported as missing pricing data — consider narrowing this clause.
        symbols = [asset.symbol for asset in assets]
        raise PricingDataNotLoadedError(
            field=field,
            first_trading_day=min([asset.start_date for asset in assets]),
            exchange=self.exchange_name,
            symbols=symbols,
            symbol_list=','.join(symbols),
            data_frequency=data_frequency,
            start_dt=dt,
            end_dt=dt
        )
def get_history_window_series(self,
                              assets,
                              end_dt,
                              bar_count,
                              field,
                              data_frequency,
                              reset_reader=False):
    """Load a history window for each asset from the exchange bundle.

    Parameters
    ----------
    assets: list[TradingPair]
    end_dt: pd.Timestamp
    bar_count: int
    field: str
    data_frequency: str
    reset_reader: bool

    Returns
    -------
    dict[TradingPair, pd.Series]

    Raises
    ------
    PricingDataNotLoadedError
        If the bundle is missing or does not cover the requested range.
    DataCorruptionError
        If the bundle returns no arrays for the range.
    PricingDataValueError
        If the values cannot be aligned with the calendar periods.
    """
    start_dt = get_start_dt(end_dt, bar_count, data_frequency, False)
    start_dt, _ = self.get_adj_dates(
        start_dt, end_dt, assets, data_frequency
    )
    # This is an attempt to resolve some caching with the reader
    # when auto-ingesting data.
    # TODO: needs more work
    reader = self.get_reader(data_frequency)
    if reset_reader:
        del self._readers[reader._rootdir]
        reader = self.get_reader(data_frequency)
    if reader is None:
        symbols = [asset.symbol for asset in assets]
        raise PricingDataNotLoadedError(
            field=field,
            first_trading_day=min([asset.start_date for asset in assets]),
            exchange=self.exchange_name,
            symbols=symbols,
            symbol_list=','.join(symbols),
            data_frequency=data_frequency,
            start_dt=start_dt,
            end_dt=end_dt
        )
    series = dict()
    for asset in assets:
        asset_start_dt, _ = self.get_adj_dates(
            start_dt, end_dt, assets, data_frequency
        )
        in_bundle = range_in_bundle(
            asset, asset_start_dt, end_dt, reader
        )
        if not in_bundle:
            raise PricingDataNotLoadedError(
                field=field,
                first_trading_day=asset.start_date,
                exchange=self.exchange_name,
                symbols=asset.symbol,
                symbol_list=asset.symbol,
                data_frequency=data_frequency,
                start_dt=asset_start_dt,
                end_dt=end_dt
            )
        periods = self.get_calendar_periods_range(
            asset_start_dt, end_dt, data_frequency
        )
        # This does not behave well when requesting multiple assets
        # when the start or end date of one asset is outside of the range
        # looking at the logic in load_raw_arrays(), we are not achieving
        # any performance gain by requesting multiple sids at once. It's
        # looping through the sids and making separate requests anyway.
        # NOTE(review): load_raw_arrays is called with start_dt while
        # 'periods' uses asset_start_dt — confirm the two ranges always
        # have the same length.
        arrays = reader.load_raw_arrays(
            sids=[asset.sid],
            fields=[field],
            start_dt=start_dt,
            end_dt=end_dt
        )
        if len(arrays) == 0:
            raise DataCorruptionError(
                exchange=self.exchange_name,
                symbols=asset.symbol,
                start_dt=asset_start_dt,
                end_dt=end_dt
            )
        field_values = arrays[0][:, 0]
        try:
            value_series = pd.Series(field_values, index=periods)
            series[asset] = value_series
        except ValueError as e:
            raise PricingDataValueError(
                exchange=asset.exchange,
                symbol=asset.symbol,
                start_dt=asset_start_dt,
                end_dt=end_dt,
                error=e
            )
    return series
def clean(self, data_frequency):
    """
    Removing the bundle data from the catalyst folder.

    Parameters
    ----------
    data_frequency: str
        A single frequency to clean, or None to clean both the
        'daily' and 'minute' bundles.
    """
    log.debug('cleaning exchange {}, frequency {}'.format(
        self.exchange_name, data_frequency
    ))
    root = get_exchange_folder(self.exchange_name)
    # Drop the symbol index files if present.
    for filename in ('symbols.json', 'symbols_local.json'):
        target = os.path.join(root, filename)
        if os.path.isfile(target):
            os.remove(target)
    # Drop leftover temporary bundles.
    temp_bundles = os.path.join(root, 'temp_bundles')
    if os.path.isdir(temp_bundles):
        log.debug('removing folder and content: {}'.format(temp_bundles))
        shutil.rmtree(temp_bundles)
        log.debug('{} removed'.format(temp_bundles))
    # Drop the actual bundle folder(s).
    frequencies = [data_frequency] if data_frequency is not None \
        else ['daily', 'minute']
    for frequency in frequencies:
        frequency_bundle = os.path.join(root, '{}_bundle'.format(frequency))
        if os.path.isdir(frequency_bundle):
            log.debug(
                'removing folder and content: {}'.format(frequency_bundle)
            )
            shutil.rmtree(frequency_bundle)
            log.debug('{} removed'.format(frequency_bundle))
| |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource printer base class.
Each printer has three main attributes, all accessible as strings in the
--format='NAME[ATTRIBUTES](PROJECTION)' option:
NAME: str, The printer name.
[ATTRIBUTES]: str, An optional [no-]name[=value] list of attributes. Unknown
attributes are silently ignored. Attributes are added to a printer local
dict indexed by name.
(PROJECTION): str, List of resource names to be included in the output
resource. Unknown names are silently ignored. Resource names are
'.'-separated key identifiers with an implicit top level resource name.
Example:
gcloud compute instances list \
--format='table[box](name, networkInterfaces[0].networkIP)'
"""
from googlecloudsdk.core import exceptions as core_exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core.resource import resource_projector
from googlecloudsdk.core.resource import resource_property
# Structured output indentation.
STRUCTURED_INDENTATION = 2
class Error(core_exceptions.Error):
  """Base exception for all resource printer errors in this module."""
class ProjectionRequiredError(Error):
  """Raised when a format requires a projection but none was specified."""
class _ResourceMarker(object):
  """A marker that can be injected into resource lists.

  Subclasses trigger a printer action (e.g. Finish or Page) when they are
  encountered in the resource stream.
  """

  def Act(self, printer):
    """Called by ResourcePrinter.Addrecord().

    Args:
      printer: The printer object.
    """
    pass
class FinishMarker(_ResourceMarker):
  """A resource list Finish marker."""

  def Act(self, printer):
    # Invokes the printer's Finish() processing when encountered.
    printer.Finish()
class PageMarker(_ResourceMarker):
  """Marker that triggers printer.Page() when encountered in a list."""

  def Act(self, printer):
    printer.Page()
def IsResourceMarker(resource):
  """Returns True if resource is a _ResourceMarker instance."""
  return isinstance(resource, _ResourceMarker)
class ResourcePrinter(object):
  """Base class for printing JSON-serializable Python objects.

  Attributes:
    attributes: Optional printer attribute dict indexed by attribute name.
    _by_columns: True if AddRecord() expects a list of columns.
    column_attributes: Projection ColumnAttributes().
    _console_attr: The console attributes. May be ignored by some printers.
    _empty: True if there are no records.
    _heading: The list of column heading label strings.
    _is_legend_done: True if AddLegend() has already been called and there have
      been no more AddRecord() calls since then.
    _name: Format name.
    _non_empty_projection_required: True if the printer requires a non-empty
      projection.
    _out: Output stream.
    _process_record: The function called to process each record passed to
      AddRecord() before calling _AddRecord(). It is called like this:
        record = process_record(record)
    _printer: The resource_printer.Printer method for nested formats.

  Printer attributes:
    empty-legend=_SENTENCES_: Prints _SENTENCES_ to the *status* logger if there
      are no items. The default *empty-legend* is "Listed 0 items.".
      *no-empty-legend* disables the default.
    legend=_SENTENCES_: Prints _SENTENCES_ to the *out* logger after the last
      item if there is at least one item.
    legend-log=_TYPE_: Prints the legend to the _TYPE_ logger instead of the
      default. _TYPE_ may be: *out* (the default), *status* (standard error),
      *debug*, *info*, *warn*, or *error*.
  """
  def __init__(self, out=None, name=None, attributes=None,
               column_attributes=None, by_columns=False, process_record=None,
               non_empty_projection_required=False, printer=None,
               console_attr=None):
    """Constructor.

    Args:
      out: The output stream, log.out if None. If the 'private' attribute is set
        and the output stream is a log._ConsoleWriter then the underlying stream
        is used instead to disable output to the log file.
      name: The format name.
      attributes: Optional printer attribute dict indexed by attribute name.
      column_attributes: Projection ColumnAttributes().
      by_columns: True if AddRecord() expects a list of columns.
      process_record: The function called to process each record passed to
        AddRecord() before calling _AddRecord(). It is called like this:
          record = process_record(record)
      non_empty_projection_required: True if the printer requires a non-empty
        projection.
      printer: The resource_printer.Printer method for nested formats.
      console_attr: The console attributes for the output stream. Ignored by
        some printers. If None then printers that require it will initialize it
        to match out.
    """
    self.attributes = attributes or {}
    self._by_columns = by_columns
    self.column_attributes = column_attributes
    self._console_attr = console_attr
    self._empty = True
    self._heading = None
    self._is_legend_done = False
    self._name = name
    self._non_empty_projection_required = non_empty_projection_required
    self._out = out or log.out
    if 'private' in self.attributes:
      try:
        # Disable log file writes by printing directly to the console stream.
        self._out = self._out.GetConsoleWriterStream()
      except AttributeError:
        # The stream is not a log._ConsoleWriter; leave it unchanged.
        pass
    # By default every record is run through a compiled projection evaluator.
    self._process_record = (process_record or
                            resource_projector.Compile().Evaluate)
    self._printer = printer
  def AddHeading(self, heading):
    """Overrides the default heading.

    If the printer does not support headings then this is a no-op.

    Args:
      heading: List of column heading strings that overrides the default
        heading.
    """
    self._heading = heading
  def _AddRecord(self, record, delimit=True):
    """Format specific AddRecord().

    Subclasses override this hook to do the actual formatting.

    Args:
      record: A JSON-serializable object.
      delimit: Prints resource delimiters if True.
    """
    pass
  def AddRecord(self, record, delimit=True):
    """Adds a record for printing.

    Streaming formats (e.g., YAML) can print results at each AddRecord() call.
    Non-streaming formats (e.g., JSON, table(...)) may cache data at each
    AddRecord() call and not print until Finish() is called.

    Args:
      record: A JSON-serializable object.
      delimit: Prints resource delimiters if True.
    """
    if IsResourceMarker(record):
      # Markers trigger printer actions (Finish/Page) rather than being
      # formatted as data.
      record.Act(self)
    else:
      self._empty = False
      # More records enables the legend to be printed multiple times.
      self._is_legend_done = False
      self._AddRecord(self._process_record(record), delimit)
  def AddLegend(self):
    """Prints the table legend if it was specified.

    The legend is one or more lines of text printed after the table data.
    """
    if self._is_legend_done:
      return
    self._is_legend_done = True
    # Maps logger type names to one-line writer callables.
    writers = {
        'out': lambda x: self._out.write(x + '\n'),
        'status': lambda x: log.status.write(x + '\n'),
        'debug': log.debug,
        'info': log.info,
        'warn': log.warn,
        'error': log.error,
    }
    log_type = self.attributes.get('legend-log')
    # TODO(user): drop the 'log' check when the log=TYPE attribute is added.
    if not log_type:
      # Fall back to the deprecated 'log' attribute, warning if it is used.
      log_type = self.attributes.get('log')
      if log_type:
        log.warn('[log={0}] is deprecated. '
                 'Use [legend-log={0}] instead.'.format(log_type))
    if self._empty:
      # No records: the empty-legend goes to the status logger by default.
      if not log_type:
        log_type = 'status'
      legend = self.attributes.get('empty-legend')
      if legend is None and 'no-empty-legend' not in self.attributes:
        legend = 'Listed 0 items.'
    else:
      legend = self.attributes.get('legend')
      if legend and not log_type:
        # Separate the legend from the preceding table output.
        legend = '\n' + legend
    if legend is not None:
      # NOTE(review): an unrecognized log_type makes writers.get() return None
      # and the call below raise TypeError -- confirm that legend-log values
      # are validated by the format parser upstream.
      writer = writers.get(log_type or 'out')
      writer(legend)
  def ByColumns(self):
    """Returns True if AddRecord() expects a list of columns.

    Returns:
      True if AddRecord() expects a list of columns.
    """
    return self._by_columns
  def Finish(self):
    """Prints the results for non-streaming formats."""
    # Subclasses that buffer records override this to emit them.
    pass
  def Page(self):
    """Flushes intermediate results for streaming formats."""
    pass
  def PrintSingleRecord(self, record):
    """Print one record by itself, without resource delimiters.

    Args:
      record: A JSON-serializable object.
    """
    self.AddRecord(record, delimit=False)
    self.Finish()
  def Print(self, resources, single=False, intermediate=False):
    """Prints resources using printer.AddRecord() and printer.Finish().

    Args:
      resources: A singleton or list of JSON-serializable Python objects.
      single: If True then resources is a single item and not a list.
        For example, use this to print a single object as JSON.
      intermediate: This is an intermediate call, do not call Finish().

    Raises:
      ProjectionRequiredError: If the projection is empty and the format
        requires a non-empty projection.
    """
    if 'disable' in self.attributes:
      # Disable formatted output and do not consume the resources.
      return
    if (self._non_empty_projection_required and
        not self.column_attributes.Columns()):
      raise ProjectionRequiredError(
          'Format [{0}] requires a non-empty projection.'.format(
              self.column_attributes.Name()))
    # Resources may be a generator and since generators can raise exceptions, we
    # have to call Finish() in the finally block to make sure that the resources
    # we've been able to pull out of the generator are printed before control is
    # given to the exception-handling code.
    try:
      if resources:
        if single or not resource_property.IsListLike(resources):
          self.AddRecord(resources, delimit=intermediate)
        else:
          for resource in resources:
            self.AddRecord(resource)
    finally:
      if not intermediate:
        self.Finish()
  def Printer(self, *args, **kwargs):
    """Calls the resource_printer.Printer() method (for nested printers)."""
    return self._printer(*args, **kwargs)
| |
import mock
import sys
from cron_sentry.runner import CommandReporter, DEFAULT_MAX_MESSAGE_LENGTH, run, parser
@mock.patch('cron_sentry.runner.Client')
def test_command_reporter_accepts_parameters(ClientMock):
    # A failing command (invalid option) must produce a Sentry report.
    cmd = ['date', '--invalid-option']
    CommandReporter(cmd, 'http://testdsn', DEFAULT_MAX_MESSAGE_LENGTH).run()
    assert ClientMock().captureMessage.called
@mock.patch('cron_sentry.runner.Client')
def test_command_reporter_works_with_no_params_commands(ClientMock):
    # A command that succeeds must not trigger any Sentry report.
    CommandReporter(['date'], 'http://testdsn', DEFAULT_MAX_MESSAGE_LENGTH).run()
    assert not ClientMock().captureMessage.called
@mock.patch('cron_sentry.runner.sys')
@mock.patch('cron_sentry.runner.Client')
def test_command_reporter_keeps_stdout_and_stderr(ClientMock, sys_mock):
    # The child's output must be echoed through and attached to the event.
    script = """
import sys
sys.stdout.write("test-out")
sys.stderr.write("test-err")
sys.exit(2)
"""
    command = [sys.executable, '-c', script]
    client = ClientMock()
    CommandReporter(command, 'http://testdsn', DEFAULT_MAX_MESSAGE_LENGTH).run()
    sys_mock.stdout.write.assert_called_with('test-out')
    sys_mock.stderr.write.assert_called_with('test-err')
    expected_extra = {
        'command': command,
        'exit_status': 2,
        "last_lines_stdout": "test-out",
        "last_lines_stderr": "test-err",
    }
    client.captureMessage.assert_called_with(
        mock.ANY, time_spent=mock.ANY, data=mock.ANY, extra=expected_extra)
@mock.patch('cron_sentry.runner.sys')
@mock.patch('cron_sentry.runner.Client')
def test_reports_correctly_to_with_long_messages_but_trims_stdout_and_stderr(ClientMock, sys_mock):
    # Oversized output is trimmed to the default limit with a "..." prefix.
    script = """
import sys
sys.stdout.write("a" * 20000)
sys.stderr.write("b" * 20000)
sys.exit(2)
"""
    command = [sys.executable, '-c', script]
    client = ClientMock()
    CommandReporter(command, 'http://testdsn', DEFAULT_MAX_MESSAGE_LENGTH).run()
    expected_stdout = '...{0}'.format('a' * (DEFAULT_MAX_MESSAGE_LENGTH - 3))
    expected_stderr = '...{0}'.format('b' * (DEFAULT_MAX_MESSAGE_LENGTH - 3))
    sys_mock.stdout.write.assert_called_with(expected_stdout)
    sys_mock.stderr.write.assert_called_with(expected_stderr)
    expected_extra = {
        'command': command,
        'exit_status': 2,
        "last_lines_stdout": expected_stdout,
        "last_lines_stderr": expected_stderr,
    }
    client.captureMessage.assert_called_with(
        mock.ANY, time_spent=mock.ANY, data=mock.ANY, extra=expected_extra)
@mock.patch('cron_sentry.runner.sys')
@mock.patch('cron_sentry.runner.CommandReporter')
def test_command_line_should_support_command_args_without_double_dashes(CommandReporterMock, sys_mock):
    # Everything after cron-sentry's own options is treated as the command.
    args = ['--dsn', 'http://testdsn', 'command', '--arg1', 'value1', '--arg2', 'value2']
    run(args)
    CommandReporterMock.assert_called_with(
        cmd=args[2:],
        dsn='http://testdsn',
        max_message_length=DEFAULT_MAX_MESSAGE_LENGTH,
        quiet=False)
@mock.patch('cron_sentry.runner.sys')
@mock.patch('cron_sentry.runner.CommandReporter')
def test_command_line_should_support_command_with_double_dashes(CommandReporterMock, sys_mock):
    # An explicit '--' separator also delimits the wrapped command.
    args = ['--dsn', 'http://testdsn', '--', 'command', '--arg1', 'value1', '--arg2', 'value2']
    run(args)
    CommandReporterMock.assert_called_with(
        cmd=args[3:],
        dsn='http://testdsn',
        max_message_length=DEFAULT_MAX_MESSAGE_LENGTH,
        quiet=False)
@mock.patch('cron_sentry.runner.sys')
@mock.patch('argparse._sys')
@mock.patch('cron_sentry.runner.CommandReporter')
def test_should_display_help_text_and_exit_with_1_if_no_command_is_specified(CommandReporterMock, argparse_sys, cron_sentry_sys):
    # Missing command: print an error plus usage, exit 1, and never report.
    run([])
    cron_sentry_sys.stderr.write.assert_called_with("ERROR: Missing command parameter!\n")
    argparse_sys.stdout.write.assert_called_with(parser.format_usage())
    cron_sentry_sys.exit.assert_called_with(1)
    assert not CommandReporterMock.called
@mock.patch('cron_sentry.runner.sys')
@mock.patch('cron_sentry.runner.Client')
def test_exit_status_code_should_be_preserved(ClientMock, sys_mock):
    # cron-sentry must exit with the wrapped command's own status code.
    run([sys.executable, '-c', 'import sys; sys.exit(123)'])
    sys_mock.exit.assert_called_with(123)
@mock.patch('cron_sentry.runner.sys')
@mock.patch('cron_sentry.runner.Client')
def test_should_trim_stdout_and_stderr_based_on_command_line(ClientMock, sys_mock):
    # --max-message-length overrides the default trimming limit.
    script = """
import sys
sys.stdout.write("a" * 20000 + "end")
sys.stderr.write("b" * 20000 + "end")
sys.exit(2)
"""
    run(['--dsn', 'http://testdsn',
         '--max-message-length', '100',
         sys.executable, '-c', script])
    # -3 refers to "..." and "end"
    expected_stdout = '...{0}end'.format('a' * (100 - 3 - 3))
    expected_stderr = '...{0}end'.format('b' * (100 - 3 - 3))
    sys_mock.stdout.write.assert_called_with(expected_stdout)
    sys_mock.stderr.write.assert_called_with(expected_stderr)
    ClientMock().captureMessage.assert_called_with(
        mock.ANY,
        time_spent=mock.ANY,
        data=mock.ANY,
        extra={
            'command': mock.ANY,
            'exit_status': mock.ANY,
            "last_lines_stdout": expected_stdout,
            "last_lines_stderr": expected_stderr,
        })
@mock.patch('cron_sentry.runner.sys')
@mock.patch('cron_sentry.runner.Client')
def test_should_suppress_stdout_and_stderr_based_on_command_line(ClientMock, sys_mock):
    # With --quiet the output is still captured for Sentry but not echoed.
    script = """
import sys
sys.stdout.write("a" * 100 + "end")
sys.stderr.write("b" * 100 + "end")
sys.exit(2)
"""
    run(['--dsn', 'http://testdsn',
         '--quiet',
         sys.executable, '-c', script])
    assert not sys_mock.stdout.write.called
    assert not sys_mock.stderr.write.called
    ClientMock().captureMessage.assert_called_with(
        mock.ANY,
        time_spent=mock.ANY,
        data=mock.ANY,
        extra={
            'command': mock.ANY,
            'exit_status': mock.ANY,
            "last_lines_stdout": "a" * 100 + "end",
            "last_lines_stderr": "b" * 100 + "end",
        })
| |
import json
import re
from hearthbreaker.cards.base import MinionCard, WeaponCard
from hearthbreaker.game_objects import Weapon, Minion
import tests.card_tests.druid_tests
import tests.card_tests.mage_tests
import tests.card_tests.hunter_tests
import tests.card_tests.paladin_tests
import tests.card_tests.priest_tests
import tests.card_tests.rogue_tests
import tests.card_tests.shaman_tests
import tests.card_tests.warlock_tests
import tests.card_tests.warrior_tests
import tests.card_tests.neutral_tests
# Load the shared JSON card definitions once at import time; JSONTester.setUp()
# patches the card classes from this list before every test.
with open("card_defs.json", "r") as file:
    cards = json.load(file)
class JSONTester:
    # Mixin that re-runs the existing card test suites with card behavior
    # loaded from card_defs.json instead of the hand-written implementations.
    def define_type(self, card_def):
        # Builds replacement __init__/create_* callables closed over one JSON
        # card definition and patches them onto the matching hearthbreaker
        # card class. The originals are saved in self.old_attrs so that
        # tearDown() can restore them.
        from hearthbreaker.constants import CHARACTER_CLASS, MINION_TYPE, CARD_RARITY
        from hearthbreaker.tags.base import Battlecry, Choice, Deathrattle, Effect, Aura, Buff
        import hearthbreaker.cards
        def __init__(self):
            # Replacement MinionCard.__init__ built from card_def.
            init_dict = {
                'name': card_def['name'],
                'mana': card_def['mana'],
                'rarity': CARD_RARITY.from_str(card_def['rarity'])
            }
            if 'character_class' in card_def:
                init_dict['character_class'] = CHARACTER_CLASS.from_str(card_def['character_class'])
            if 'minion_type' in card_def:
                init_dict['minion_type'] = MINION_TYPE.from_str(card_def['minion_type'])
            if 'battlecry' in card_def:
                init_dict['battlecry'] = tuple(Battlecry.from_json(**battlecry) for battlecry in card_def['battlecry'])
            if 'choices' in card_def:
                init_dict['choices'] = [Choice.from_json(**choice) for choice in card_def['choices']]
            if 'combo' in card_def:
                init_dict['combo'] = Battlecry.from_json(**card_def['combo'])
            if 'overload' in card_def:
                init_dict['overload'] = card_def['overload']
            if 'buffs' in card_def:
                init_dict['buffs'] = [Buff.from_json(**buff) for buff in card_def['buffs']]
            if 'auras' in card_def:
                init_dict['auras'] = [Aura.from_json(**aura) for aura in card_def['auras']]
            if 'effects' in card_def:
                init_dict['effects'] = [Effect.from_json(**effect) for effect in card_def['effects']]
            MinionCard.__init__(self, **init_dict)
        def __init_weapon__(self):
            # Replacement WeaponCard.__init__; unlike minions, weapons take a
            # single battlecry and have no minion_type/choices entries.
            init_dict = {
                'name': card_def['name'],
                'mana': card_def['mana'],
                'rarity': CARD_RARITY.from_str(card_def['rarity'])
            }
            if 'character_class' in card_def:
                init_dict['character_class'] = CHARACTER_CLASS.from_str(card_def['character_class'])
            if 'battlecry' in card_def:
                init_dict['battlecry'] = Battlecry.from_json(**card_def['battlecry'])
            if 'combo' in card_def:
                init_dict['combo'] = Battlecry.from_json(**card_def['combo'])
            if 'overload' in card_def:
                init_dict['overload'] = card_def['overload']
            if 'buffs' in card_def:
                init_dict['buffs'] = [Buff.from_json(**buff) for buff in card_def['buffs']]
            if 'auras' in card_def:
                init_dict['auras'] = [Aura.from_json(**aura) for aura in card_def['auras']]
            if 'effects' in card_def:
                init_dict['effects'] = [Effect.from_json(**effect) for effect in card_def['effects']]
            WeaponCard.__init__(self, **init_dict)
        def create_minion(self, player):
            # Replacement create_minion; the optional 'impl' section carries
            # tags that live on the Minion object rather than on the card.
            create_dict = {
                'attack': card_def['attack'],
                'health': card_def['health']
            }
            if "impl" in card_def:
                impl = card_def['impl']
                if 'effects' in impl:
                    create_dict['effects'] = [Effect.from_json(**effect) for effect in impl['effects']]
                if 'auras' in impl:
                    create_dict['auras'] = [Aura.from_json(**aura) for aura in impl['auras']]
                if 'buffs' in impl:
                    create_dict['buffs'] = [Buff.from_json(**buff) for buff in impl['buffs']]
            if 'enrage' in card_def:
                create_dict['enrage'] = [Aura.from_json(**enrage) for enrage in card_def['enrage']]
            if 'deathrattle' in card_def:
                create_dict['deathrattle'] = [Deathrattle.from_json(**deathrattle)
                                              for deathrattle in card_def['deathrattle']]
            return Minion(**create_dict)
        def create_weapon(self, player):
            # Replacement create_weapon, mirroring create_minion above.
            create_dict = {
                'attack_power': card_def['attack'],
                'durability': card_def['durability']
            }
            if "impl" in card_def:
                impl = card_def['impl']
                if 'effects' in impl:
                    create_dict['effects'] = [Effect.from_json(**effect) for effect in impl['effects']]
                if 'auras' in impl:
                    create_dict['auras'] = [Aura.from_json(**aura) for aura in impl['auras']]
                if 'buffs' in impl:
                    create_dict['buffs'] = [Buff.from_json(**buff) for buff in impl['buffs']]
            if 'deathrattle' in card_def:
                create_dict['deathrattle'] = Deathrattle.from_json(**card_def['deathrattle'])
            return Weapon(**create_dict)
        if card_def['rarity'] != "Special":
            # Derive the implementation class name: strip punctuation and
            # spaces, then capitalize the first letter.
            if 'ref_name' in card_def:
                name = card_def['ref_name']
            else:
                name = card_def['name']
            name = re.sub("[:'. ()-]", "", name)
            name = "".join([word[0].upper() + word[1:] for word in name.split()])
            cls_def = getattr(hearthbreaker.cards, name, None)
            if cls_def:
                if card_def['type'] == 'minion':
                    # Remember the originals so tearDown() can undo the patch.
                    self.old_attrs[name] = {
                        'init': cls_def.__init__,
                        'create_minion': cls_def.create_minion
                    }
                    cls_def.__init__ = __init__
                    cls_def.create_minion = create_minion
                elif card_def['type'] == 'weapon':
                    self.old_attrs[name] = {
                        'init': cls_def.__init__,
                        'create_weapon': cls_def.create_weapon
                    }
                    cls_def.__init__ = __init_weapon__
                    cls_def.create_weapon = create_weapon
    def setUp(self):
        # Patch every JSON-defined card before each test runs.
        super().setUp()
        self.old_attrs = {}
        for card in cards:
            self.define_type(card)
    def tearDown(self):
        # Restore the original card implementations after each test.
        super().tearDown()
        import hearthbreaker.cards
        for name, defn in self.old_attrs.items():
            cls_def = getattr(hearthbreaker.cards, name)
            # 'create_minion' in the saved dict marks a minion patch;
            # otherwise it was a weapon patch.
            if 'create_minion' in defn:
                cls_def.__init__ = defn['init']
                cls_def.create_minion = defn['create_minion']
            else:
                cls_def.__init__ = defn['init']
                cls_def.create_weapon = defn['create_weapon']
class TestJSONDruid(JSONTester, tests.card_tests.druid_tests.TestDruid):
    """Druid card tests re-run against the JSON card definitions."""
class TestJSONMage(JSONTester, tests.card_tests.mage_tests.TestMage):
    """Mage card tests re-run against the JSON card definitions."""
class TestJSONHunter(JSONTester, tests.card_tests.hunter_tests.TestHunter):
    """Hunter card tests re-run against the JSON card definitions."""
class TestJSONPaladin(JSONTester, tests.card_tests.paladin_tests.TestPaladin):
    """Paladin card tests re-run against the JSON card definitions."""
class TestJSONPriest(JSONTester, tests.card_tests.priest_tests.TestPriest):
    """Priest card tests re-run against the JSON card definitions."""
class TestJSONRogue(JSONTester, tests.card_tests.rogue_tests.TestRogue):
    """Rogue card tests re-run against the JSON card definitions."""
class TestJSONShaman(JSONTester, tests.card_tests.shaman_tests.TestShaman):
    """Shaman card tests re-run against the JSON card definitions."""
class TestJSONWarlock(JSONTester, tests.card_tests.warlock_tests.TestWarlock):
    """Warlock card tests re-run against the JSON card definitions."""
class TestJSONWarrior(JSONTester, tests.card_tests.warrior_tests.TestWarrior):
    """Warrior card tests re-run against the JSON card definitions."""
class TestJSONNeutral(JSONTester, tests.card_tests.neutral_tests.TestCommon):
    """Neutral card tests re-run against the JSON card definitions."""
| |
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Haiku module implementing hierarchical attention over memory."""
import functools
import inspect
from typing import Optional, NamedTuple
import chex
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
_EPSILON = 1e-3
class HierarchicalMemory(NamedTuple):
  """Contents of the hierarchical external memory.

  Shapes below use B = batch size, M = number of stored memories,
  C = chunk size, and D = memory embedding dimension.
  """
  keys: jnp.ndarray  # [B, M, D] one summary key per stored memory chunk
  contents: jnp.ndarray  # [B, M, C, D] full contents of every chunk
  steps_since_last_write: jnp.ndarray  # [B] steps elapsed since last write
  accumulator: jnp.ndarray  # [B, C, D] experiences buffered until next write
def sinusoid_position_encoding(
    sequence_length: int,
    hidden_size: int,
    min_timescale: float = 2.,
    max_timescale: float = 1e4,
) -> jnp.ndarray:
  """Builds transformer-style sinusoidal position encodings.

  Args:
    sequence_length: number of positions L to encode.
    hidden_size: dimension D of each position-encoding vector.
    min_timescale: spacing between the sampled frequency indices.
    max_timescale: largest timescale (i.e. lowest frequency).

  Returns:
    An array of shape [L, D] whose rows encode positions newest-first.
  """
  frequency_ids = np.arange(0, hidden_size, min_timescale)
  inverse_frequencies = max_timescale**(-frequency_ids / hidden_size)
  # Positions count down from sequence_length - 1 to 0.
  positions = np.arange(sequence_length - 1, -1, -1.0)
  phases = np.einsum("i,j->ij", positions, inverse_frequencies)
  # First half of the feature dimension holds sines, second half cosines.
  return np.concatenate([np.sin(phases), np.cos(phases)], axis=-1)
class HierarchicalMemoryAttention(hk.Module):
  """Multi-head attention over hierarchical memory.

  First selects the top-k most relevant memory chunks per query token via
  query/key attention, then attends within each selected chunk and combines
  the results weighted by chunk relevance.
  """
  def __init__(self,
               feature_size: int,
               k: int,
               num_heads: int = 1,
               memory_position_encoding: bool = True,
               init_scale: float = 2.,
               name: Optional[str] = None) -> None:
    """Constructor.

    Args:
      feature_size: size of feature dimension of attention-over-memories
        embedding.
      k: number of memories to sample.
      num_heads: number of attention heads.
      memory_position_encoding: whether to add positional encodings to memories
        during within memory attention.
      init_scale: scale factor for Variance weight initializers.
      name: module name.
    """
    super().__init__(name=name)
    self._size = feature_size
    self._k = k
    self._num_heads = num_heads
    # NOTE(review): _weights is never read in this module -- confirm whether
    # it can be removed or is used by subclasses elsewhere.
    self._weights = None
    self._memory_position_encoding = memory_position_encoding
    self._init_scale = init_scale
  @property
  def num_heads(self):
    # Read-only accessor for the number of within-memory attention heads.
    return self._num_heads
  @hk.transparent
  def _singlehead_linear(self,
                         inputs: jnp.ndarray,
                         hidden_size: int,
                         name: str):
    # Bias-free linear projection used for the chunk-selection query/key heads.
    linear = hk.Linear(
        hidden_size,
        with_bias=False,
        w_init=hk.initializers.VarianceScaling(scale=self._init_scale),
        name=name)
    out = linear(inputs)
    return out
  def __call__(
      self,
      queries: jnp.ndarray,
      hm_memory: HierarchicalMemory,
      hm_mask: Optional[jnp.ndarray] = None) -> jnp.ndarray:
    """Do hierarchical attention over the stored memories.

    Args:
      queries: Tensor [B, Q, E] Query(ies) in, for batch size B, query length
        Q, and embedding dimension E.
      hm_memory: Hierarchical Memory.
      hm_mask: Optional boolean mask tensor of shape [B, Q, M]. Where false,
        the corresponding query timepoints cannot attend to the corresponding
        memory chunks. This can be used for enforcing causal attention on the
        learner, not attending to memories from prior episodes, etc.

    Returns:
      Value updates for each query slot: [B, Q, D]
    """
    # some shape checks
    batch_size, query_length, _ = queries.shape
    (memory_batch_size, num_memories,
     memory_chunk_size, mem_embbedding_size) = hm_memory.contents.shape
    assert batch_size == memory_batch_size
    chex.assert_shape(hm_memory.keys,
                      (batch_size, num_memories, mem_embbedding_size))
    chex.assert_shape(hm_memory.accumulator,
                      (memory_batch_size, memory_chunk_size,
                       mem_embbedding_size))
    chex.assert_shape(hm_memory.steps_since_last_write,
                      (memory_batch_size,))
    if hm_mask is not None:
      chex.assert_type(hm_mask, bool)
      chex.assert_shape(hm_mask,
                        (batch_size, query_length, num_memories))
    # Project queries and (gradient-stopped) memory keys for chunk selection.
    query_head = self._singlehead_linear(queries, self._size, "query")
    key_head = self._singlehead_linear(
        jax.lax.stop_gradient(hm_memory.keys), self._size, "key")
    # What times in the input [t] attend to what times in the memories [T].
    logits = jnp.einsum("btd,bTd->btT", query_head, key_head)  # [B, Q, M]
    scaled_logits = logits / np.sqrt(self._size)
    # Mask last dimension, replacing invalid logits with large negative values.
    # This allows e.g. enforcing causal attention on learner, or blocking
    # attention across episodes
    if hm_mask is not None:
      masked_logits = jnp.where(hm_mask, scaled_logits, -1e6)
    else:
      masked_logits = scaled_logits
    # identify the top-k memories and their relevance weights
    top_k_logits, top_k_indices = jax.lax.top_k(masked_logits, self._k)
    weights = jax.nn.softmax(top_k_logits)  # [B, Q, k], normalized over k
    # set up the within-memory attention
    assert self._size % self._num_heads == 0
    mha_key_size = self._size // self._num_heads
    attention_layer = hk.MultiHeadAttention(
        key_size=mha_key_size,
        model_size=self._size,
        num_heads=self._num_heads,
        w_init_scale=self._init_scale,
        name="within_mem_attn")
    # position encodings
    augmented_contents = hm_memory.contents
    if self._memory_position_encoding:
      position_embs = sinusoid_position_encoding(
          memory_chunk_size, mem_embbedding_size)
      augmented_contents += position_embs[None, None, :, :]
    def _within_memory_attention(sub_inputs, sub_memory_contents, sub_weights,
                                 sub_top_k_indices):
      # Per-batch-element attention; gathering with [Q, k] indices yields the
      # selected chunk contents with shape [Q, k, C, D].
      top_k_contents = sub_memory_contents[sub_top_k_indices, :, :]
      # Now we go deeper, with another vmap over **tokens**, because each token
      # can each attend to different memories.
      def do_attention(sub_sub_inputs, sub_sub_top_k_contents):
        # One query token attends inside each of its k selected chunks.
        tiled_inputs = jnp.tile(sub_sub_inputs[None, None, :],
                                reps=(self._k, 1, 1))
        sub_attention_results = attention_layer(
            query=tiled_inputs,
            key=sub_sub_top_k_contents,
            value=sub_sub_top_k_contents)
        return sub_attention_results
      do_attention = hk_vmap(do_attention, in_axes=0, split_rng=False)
      attention_results = do_attention(sub_inputs, top_k_contents)
      # Drop the singleton query axis: [Q, k, 1, D] -> [Q, k, D].
      attention_results = jnp.squeeze(attention_results, axis=2)
      # Now collapse results across k memories
      attention_results = sub_weights[:, :, None] * attention_results
      attention_results = jnp.sum(attention_results, axis=1)
      return attention_results
    # vmap across batch
    batch_within_memory_attention = hk_vmap(_within_memory_attention,
                                            in_axes=0, split_rng=False)
    outputs = batch_within_memory_attention(
        queries,
        jax.lax.stop_gradient(augmented_contents),
        weights,
        top_k_indices)
    return outputs
@functools.wraps(hk.vmap)
def hk_vmap(*args, **kwargs):
  """Wraps hk.vmap so `split_rng=False` works with older Haiku versions.

  Older versions of Haiku did not have split_rng, but the behavior has always
  been equivalent to split_rng=False, so on such versions the argument is
  validated and stripped before delegating to hk.vmap.

  Args:
    *args: positional arguments forwarded to hk.vmap.
    **kwargs: keyword arguments forwarded to hk.vmap; may include `split_rng`.

  Returns:
    The result of hk.vmap with the (possibly stripped) arguments.

  Raises:
    ValueError: if `split_rng=True` is requested but the installed Haiku
      version cannot honor it.
  """
  if "split_rng" not in inspect.signature(hk.vmap).parameters:
    kwargs.setdefault("split_rng", False)
    # Bug fix: `kwargs.get` is a method and must be called, not subscripted;
    # the original `kwargs.get["split_rng"]` raised TypeError unconditionally.
    if kwargs.get("split_rng"):
      raise ValueError("The installed version of Haiku only supports "
                       "`split_rng=False`, please upgrade Haiku.")
    del kwargs["split_rng"]
  return hk.vmap(*args, **kwargs)
| |
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from pytz import common_timezones
from pretix.base.forms import SettingsForm, VersionedModelForm
from pretix.base.models import Event
class EventCreateForm(VersionedModelForm):
    """Model form for creating a new event under a given organizer."""
    error_messages = {
        'duplicate_slug': _("You already used this slug for a different event. Please choose a new one."),
    }
    class Meta:
        # Only the basic event fields are editable at creation time.
        model = Event
        fields = [
            'name',
            'slug',
            'currency',
            'date_from',
            'date_to',
            'presale_start',
            'presale_end'
        ]
    def __init__(self, *args, **kwargs):
        """Pops the required 'organizer' kwarg used by the slug check."""
        self.organizer = kwargs.pop('organizer')
        super().__init__(*args, **kwargs)
    def clean_slug(self):
        """Rejects slugs already used by another event of this organizer."""
        slug = self.cleaned_data['slug']
        if Event.objects.filter(slug=slug, organizer=self.organizer).exists():
            raise forms.ValidationError(
                self.error_messages['duplicate_slug'],
                code='duplicate_slug'
            )
        return slug
class EventUpdateForm(VersionedModelForm):
    """Model form for editing an existing event; the slug is immutable."""
    def clean_slug(self):
        # Ignore any submitted value: the slug never changes after creation.
        return self.instance.slug
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Render the slug field read-only in the browser; clean_slug()
        # enforces immutability server-side regardless.
        self.fields['slug'].widget.attrs['readonly'] = 'readonly'
    class Meta:
        model = Event
        localized_fields = '__all__'
        fields = [
            'name',
            'slug',
            'currency',
            'date_from',
            'date_to',
            'presale_start',
            'presale_end',
        ]
class EventSettingsForm(SettingsForm):
    """Settings form exposing per-event display, payment, and mail options."""
    show_date_to = forms.BooleanField(
        label=_("Show event end date"),
        help_text=_("If disabled, only event's start date will be displayed to the public."),
        required=False
    )
    show_times = forms.BooleanField(
        label=_("Show dates with time"),
        help_text=_("If disabled, the event's start and end date will be displayed without the time of day."),
        required=False
    )
    payment_term_days = forms.IntegerField(
        label=_('Payment term in days'),
        help_text=_("The number of days after placing an order the user has to pay to preserve his reservation."),
    )
    show_items_outside_presale_period = forms.BooleanField(
        label=_("Show items outside presale period"),
        help_text=_("Show item details before presale has started and after presale has ended"),
        required=False
    )
    presale_start_show_date = forms.BooleanField(
        label=_("Show start date"),
        help_text=_("Show the presale start date before presale has started"),
        required=False
    )
    payment_term_last = forms.DateTimeField(
        label=_('Last date of payments'),
        help_text=_("The last date any payments are accepted. This has precedence over the number of "
                    "days configured above."),
        required=False
    )
    payment_term_accept_late = forms.BooleanField(
        label=_('Accept late payments'),
        help_text=_("Accept payments that come after the end of the order's payment term. "
                    "Payments will only be accepted if the regarding quotas have remaining "
                    "capacity. No payments will be accepted after the 'Last date of payments' "
                    "configured above."),
        required=False
    )
    last_order_modification_date = forms.DateTimeField(
        label=_('Last date of modifications'),
        help_text=_("The last date users can modify details of their orders, such as attendee names or "
                    "answers to questions."),
        required=False
    )
    timezone = forms.ChoiceField(
        choices=((a, a) for a in common_timezones),
        label=_("Default timezone"),
    )
    locales = forms.MultipleChoiceField(
        choices=settings.LANGUAGES,
        # Typo fix: "langauges" -> "languages".
        label=_("Available languages"),
    )
    locale = forms.ChoiceField(
        choices=settings.LANGUAGES,
        label=_("Default language"),
    )
    user_mail_required = forms.BooleanField(
        # Typo fix: "adresses" -> "addresses".
        label=_("Require e-mail addresses"),
        help_text=_("Require all customers to provide an e-mail address."),
        required=False
    )
    attendee_names_asked = forms.BooleanField(
        label=_("Ask for attendee names"),
        help_text=_("Ask for a name for all tickets which include admission to the event."),
        required=False
    )
    attendee_names_required = forms.BooleanField(
        label=_("Require attendee names"),
        help_text=_("Require customers to fill in the names of all attendees."),
        required=False
    )
    max_items_per_order = forms.IntegerField(
        min_value=1,
        label=_("Maximum number of items per order")
    )
    reservation_time = forms.IntegerField(
        min_value=0,
        label=_("Reservation period"),
        # Typo fix: "card" -> "cart".
        help_text=_("The number of minutes the items in a user's cart are reserved for this user."),
    )
    imprint_url = forms.URLField(
        label=_("Imprint URL"),
        required=False,
    )
    contact_mail = forms.EmailField(
        label=_("Contact address"),
        required=False,
        help_text=_("Public email address for contacting the organizer")
    )
    mail_prefix = forms.CharField(
        label=_("Subject prefix"),
        help_text=_("This will be prepended to the subject of all outgoing emails. This could be a short form of "
                    "your event name.")
    )
    mail_from = forms.EmailField(
        label=_("Sender address"),
        help_text=_("Sender address for outgoing e-mails")
    )
class ProviderForm(SettingsForm):
    """
    This is a SettingsForm, but if fields are set to required=True, validation
    errors are only raised if the payment method is enabled.
    """
    def __init__(self, *args, **kwargs):
        # settingspref is the provider's settings key prefix, used to look up
        # the '<prefix>_enabled' flag in clean().
        self.settingspref = kwargs.pop('settingspref')
        super().__init__(*args, **kwargs)
    def prepare_fields(self):
        # Relax every field, remembering its original required flag so that
        # clean() can enforce it only when the provider is enabled.
        for k, v in self.fields.items():
            v._required = v.required
            v.required = False
            v.widget.is_required = False
    def clean(self):
        """Enforces saved required flags only when the provider is enabled."""
        cleaned_data = super().clean()
        enabled = cleaned_data.get(self.settingspref + '_enabled') == 'True'
        if not enabled:
            # Disabled providers skip the required-field enforcement entirely.
            return
        for k, v in self.fields.items():
            val = cleaned_data.get(k)
            # Bug fix: removed a leftover debug print() that wrote to stdout
            # on every validation failure.
            if v._required and (val is None or val == ""):
                self.add_error(k, _('This field is required.'))
class TicketSettingsForm(SettingsForm):
    """Settings for the ticket download feature.

    Fields declared with required=True are only enforced when the feature is
    enabled; see prepare_fields() and clean().
    """
    ticket_download = forms.BooleanField(
        label=_("Use feature"),
        help_text=_("Use pretix to generate tickets for the user to download and print out."),
        required=False
    )
    ticket_download_date = forms.DateTimeField(
        label=_("Download date"),
        help_text=_("Ticket download will be offered after this date."),
        required=True
    )
    def prepare_fields(self):
        # Relax every field, remembering its original required flag so that
        # clean() can enforce it only when the feature is enabled. See clean().
        for k, v in self.fields.items():
            v._required = v.required
            v.required = False
            v.widget.is_required = False
    def clean(self):
        # required=True fields should only be required if the feature is enabled
        cleaned_data = super().clean()
        enabled = cleaned_data.get('ticket_download') == 'True'
        if not enabled:
            return
        for k, v in self.fields.items():
            val = cleaned_data.get(k)
            # Bug fix: removed a leftover debug print() that wrote to stdout
            # on every validation failure.
            if v._required and (val is None or val == ""):
                self.add_error(k, _('This field is required.'))
| |
"""Training Utilities for ViViT."""
import functools
from typing import Callable, Dict, List, Optional, Tuple, Union
from absl import logging
from flax import jax_utils
import flax.linen as nn
import jax
from jax.experimental.optimizers import clip_grads
import jax.numpy as jnp
import jax.profiler
import matplotlib.pyplot as plt
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.model_lib.base_models import model_utils
from scenic.train_lib import optimizers
from scenic.train_lib import train_utils
import seaborn as sns
# Aliases for custom types:
Array = Union[jnp.ndarray, np.ndarray]
# A batch maps field names (e.g. 'inputs', 'label', 'batch_mask') to arrays.
Batch = Dict[str, jnp.ndarray]
# Maps (logits, batch) to {metric_name: (value_sum, normalizer)} pairs.
MetricFn = Callable[[jnp.ndarray, Dict[str, jnp.ndarray]],
                    Dict[str, Tuple[float, int]]]
# Maps (logits, batch, optional model params) to a scalar loss.
LossFn = Callable[[jnp.ndarray, Batch, Optional[jnp.ndarray]], float]
def to_cpu(array: jnp.ndarray):
  """Transfers array (replicated on multiple hosts) to a single host.

  Args:
    array: Replicated array of shape
      [num_hosts, num_devices, local_batch_size, ...]

  Returns:
    array of shape [global_batch_size, ...] where
    global_batch_size = num_devices * local_batch_size
  """
  # unreplicate drops the per-device leading axis, unshard merges the
  # remaining leading axes into one batch axis, and device_get copies the
  # result to host (CPU) memory.
  return jax.device_get(dataset_utils.unshard(jax_utils.unreplicate(array)))
def train_step(
    train_state: train_utils.TrainState,
    batch: Batch,
    *,
    flax_model: nn.Module,
    learning_rate_fn: Callable[[int], float],
    loss_fn: LossFn,
    metrics_fn: MetricFn,
    config: ml_collections.ConfigDict,
    debug: Optional[bool] = False
) -> Tuple[train_utils.TrainState, Dict[str, Tuple[float, int]], float]:
  """Runs a single step of training.

  Given the state of the training and a batch of data, computes
  the loss and updates the parameters of the model.

  Note that in this code, the buffers of the first (train_state) and second
  (batch) arguments are donated to the computation.

  Args:
    train_state: The state of training including the current
      global_step, model_state, rng, and optimizer. The buffer of this argument
      can be donated to the computation.
    batch: A single batch of data. The buffer of this argument can be donated to
      the computation.
    flax_model: A Flax model.
    learning_rate_fn: learning rate scheduler which given the global_step
      generates the learning rate.
    loss_fn: A loss function that given logits, a batch, and parameters of the
      model calculates the loss.
    metrics_fn: A metrics function that given logits and batch of data,
      calculates the metrics as well as the loss.
    config: Configuration of the experiment.
    debug: Whether the debug mode is enabled during training. `debug=True`
      enables model specific logging/storing some values using
      jax.host_callback.

  Returns:
    Updated state of training, computed metrics, and learning rate for logging.
  """
  # Split off a fresh rng to store in the returned train state; `rng` is
  # consumed by this step (mixup and/or dropout).
  new_rng, rng = jax.random.split(train_state.rng)
  if config.get('mixup') and config.mixup.alpha:
    mixup_rng, rng = jax.random.split(rng, 2)
    mixup_rng = train_utils.bind_rng_to_host_device(
        mixup_rng,
        axis_name='batch',
        bind_to=config.mixup.get('bind_to', 'device'))
    batch = dataset_utils.mixup(
        batch,
        config.mixup.alpha,
        config.mixup.get('image_format', 'NTHWC'),
        rng=mixup_rng)
  # Bind the rng to the host/device we are on for dropout.
  dropout_rng = train_utils.bind_rng_to_host_device(
      rng, axis_name='batch', bind_to='device')
  def training_loss_fn(params):
    # Forward pass with mutable batch stats (e.g. BatchNorm running averages).
    variables = {'params': params, **train_state.model_state}
    logits, new_model_state = flax_model.apply(
        variables,
        batch['inputs'],
        mutable=['batch_stats'],
        train=True,
        rngs={'dropout': dropout_rng},
        debug=debug)
    loss = loss_fn(logits, batch, variables['params'])
    return loss, (new_model_state, logits)
  compute_gradient_fn = jax.value_and_grad(training_loss_fn, has_aux=True)
  step = train_state.global_step
  lr = learning_rate_fn(step)
  if config.get('sam_rho', None) is None:
    # Normal training
    (train_cost,
     (new_model_state,
      logits)), grad = compute_gradient_fn(train_state.optimizer.target)
  else:
    # SAM (Sharpness-Aware Minimization) training, taken from cl/373487774:
    # evaluate the gradient at params perturbed along the normalized gradient
    # direction, scaled by sam_rho.
    def dual_vector(y: jnp.ndarray) -> jnp.ndarray:
      """Returns the solution of max_x y^T x s.t. ||x||_2 <= 1."""
      gradient_norm = jnp.sqrt(sum(
          [jnp.sum(jnp.square(e)) for e in jax.tree_util.tree_leaves(y)]))
      normalized_gradient = jax.tree_map(
          lambda x: x / (gradient_norm + 1e-7), y)
      return normalized_gradient
    g_sam, _ = jax.grad(training_loss_fn, has_aux=True)(
        train_state.optimizer.target)
    g_sam = dual_vector(g_sam)
    # NOTE(review): jax.tree_multimap and jax.experimental.optimizers are
    # deprecated/removed in newer jax releases — this file presumably pins an
    # older jax version; confirm before upgrading.
    target_sam = jax.tree_multimap(lambda a, b: a + config.get('sam_rho') * b,
                                   train_state.optimizer.target, g_sam)
    (train_cost,
     (new_model_state,
      logits)), grad = compute_gradient_fn(target_sam)
  # TODO(dehghani,aarnab): Check how to move this after the pmean.
  if config.get('max_grad_norm', None) is not None:
    grad = clip_grads(grad, config.max_grad_norm)
  # The loss value is not reported from here; metrics_fn recomputes metrics.
  del train_cost
  # Re-use same axis_name as in the call to `pmap(...train_step...)` below.
  grad = jax.lax.pmean(grad, axis_name='batch')
  new_optimizer = train_state.optimizer.apply_gradient(grad, learning_rate=lr)
  # Explicit weight decay, if necessary.
  if config.get('explicit_weight_decay', None) is not None:
    # Only decay parameters whose name contains 'kernel' (i.e. weights, not
    # biases or normalization scales).
    new_optimizer = new_optimizer.replace(
        target=optimizers.tree_map_with_names(
            functools.partial(
                optimizers.decay_weight_fn,
                lr=lr,
                decay=config.explicit_weight_decay),
            new_optimizer.target,
            match_name_fn=lambda name: 'kernel' in name))
  metrics = metrics_fn(logits, batch)
  new_train_state = train_state.replace(  # pytype: disable=attribute-error
      global_step=step + 1,
      optimizer=new_optimizer,
      model_state=new_model_state,
      rng=new_rng)
  return new_train_state, metrics, lr
def eval_step(
    train_state: train_utils.TrainState,
    batch: Batch,
    *,
    flax_model: nn.Module,
    metrics_fn: MetricFn,
    return_logits_and_labels: bool = False,
    return_confusion_matrix: bool = False,
    debug: Optional[bool] = False
) -> Union[Tuple[Dict[str, Tuple[float, int]], jnp.ndarray, jnp.array],
           Tuple[Dict[str, Tuple[float, int]], jnp.ndarray],
           Dict[str, Tuple[float, int]]]:
  """Runs a single step of evaluation.

  Note that in this code, the buffer of the second argument (batch) is donated
  to the computation.

  Assumed API of metrics_fn is:
  ```metrics = metrics_fn(logits, batch)```
  where batch is yielded by the batch iterator, and metrics is a dictionary
  mapping metric name to a vector of per example measurements. eval_step will
  aggregate (by summing) all per example measurements and divide by the
  aggregated normalizers. For each given metric we compute:
  1/N sum_{b in batch_iter} metric(b), where N is the sum of normalizer
  over all batches.

  Args:
    train_state: TrainState, the state of training including the current
      global_step, model_state, rng, and optimizer. The buffer of this argument
      can be donated to the computation.
    batch: A single batch of data.
    flax_model: A Flax model.
    metrics_fn: A metrics function, that given logits and batch of data,
      calculates the metrics as well as the loss.
    return_logits_and_labels: If true, returns logits and labels. Can be used
      for calculating the Mean Average Precision for multi-label problems.
      Only one of "return_logits_and_labels" and "return_confusion_matrix"
      should be true, with the latter taking precedence if both are set as true.
    return_confusion_matrix: If true, returns confusion matrix. Can be used
      to calculate additional metrics for k-way classification problems.
    debug: Whether the debug mode is enabled during evaluation.
      `debug=True` enables model specific logging/storing some values using
      jax.host_callback.

  Returns:
    Calculated metrics [and optionally logits or confusion matrix].
  """
  variables = {
      'params': train_state.optimizer.target,
      **train_state.model_state
  }
  logits = flax_model.apply(
      variables, batch['inputs'], train=False, mutable=False, debug=debug)
  metrics = metrics_fn(logits, batch)
  # return_confusion_matrix takes precedence over return_logits_and_labels.
  if return_confusion_matrix:
    confusion_matrix = get_confusion_matrix(
        labels=batch['label'], logits=logits, batch_mask=batch['batch_mask'])
    # Gather per-device matrices across the 'batch' pmap axis.
    confusion_matrix = jax.lax.all_gather(confusion_matrix, 'batch')
    return metrics, confusion_matrix
  if return_logits_and_labels:
    logits = jax.lax.all_gather(logits, 'batch')
    labels = jax.lax.all_gather(batch['label'], 'batch')
    return metrics, logits, labels
  return metrics
def test_step(
    train_state: train_utils.TrainState,
    batch: Batch,
    *,
    flax_model: nn.Module,
    metrics_fn: MetricFn,
    n_clips: int = 2,
    return_logits_and_labels: bool = False,
    softmax_logits: bool = False,
    debug: bool = False
) -> Union[Dict[str, Tuple[float, int]], Tuple[Dict[str, Tuple[float, int]],
                                               jnp.array, jnp.array]]:
  """Runs a single step of testing.

  For multi-crop testing, we assume that num_crops consecutive entries in the
  batch are from the same example. And we average the logits over these examples

  We assume that the batch contains different crops of the same original
  example. Therefore, we can average all the logits of it.
  This assumption is true when local_batch_size = num_local_devices

  Args:
    train_state: The state of training including the current
      global_step, model_state, rng, and optimizer, and other metadata.
    batch: Dictionary with keys 'inputs', 'label', 'batch_mask'. We assume that
      all the inputs correspond to the same original example in the test set.
      The input shapes to this function are batch['inputs'] = [num_crops, t, h,
      w, c], batch['label'] = [num_crops, num_classes]. However, for
      classification, the labels for all the crops are the same.
      batch['batch_mask'] = [num_crops]
    flax_model: A Flax model.
    metrics_fn: Metrics function for the model.
    n_clips: The number of clips to process at a time by each device. Set
      due to memory constraints.
    return_logits_and_labels: Whether return logits of the model or not.
    softmax_logits: Whether to softmax-normalise the logits before
      averaging
    debug: Whether the debug mode is enabled during evaluation.
      `debug=True` enables model specific logging/storing some values using
      jax.host_callback.

  Returns:
    Calculated metrics [and optionally averaged logits that are of
    shape `[1, num_classes]`].
  """
  # Accumulator over all crops; batch['label'].shape[1] is num_classes.
  all_logits = jnp.zeros(batch['label'].shape[1])
  assert len(batch['batch_mask'].shape) == 1, (
      'Spatial padding is not supported in multi-crop evaluation.')
  num_crops = batch['inputs'].shape[0]
  variables = {
      'params': train_state.optimizer.target,
      **train_state.model_state
  }
  # Process crops in chunks of n_clips to stay within device memory.
  for idx in range(0, num_crops, n_clips):
    temp_input = batch['inputs'][idx:idx + n_clips]
    logits = flax_model.apply(
        variables, temp_input, train=False, mutable=False, debug=debug)
    if softmax_logits:
      logits = nn.softmax(logits, axis=-1)
    # Sum over the clips in this chunk; divided by num_crops after the loop.
    logits = jnp.sum(logits, axis=0)
    all_logits = all_logits + logits
  all_logits = all_logits / num_crops
  all_logits = jnp.expand_dims(all_logits, axis=0)
  # NOTE: batch is mutated in place here — the per-crop labels/masks are
  # collapsed to a single entry so metrics_fn sees one (averaged) example.
  batch['label'] = jnp.expand_dims(batch['label'][0], axis=0)
  batch['batch_mask'] = jnp.expand_dims(batch['batch_mask'][0], axis=0)
  metrics = metrics_fn(all_logits, batch)
  if return_logits_and_labels:
    return metrics, all_logits, batch['label']
  return metrics
def get_confusion_matrix(labels: Array, logits: Array,
                         batch_mask: Array) -> Array:
  """Builds a weighted confusion matrix from model predictions.

  Args:
    labels: [n_batch] integer labels or [n_batch, n_classes] one-hot labels.
      One-hot labels are reduced with argmax, since the confusion matrix is
      only defined when each example has a single label.
    logits: [n_batch, n_classes] array of model predictions.
    batch_mask: [n_batch] array of 1s (valid example) and 0s (padding).

  Returns:
    Confusion matrix of shape [1, n_classes, n_classes].
  """
  one_hot = labels.ndim == logits.ndim
  y_true = jnp.argmax(labels, axis=-1) if one_hot else labels
  y_pred = jnp.argmax(logits, axis=-1)
  # Padded examples receive zero weight so they contribute no counts.
  sample_weights = batch_mask.astype(jnp.float32)
  matrix = model_utils.confusion_matrix(
      y_true=y_true,
      y_pred=y_pred,
      num_classes=logits.shape[-1],
      weights=sample_weights)
  # Prepend a dummy batch dimension expected by downstream aggregation.
  return matrix[jnp.newaxis, ...]
def render_confusion_matrices(confusion_matrices: List[Array],
                              normalization_method: str = 'cols',
                              figsize: Tuple[int, int] = (12, 12),
                              dpi: int = 100,
                              font_scale: int = 3) -> Array:
  """Render confusion matrix so that it can be logged to Tensorboard.

  Args:
    confusion_matrices: List of [n_batch, n_class, n_class] confusion matrices.
      The first two dimensions will be summed over to get an [n_class, n_class]
      matrix for rendering.
    normalization_method: Method of normalizing the confusion matrix before
      plotting. Supported values are one of "cols", "rows" and "none".
      If any other value, no normalization is performed.
    figsize: The figure size used by matplotlib and seaborn.
    dpi: The dpi used by matplotlib and seaborn.
    font_scale: The font scale used by seaborn.

  Returns:
    image: Rendered image of the confusion matrix for plotting. Data type is
      uint8 and values are in range [0, 255]. Shape is
      [1, figsize * dpi, figsize * dpi, 3]
  """
  conf_matrix = np.sum(confusion_matrices, axis=0)  # Sum over eval batches.
  if conf_matrix.ndim != 3:
    raise AssertionError(
        'Expecting confusion matrix to have shape '
        f'[batch_size, num_classes, num_classes], got {conf_matrix.shape}.')
  conf_matrix = np.sum(conf_matrix, axis=0)  # Sum over batch dimension.
  if normalization_method not in {'rows', 'cols', 'none'}:
    logging.warning('Normalizer must be one of {rows, cols, none}.'
                    'Defaulting to none.')
  sns.set(font_scale=font_scale)
  fig = plt.figure(figsize=figsize, dpi=dpi)
  # Normalize entries of the confusion matrix.
  if normalization_method == 'rows':
    normalizer = conf_matrix.sum(axis=1)[:, np.newaxis]
  elif normalization_method == 'cols':
    normalizer = conf_matrix.sum(axis=0)[np.newaxis, :]
  else:
    normalizer = 1
  # nan_to_num guards against all-zero rows/columns producing NaNs.
  normalized_matrix = np.nan_to_num(conf_matrix / normalizer)
  if np.sum(normalized_matrix) > 0:
    sns.heatmap(
        normalized_matrix,
        annot=True,
        linewidths=0.5,
        square=True,
        cbar=False,
        cmap='jet',
        annot_kws={'size': 18})
    fig.tight_layout(pad=0.0)
  fig.canvas.draw()
  ncols, nrows = fig.canvas.get_width_height()
  # FIX: np.fromstring is deprecated for binary data; use np.frombuffer.
  # Copy because frombuffer returns a read-only view of the canvas buffer.
  image = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).copy()
  image = image.reshape(nrows, ncols, 3)
  # FIX: close the figure; otherwise repeated calls leak matplotlib figures.
  plt.close(fig)
  return np.expand_dims(image, axis=0)
| |
#!/usr/bin/env python
# encoding: utf-8
"""
Analyze multiple periods and starspot evolution
"""
from __future__ import print_function, division, absolute_import
import datetime
import os
import sys
from scipy.ndimage import gaussian_filter
from scipy import signal
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import h5py
from . import eclipsing_binary, config, utils
def find_subpeak_range(peak_freqs, peak_heights, height_ratio):
    """
    Find the highest subpeak and the range of significant subpeak frequencies.

    Parameters
    ----------
    peak_freqs : numpy.array_like
        Frequencies of subpeaks
    peak_heights : numpy.array_like
        Heights of subpeaks
    height_ratio : float
        Significant peaks have `peak_heights` > `height_ratio` * highest peak

    Returns
    -------
    freq_range, height_range : None, numpy.ndarray, or 2-tuple of numpy.ndarray
        The frequencies and heights of the subpeaks.
        None if no subpeaks are found,
        2-tuple of length 1 array if one subpeak is found,
        3-tuple of length 1 arrays if two subpeaks are found.
    """
    if len(peak_freqs) == 0:
        return None, None
    # Order both arrays from tallest peak to shortest.
    order = np.argsort(peak_heights)[::-1]
    heights = peak_heights[order]
    freqs = peak_freqs[order]
    # Keep only peaks exceeding the height threshold set by the tallest one.
    significant = heights > height_ratio * heights[0]
    if np.sum(significant) <= 1:
        # At most one significant peak: report a degenerate (repeated) range.
        freq_range = (freqs[significant], freqs[significant])
        height_range = (heights[significant], heights[significant])
        return freq_range, height_range
    sig_heights = heights[significant]
    sig_freqs = freqs[significant]
    lo = np.argmin(sig_freqs)
    hi = np.argmax(sig_freqs)
    # (tallest peak, lowest-frequency peak, highest-frequency peak)
    freq_range = (sig_freqs[0], sig_freqs[lo], sig_freqs[hi])
    height_range = (sig_heights[0], sig_heights[lo], sig_heights[hi])
    return freq_range, height_range
def detect_multiple_periods(results_file, pgram_file, kernel=30,
                            height_ratio=0.3, fix=False,
                            restrict_file='restrict_freqs.csv',
                            plot_all=False, plot_example=False):
    """
    Look for multiple periods and constrain differential rotation.

    Parameters
    ----------
    results_file : str
        Name of the CSV file containing results.
    pgram_file : str
        Name of the HDF5 file containing the periodograms
    kernel : float, optional
        Standard deviation of Gaussian smoothing kernel
    height_ratio : float, optional
        Significant peaks have `peak_heights` > `height_ratio` * highest peak
    fix : bool, optional
        Set to True to only run on EBs that need fixing.
    restrict_file : str, optional
        CSV file containing KIC numbers and period range restrictions.
    plot_all : bool, optional
        Set to True to save plots of periodogram and peaks as PNGs.
    plot_example : bool, optional
        Set to True to plot results for a single example.
    """
    # Load inspection data
    df = pd.read_csv('{}/{}'.format(config.repo_data_dir, results_file))
    # Load periodograms
    h5 = h5py.File('{}/{}'.format(config.data_dir, pgram_file), 'r')
    # Only analyze likely starspot EBs
    sp_mask = df['class'].values == 'sp'
    if fix:
        sp_mask &= df['p_acf_ok'] == 'f'
    kics = df['KIC'][sp_mask].values
    p_rot = df['p_acf'][sp_mask].values
    # -1 filled arrays to hold results
    # First column for highest peak, next two columns for subpeaks
    peak_freqs_1 = np.zeros((len(kics), 3), dtype=np.float64) - 1.
    peak_heights_1 = np.zeros_like(peak_freqs_1, dtype=np.float64) - 1.
    peak_freqs_2 = np.zeros_like(peak_freqs_1, dtype=np.float64) - 1.
    peak_heights_2 = np.zeros_like(peak_freqs_1, dtype=np.float64) - 1.
    total_systems = len(kics)
    print('Finding peaks in {} periodograms...'.format(total_systems))
    if plot_all:
        if not os.path.exists('{}/png'.format(config.data_dir)):
            os.mkdir('{}/png'.format(config.data_dir))
    if plot_example:
        save_results = False
        # HACK: Set first rotation period to rotation period of example
        p_rot[0] = p_rot[np.where(kics == 4751083)[0][0]]
        kics = kics[kics == 4751083]
    else:
        save_results = True
    df_rf = pd.read_csv('{}/{}'.format(config.repo_data_dir, restrict_file))
    for ii in range(len(kics)):
        # Compute periodogram on frequency grid
        freqs = h5['{}/freqs'.format(kics[ii])][:]
        powers = h5['{}/powers'.format(kics[ii])][:]
        # Only look for peaks around p_rot from ACF
        if p_rot[ii] > 45:
            # Expand search area
            f_min = 1 / 200
        else:
            f_min = 1 / 45
        if kics[ii] in df_rf['KIC'].values:
            # Manual per-system period range restriction overrides defaults.
            mask = df_rf['KIC'].values == kics[ii]
            f_min = 1 / df_rf['pmax'].values[mask]
            f_max = 1 / df_rf['pmin'].values[mask]
        else:
            f_max = 1.5 / p_rot[ii]
        search_area = (freqs > f_min) & (freqs < f_max)
        search_freqs = freqs[search_area]
        search_powers = powers[search_area]
        # Smooth periodogram
        smoothed_power = gaussian_filter(search_powers, sigma=kernel)
        # Detect peaks and minima in smoothed periodogram
        relative_maxes = signal.argrelmax(smoothed_power)[0]
        relative_mins = signal.argrelmin(smoothed_power)[0]
        # Sort peaks by height
        height_sort = np.argsort(smoothed_power[relative_maxes])[::-1]
        powers_sorted = smoothed_power[relative_maxes][height_sort]
        if len(powers_sorted) == 0:
            # No peaks found: use -2 as the "no peaks" sentinel.
            # FIX: the last tuple previously ended with +2., which corrupted
            # the sentinel value in peak_heights_2.
            peak_freqs_1[ii] = (-2., -2., -2.)
            peak_heights_1[ii] = (-2., -2., -2.)
            peak_freqs_2[ii] = (-2., -2., -2.)
            peak_heights_2[ii] = (-2., -2., -2.)
            continue
        # Only consider peaks higher than height_ratio * highest peak
        height_cutoff = powers_sorted > height_ratio * powers_sorted[0]
        n_significant_peaks = np.sum(height_cutoff)
        if n_significant_peaks > 1:
            # Take the two highest peaks
            peak_index_1 = relative_maxes[height_sort][height_cutoff][0]
            peak_index_2 = relative_maxes[height_sort][height_cutoff][1]
            peak_indicies = (peak_index_1, peak_index_2)
            store_arrs = [[peak_freqs_1, peak_heights_1],
                          [peak_freqs_2, peak_heights_2]]
        else:
            # There is only one significant subpeak
            peak_indicies = relative_maxes[height_sort][height_cutoff]
            store_arrs = [[peak_freqs_1, peak_heights_1]]
        # Find peaks in unsmoothed, oversampled periodogram
        orig_peaks = signal.argrelmax(search_powers)[0]
        for index, store in zip(peak_indicies, store_arrs):
            # Get indices of adjacent minima
            lt_max = relative_mins < index
            if np.sum(lt_max) == 0:
                # There is no minimum to the left
                index_left = index - 1
            else:
                index_left = relative_mins[np.nonzero(lt_max)[0][-1]]
            if np.sum(~lt_max) == 0:
                # There is no minimum to the right
                index_right = index + 1
            else:
                index_right = relative_mins[np.nonzero(~lt_max)[0][0]]
            # Peak indices within adjacent minima
            in_range = (orig_peaks > index_left) & (orig_peaks < index_right)
            if np.sum(in_range) == 0:
                # No unsmoothed peaks between the minima: fall back to all
                # unsmoothed peaks in the search window.
                xx = search_freqs[orig_peaks]
                yy = search_powers[orig_peaks]
            else:
                xx = search_freqs[orig_peaks][in_range]
                yy = search_powers[orig_peaks][in_range]
            freq_range, height_range = find_subpeak_range(xx, yy, height_ratio)
            if freq_range is None:
                # No subpeaks found
                continue
            # Store results to arrays
            peak_freqs, peak_heights = store
            if len(freq_range) == 2:
                peak_freqs[ii, :2] = freq_range
                peak_heights[ii, :2] = height_range
            elif len(freq_range) == 3:
                peak_freqs[ii] = freq_range
                peak_heights[ii] = height_range
        sys.stdout.write('\r{:.1f}% complete'.format((ii + 1) * 100 /
                                                     total_systems))
        sys.stdout.flush()
        if plot_example:
            fig, ax1 = plt.subplots()
            ax1.plot(search_freqs, search_powers, lw=0.75, color='k')
            ax1.plot(search_freqs, smoothed_power, lw=1.5, ls=':',
                     color='purple')
            for jj in [1, 2]:
                xx = peak_freqs_1[ii, jj]
                yy = peak_heights_1[ii, jj]
                ax1.scatter(xx, yy, color='k', s=30, zorder=4, marker='x')
                xx = peak_freqs_2[ii, jj]
                yy = peak_heights_2[ii, jj]
                ax1.scatter(xx, yy, color='k', s=30, zorder=4, marker='x')
            ax1.set_xlabel('Frequency (day$^{-1}$)')
            ax1.set_ylabel('Power')
            ax1.set_title('KIC {}'.format(kics[ii]), fontsize=20, y=1.24)
            ax1.minorticks_on()
            # Set x limits
            x_vals = np.concatenate((peak_freqs_1[ii], peak_freqs_2[ii]))
            x_vals = x_vals[x_vals > 0]
            if len(x_vals) > 1:
                xmin = x_vals.min()
                xmax = x_vals.max()
                xdiff = xmax - xmin
                ax1.set_xlim(xmin - 0.5 * xdiff, xmax + 0.5 * xdiff)
            else:
                ax1.set_xlim(search_freqs.min(), search_freqs.max())
            # Secondary top axis showing the corresponding periods.
            ax2 = ax1.twiny()
            ax2_ticks = ax1.get_xticks()

            def tick_function(x):
                per = 1 / x
                return ['{:.1f}'.format(p) for p in per]

            ax2.set_xticks(ax2_ticks)
            ax2.set_xbound(ax1.get_xbound())
            ax2.set_xticklabels(tick_function(ax2_ticks))
            ax2.tick_params(axis='x', which='major', pad=5)
            ax2.set_xlabel('Period (days)')
            plt.savefig('{}/multipeak_example.pdf'.format(config.data_dir))
            plt.close()
        if plot_all:
            plt.plot(search_freqs, search_powers, lw=0.5)
            plt.plot(search_freqs, smoothed_power, lw=0.5, ls=':')
            plt.axvline(1 / p_rot[ii], lw=0.5, linestyle='--', color='k')
            colors = ['r', 'b']
            for jj in [0, 1, 2]:
                xx = peak_freqs_1[ii, jj]
                yy = peak_heights_1[ii, jj]
                plt.scatter(xx, yy, color=colors[0], s=5, zorder=4)
                xx = peak_freqs_2[ii, jj]
                yy = peak_heights_2[ii, jj]
                plt.scatter(xx, yy, color=colors[1], s=5, zorder=4)
            plt.xlabel('Frequency (day$^{-1}$)')
            plt.ylabel('Power')
            plt.title('KIC {}'.format(kics[ii]))
            plt.xlim(search_freqs.min(), search_freqs.max())
            plt.ylim(0, 1.1 * search_powers.max())
            plt.savefig('{}/png/pgram_KIC{:09d}.png'.format(config.data_dir,
                                                            kics[ii]))
            plt.close()
    print()
    # FIX: release the HDF5 handle opened at the top of the function.
    h5.close()
    if save_results:
        # Save results to CSV
        df.loc[sp_mask, 'freq_1_1'] = peak_freqs_1[:, 1]
        df.loc[sp_mask, 'freq_1_2'] = peak_freqs_1[:, 2]
        df.loc[sp_mask, 'freq_2_1'] = peak_freqs_2[:, 1]
        df.loc[sp_mask, 'freq_2_2'] = peak_freqs_2[:, 2]
        df.loc[sp_mask, 'height_1_1'] = peak_heights_1[:, 1]
        df.loc[sp_mask, 'height_1_2'] = peak_heights_1[:, 2]
        df.loc[sp_mask, 'height_2_1'] = peak_heights_2[:, 1]
        df.loc[sp_mask, 'height_2_2'] = peak_heights_2[:, 2]
        df.to_csv('{}/{}'.format(config.repo_data_dir, results_file),
                  index=False)
def generate_frequency_periodograms(sp_class='sp', window=1.5,
                                    output_file=None, kic_list=None,
                                    sap_list=None, detrend_list=None):
    """
    Generate periodograms on a uniform frequency grid.

    Parameters
    ----------
    sp_class : {'sp', 'spx'}
        Likely or possible starspots
    window : float, optional
        Window around the eclipse to interpolate over
    output_file : str, optional
        Specify an alternate output filename. Default uses today's date.
    kic_list : list, optional
        Only run on these KIC IDs
    sap_list : list, optional
        These KIC IDs will use the SAP flux instead of PDC.
    detrend_list : list, optional
        These KIC IDs will be detrended with a low order polynomial.
    """
    df = utils.collect_results()
    if output_file is None:
        today = '{:%Y%m%d}'.format(datetime.date.today())
        output_file = '{}/grid_pgrams.{}.h5'.format(config.data_dir, today)
    else:
        output_file = '{}/{}'.format(config.data_dir, output_file)
    kics = df['KIC'].values[df['class'].values == sp_class]
    if kic_list is not None:
        kics = kics[np.in1d(kics, kic_list)]
    total_systems = len(kics)
    print('Computing grid periodograms for {} systems...'.format(total_systems))
    # FIX: open the HDF5 file with a context manager so it is flushed and
    # closed even if light-curve processing raises part-way through
    # (previously the file handle was never closed).
    with h5py.File(output_file, 'w') as h5:
        h5.attrs['window'] = window
        for ii, kic in enumerate(kics):
            if sap_list is not None and kic in sap_list:
                eb = eclipsing_binary.EclipsingBinary.from_kic(kic, use_pdc=False)
            else:
                eb = eclipsing_binary.EclipsingBinary.from_kic(kic)
            if detrend_list is not None and kic in detrend_list:
                eb.normalize(detrend=True)
            else:
                eb.normalize()
            # Remove the eclipse signal so it does not dominate the periodogram.
            eb.interpolate_over_eclipse(window=window)
            freqs, powers = eb.frequency_periodogram()
            group = h5.create_group(str(kic))
            group.create_dataset('freqs', data=freqs)
            group.create_dataset('powers', data=powers)
            del eb
            sys.stdout.write('\r{:.1f}% complete'.format((ii + 1) * 100 / total_systems))
            sys.stdout.flush()
    print()
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class TaskStatisticsList(ListResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, assistant_sid, task_sid):
        """
        Initialize the TaskStatisticsList

        :param Version version: Version that contains the resource
        :param assistant_sid: The SID of the Assistant that is the parent of the Task associated with the resource
        :param task_sid: The SID of the Task for which the statistics were collected

        :returns: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsList
        :rtype: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsList
        """
        super(TaskStatisticsList, self).__init__(version)

        # Path Solution
        self._solution = {
            'assistant_sid': assistant_sid,
            'task_sid': task_sid,
        }

    def get(self):
        """
        Constructs a TaskStatisticsContext

        :returns: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsContext
        :rtype: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsContext
        """
        return TaskStatisticsContext(
            self._version,
            assistant_sid=self._solution['assistant_sid'],
            task_sid=self._solution['task_sid'],
        )

    def __call__(self):
        """
        Constructs a TaskStatisticsContext

        :returns: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsContext
        :rtype: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsContext
        """
        # Calling the list object is simply an alias for get().
        return self.get()

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Autopilot.V1.TaskStatisticsList>'
class TaskStatisticsPage(Page):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, response, solution):
        """
        Initialize the TaskStatisticsPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param assistant_sid: The SID of the Assistant that is the parent of the Task associated with the resource
        :param task_sid: The SID of the Task for which the statistics were collected

        :returns: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsPage
        :rtype: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsPage
        """
        super(TaskStatisticsPage, self).__init__(version, response)

        # Path Solution
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of TaskStatisticsInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsInstance
        """
        # Thread the page's path solution through to the instance so it can
        # build its own context lazily.
        solution = self._solution
        return TaskStatisticsInstance(
            self._version,
            payload,
            assistant_sid=solution['assistant_sid'],
            task_sid=solution['task_sid'],
        )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Autopilot.V1.TaskStatisticsPage>'
class TaskStatisticsContext(InstanceContext):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, assistant_sid, task_sid):
        """
        Initialize the TaskStatisticsContext

        :param Version version: Version that contains the resource
        :param assistant_sid: The SID of the Assistant that is the parent of the resource to fetch
        :param task_sid: The SID of the Task that is associated with the resource to fetch

        :returns: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsContext
        :rtype: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsContext
        """
        super(TaskStatisticsContext, self).__init__(version)

        # Path Solution
        self._solution = {
            'assistant_sid': assistant_sid,
            'task_sid': task_sid,
        }
        self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Statistics'.format(**self._solution)

    def fetch(self):
        """
        Fetch a TaskStatisticsInstance

        :returns: Fetched TaskStatisticsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsInstance
        """
        payload = self._version.fetch(
            'GET',
            self._uri,
            params=values.of({}),
        )

        # The solution dict keys match the instance's keyword arguments.
        return TaskStatisticsInstance(self._version, payload, **self._solution)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Autopilot.V1.TaskStatisticsContext {}>'.format(context)
class TaskStatisticsInstance(InstanceResource):
    """ PLEASE NOTE that this class contains preview products that are subject
    to change. Use them with caution. If you currently do not have developer
    preview access, please contact help@twilio.com. """

    def __init__(self, version, payload, assistant_sid, task_sid):
        """
        Initialize the TaskStatisticsInstance

        :returns: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsInstance
        """
        super(TaskStatisticsInstance, self).__init__(version)

        # Marshaled Properties: counts are deserialized to integers, the
        # remaining fields are kept as the raw payload strings.
        self._properties = {
            'account_sid': payload.get('account_sid'),
            'assistant_sid': payload.get('assistant_sid'),
            'task_sid': payload.get('task_sid'),
            'samples_count': deserialize.integer(payload.get('samples_count')),
            'fields_count': deserialize.integer(payload.get('fields_count')),
            'url': payload.get('url'),
        }

        # Context is built lazily in _proxy on first use.
        self._context = None
        self._solution = {
            'assistant_sid': assistant_sid,
            'task_sid': task_sid,
        }

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context

        :returns: TaskStatisticsContext for this TaskStatisticsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsContext
        """
        if self._context is None:
            self._context = TaskStatisticsContext(
                self._version,
                assistant_sid=self._solution['assistant_sid'],
                task_sid=self._solution['task_sid'],
            )
        return self._context

    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def assistant_sid(self):
        """
        :returns: The SID of the Assistant that is the parent of the Task associated with the resource
        :rtype: unicode
        """
        return self._properties['assistant_sid']

    @property
    def task_sid(self):
        """
        :returns: The SID of the Task for which the statistics were collected
        :rtype: unicode
        """
        return self._properties['task_sid']

    @property
    def samples_count(self):
        """
        :returns: The total number of Samples associated with the Task
        :rtype: unicode
        """
        return self._properties['samples_count']

    @property
    def fields_count(self):
        """
        :returns: The total number of Fields associated with the Task
        :rtype: unicode
        """
        return self._properties['fields_count']

    @property
    def url(self):
        """
        :returns: The absolute URL of the TaskStatistics resource
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """
        Fetch a TaskStatisticsInstance

        :returns: Fetched TaskStatisticsInstance
        :rtype: twilio.rest.autopilot.v1.assistant.task.task_statistics.TaskStatisticsInstance
        """
        return self._proxy.fetch()

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Autopilot.V1.TaskStatisticsInstance {}>'.format(context)
| |
import itertools
import sys
from pyglet.gl import *
from pywavefront import Wavefront
from pywavefront.material import Material
from pywavefront.mesh import Mesh
from bomb_defusal.view.utils.vector import Vector
# RGBA color table (values in the 0-1 range) keyed by human-readable name;
# 'Green' is deliberately half-intensity and 'Black' slightly lifted from 0.
colors = {
'Blue': [0, 0, 1, 1],
'Yellow': [1, 1, 0, 1],
'Green': [0, .5, 0, 1],
'Red': [1, 0, 0, 1],
'White': [1, 1, 1, 1],
'Black': [.1, .1, .1, 1],
}
def vector(*args):
    """Pack the given floats into a ctypes array of GLfloat."""
    # noinspection PyCallingNonCallable,PyTypeChecker
    array_type = GLfloat * len(args)
    return array_type(*args)
def matrix4x4(*args):
    """Create a ctypes array of 16 GLdouble values, i.e. a 4x4 matrix buffer."""
    # noinspection PyCallingNonCallable,PyTypeChecker
    matrix_type = GLdouble * 16
    return matrix_type(*args)
def int_vector(*args):
    """Create a ctypes array of 4 GLint values (e.g. a viewport rectangle)."""
    # noinspection PyCallingNonCallable,PyTypeChecker
    vector_type = GLint * 4
    return vector_type(*args)
def load_model(file_path):
    """
    Loads a wavefront obj file and fixes the ambient color (blender export issue).

    The method replaces the loaded ambient color of the model with the diffuse color. This is required because
    the ambient value is set to an invalid value by the blender export script.

    :param path file_path: Path to the wavefront obj file to load
    :returns: 3D model representing the loaded wavefront obj file
    :rtype: pywavefront.Wavefront
    """
    model = Wavefront(file_path)
    # Blender's exporter writes an unusable ambient value; mirror diffuse instead.
    for mat in model.materials.values():
        mat.ambient = mat.diffuse
    return model
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks, padding the last one with *fillvalue*."""
    # The same iterator repeated n times advances in lockstep, yielding n-tuples.
    iterators = (iter(iterable),) * n
    return itertools.zip_longest(*iterators, fillvalue=fillvalue)
def get_hit_distance(vertices, mouse, model_matrix):
    """
    Gets the distance of the closest hit on a mesh or None if the mesh is not hit

    :param Union[Wavefront, Mesh, list] vertices: Mesh to get the distance of the closest hit
    :param mouse: Mouse position on the screen
    :param model_matrix: Model matrix for the mesh
    :return: Distance to the closest hit (fraction of the near-far ray) or None
    """
    # Recursive dispatch: unwrap container objects down to the raw vertex list.
    if isinstance(vertices, Wavefront):
        closest = None
        for m in vertices.meshes.values():
            distance = get_hit_distance(m, mouse, model_matrix)
            # BUG FIX: a sub-mesh that is not hit yields None, which previously
            # crashed the "distance < closest" comparison on Python 3.
            if distance is not None and (closest is None or distance < closest):
                closest = distance
        return closest
    if isinstance(vertices, Mesh):
        closest = None
        for material in vertices.materials:
            distance = get_hit_distance(material, mouse, model_matrix)
            if distance is not None and (closest is None or distance < closest):
                closest = distance
        return closest
    if isinstance(vertices, Material):
        # A material wraps exactly one vertex list, so its result is the answer.
        return get_hit_distance(vertices.vertices, mouse, model_matrix)

    # Build the pick ray by unprojecting the mouse position onto the near
    # (winz=0) and far (winz=1) clipping planes.
    projection_matrix = matrix4x4()
    glGetDoublev(GL_PROJECTION_MATRIX, projection_matrix)
    viewport = int_vector()
    glGetIntegerv(GL_VIEWPORT, viewport)
    near = (GLdouble(), GLdouble(), GLdouble())
    gluUnProject(*mouse, 0, model_matrix, projection_matrix, viewport, *near)
    near = Vector(*[v.value for v in near])
    far = (GLdouble(), GLdouble(), GLdouble())
    gluUnProject(*mouse, 1, model_matrix, projection_matrix, viewport, *far)
    far = Vector(*[v.value for v in far])
    hit_distance = None
    ray_length = far - near
    # get triangles from vertices 3x (2x texture, 3x normal, 3x vertex) = 24
    for triangle in grouper(vertices, 24):
        n = Vector(*triangle[2:5])
        a = Vector(*triangle[5:8])
        b = Vector(*triangle[13:16])
        c = Vector(*triangle[21:24])
        dot_length = n * ray_length
        if dot_length == 0:
            # BUG FIX: ray is parallel to the triangle plane; previously the
            # division below raised ZeroDivisionError in this case.
            continue
        distance = n * (a - near) / dot_length
        if distance < 0 or distance > 1:
            # plane is beyond the ray we consider
            continue
        # Intersection point with the triangle's plane.
        p = near + distance * ray_length
        # Inside-test: p is inside the triangle iff it lies on the same side
        # of all three edges (all edge-normals point along n).
        n1 = (b - a).cross(p - a)
        n2 = (c - b).cross(p - b)
        n3 = (a - c).cross(p - c)
        if n * n1 >= 0. and n * n2 >= 0. and n * n3 >= 0.:
            # We have found one of the triangles that intersects the line/ray
            if hit_distance is None or hit_distance > distance:
                hit_distance = distance
    return hit_distance
def get_bounds(vertices):
    """
    Calculates the bounding box for the given 3D object

    :param Union[Wavefront, Mesh, list] vertices: 3D object to calculate the bounding box for
    :return: Minimum and maximum vertex of the 3D object
    :rtype: Tuple[list, list]
    """
    def fold(low, high, *points):
        # Expand the running bounds component-wise to include every point.
        for point in points:
            low = tuple(min(l, p) for l, p in zip(low, point))
            high = tuple(max(h, p) for h, p in zip(high, point))
        return low, high

    low = (sys.maxsize,) * 3
    high = (-sys.maxsize - 1,) * 3
    if isinstance(vertices, Wavefront):
        for mesh in vertices.meshes.values():
            low, high = fold(low, high, *get_bounds(mesh))
        return low, high
    if isinstance(vertices, Mesh):
        for material in vertices.materials:
            low, high = fold(low, high, *get_bounds(material))
        return low, high
    if isinstance(vertices, Material):
        return fold(low, high, *get_bounds(vertices.vertices))
    # Raw list: interleaved T2F_N3F_V3F data, the position is floats 5..7 of each 8.
    for chunk in grouper(vertices, 8):
        low, high = fold(low, high, chunk[5:8])
    return low, high
def create_box(minimum, maximum):
    """
    Creates the vertices representing the box of the minimum and maximum vertices

    :param minimum: Vertex of the absolute minimum of all 3 axes
    :param maximum: Vertex of the absolute maximum of all 3 axes
    :return: Vertices in UV, NORM, VERTEX format of the box defined with the given bounds
    """
    x0, y0, z0 = minimum[0], minimum[1], minimum[2]
    x1, y1, z1 = maximum[0], maximum[1], maximum[2]
    # Eight corners: bottom face (z0) first, then the top face (z1),
    # each counter-clockwise starting at the minimum x/y corner.
    corners = [
        (x0, y0, z0), (x1, y0, z0), (x1, y1, z0), (x0, y1, z0),
        (x0, y0, z1), (x1, y0, z1), (x1, y1, z1), (x0, y1, z1),
    ]
    return create_box_from_vertices(corners)
def create_box_from_vertices(vertices):
    """
    Builds interleaved T2F_N3F_V3F data for a box from its eight corner vertices.

    :param vertices: The eight corner vertices of the box
    :return: Flat list of floats, 8 per emitted vertex (2 UV, 3 normal, 3 position)
    """
    # Two triangles per face, as index triples into the 8-corner list.
    faces = [(1, 0, 3), (1, 3, 2),
             (4, 5, 6), (4, 6, 7),
             (4, 7, 3), (4, 3, 0),
             (6, 5, 1), (6, 1, 2),
             (5, 4, 0), (5, 0, 1),
             (7, 6, 2), (7, 2, 3)]
    data = []
    for i, j, k in faces:
        edge_a = Vector(*vertices[j]) - Vector(*vertices[i])
        edge_b = Vector(*vertices[k]) - Vector(*vertices[j])
        # Face normal = normalized cross product of two edges of the triangle.
        normal = Vector(
            edge_a[1] * edge_b[2] - edge_a[2] * edge_b[1],
            edge_a[2] * edge_b[0] - edge_a[0] * edge_b[2],
            edge_a[0] * edge_b[1] - edge_a[1] * edge_b[0]).normalize().values
        for corner in (i, j, k):
            data.extend([0, 0])          # dummy UV coordinates
            data.extend(normal)
            data.extend(vertices[corner])
    return data
def draw_box(vertices, color=(1, 1, 1, .9), emission=None, face_type=GL_TRIANGLES):
    """
    Draws the given interleaved T2F_N3F_V3F vertex data as a wireframe.

    :param vertices: Flat list of floats, 8 per vertex (2 UV, 3 normal, 3 position)
    :param color: RGBA color applied as diffuse, ambient and specular material
    :param emission: Optional RGBA emission color
    :param face_type: OpenGL primitive type to draw with
    """
    # The same color drives diffuse, ambient and specular; glMaterialfv copies
    # the buffer, so one ctypes array can be reused for all three calls.
    material_color = vector(*color)
    for component in (GL_DIFFUSE, GL_AMBIENT, GL_SPECULAR):
        glMaterialfv(GL_FRONT, component, material_color)
    if emission:
        glMaterialfv(GL_FRONT, GL_EMISSION, vector(*emission))
    gl_data = vector(*vertices)
    vertex_count = len(vertices) // 8
    glInterleavedArrays(GL_T2F_N3F_V3F, 0, gl_data)
    # Temporarily switch to wireframe rendering, then restore fill mode.
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
    glDrawArrays(face_type, 0, vertex_count)
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.