from Tkinter import *
import tkFont
from tkSimpleDialog import askstring
from tkFileDialog import asksaveasfilename
from corpus import Corpus
from channel import Channel
from masterchannel import MasterChannel
from load_window import LoadWindow
from text_window import ScrolledText
class Editor(Frame):
def __init__(self, parent=None):
Frame.__init__(self, parent)
self.pack(expand=YES, fill=BOTH)
self.font = tkFont.Font(family="Helvetica", size=12)
Button(self, text='Load', command=self.onLoad).pack()
self.text_frame = ScrolledText(self)
self.textbox = self.text_frame.text
        c = Corpus('treadmill', open('texts/laguardia.txt').read())
self.channels = self.channels_from_corpora([c])
self.master_channel = MasterChannel(self, self.textbox, self.channels)
self.channels.append(self.master_channel)
self.select_channel(0)
self.textbox.bind('<BackSpace>', self.onDelWord)
self.textbox.bind('<Return>', self.onReturn)
self.textbox.bind('<Left>', self.onArrowLeft)
self.textbox.bind('<Right>', self.onArrowRight)
self.textbox.bind('<Tab>', self.onTab)
self.textbox.bind('<Shift-Tab>', self.onShiftTab)
self.textbox.bind('=', self.onPlus)
self.textbox.bind('-', self.onMinus)
self.textbox.bind('M', self.onMute)
self.textbox.bind('D', self.onDebug)
# selects channel n
def select_channel(self, n):
self.active_number = n
self.refresh_keyboards()
# removes channel n
def removeChannel(self, n):
del self.channels[n]
if self.active_number == n:
self.select_channel(0)
    # given a list of corpora, adds new channels built from those corpora
    def channels_from_corpora(self, corpora, channels=None):
        # Default to None: a mutable default argument ([]) would be shared
        # across calls and silently accumulate channels.
        if channels is None:
            channels = []
        for corpus in corpora:
            channels.append(Channel(self, self.textbox, corpus, len(channels)))
        return channels
    # returns True if one of the channels in the given list has the given name
    def name_in_channels(self, name, channel_list):
        for channel in channel_list:
            if channel.channel_name == name:
                return True
        return False
def printChannels(self, event):
for channel_num in range(len(self.channels)):
print channel_num, self.channels[channel_num].channel_name
print "active:", self.active_number
    def onScrape(self):
        # NOTE: ScrapeWindow is not imported in this module; this handler
        # raises a NameError until the module that defines it is imported.
        self.sw = ScrapeWindow(Toplevel(self))
def onLoad(self):
self.load_window = LoadWindow(Toplevel(self), self)
def refresh_keyboards(self):
        for cnum in range(len(self.channels)):
            channel = self.channels[cnum]
            if cnum != self.active_number:
                channel.settings['color'] = 'black'
                channel.refresh_keyboard()
active_channel = self.channels[self.active_number]
active_channel.settings['color'] = 'blue'
active_channel.refresh_keyboard()
def get_master(self):
keyboard = Frame(self, padx = 10)
header = Frame(keyboard)
Label(header, text = "master").pack()
header.pack()
mainkeys = Frame(keyboard)
        master_list = []
        # Collect the first option of every channel into the master word list.
        # (Reconstructed: the original inner loop discarded 'word', and the
        # loop below referenced an undefined 'wordlist'.)
        for chnl in self.channels:
            channel_list = chnl.options
            for word in channel_list:
                master_list.append(word)
                break
        wordlist = master_list
for i in range(len(wordlist)):
optkey = Frame(mainkeys)
num = (i + 1) % 10
keylabel = '%s.' % Channel.optionmap()[i][0]
keystroke = Channel.optionmap()[i][1]
Label(optkey, text = keylabel, width = 4, anchor = W, font = self.font).pack(side = LEFT)
option = wordlist[i]
label = option
b = Button(optkey, text=label, font = self.font, width = 14, anchor = W, borderwidth = 0,
command= lambda word=option: self.onAddWord(word), pady = 0)
b.pack(side = LEFT)
            # Bind on the textbox, like the other bindings in __init__; the
            # original referenced the nonexistent attribute 'textframe'.
            self.textbox.bind(keystroke, lambda event, arg=option: self.onAddWord(arg))
optkey.pack(side = TOP)
mainkeys.pack()
    def onMute(self, event):
        self.channels[self.active_number].wt_scale.set(0)
        self.refresh_keyboards()  # the original was missing the call parentheses
        return 'break'
# volume up
def onPlus(self, event):
c = self.channels[self.active_number]
c.wt_scale.set(c.wt_scale.get() + 10)
self.refresh_keyboards()
return 'break'
# volume down
def onMinus(self, event):
c = self.channels[self.active_number]
c.wt_scale.set(c.wt_scale.get() - 10)
self.refresh_keyboards()
return 'break'
# goes to the next channel on tab press
def onTab(self, event):
self.cycle(1)
return 'break'
def onShiftTab(self, event):
self.cycle(-1)
return 'break'
def cycle(self, n):
self.active_number = (self.active_number + n) % len(self.channels)
self.select_channel(self.active_number)
def onReturn(self, event):
self.refresh_keyboards()
return 'break'
def onArrowLeft(self, event):
t = self.text_frame.text
prev_wordbreak = t.search(' ', INSERT, stopindex='1.0', backwards=True)
if prev_wordbreak:
self.textbox.mark_set(INSERT, '%s+1c' % prev_wordbreak)
else:
self.textbox.mark_set(INSERT, '1.0')
self.refresh_keyboards()
def onArrowRight(self, event):
t = self.text_frame.text
        next_wordbreak = t.search(' ', '%s+1c' % INSERT, stopindex='end')
if next_wordbreak:
self.textbox.mark_set(INSERT, '%s-1c' % next_wordbreak) #TODO: fix this so that it grabs the right characters
else:
self.textbox.mark_set(INSERT, END)
self.refresh_keyboards()
def onDelWord(self, event):
t = self.text_frame.text
prev_wordbreak = t.search(' ', '%s-1c' % INSERT, stopindex='1.0', backwards=True)
next_wordbreak = t.search(' ', '%s-1c' % INSERT, stopindex='end')
if prev_wordbreak:
start = prev_wordbreak
else:
start = '1.0'
if next_wordbreak:
end = next_wordbreak
else:
end = END
t.delete('%s+1c' % start, end)
self.refresh_keyboards()
    def get_previous(self):
        previous = self.textbox.get('insert linestart', INSERT).split()
        reach = 2
        if len(previous) >= reach:
            return previous[-reach:]
        else:
            return ['[$]'] + previous  # '[$]' marks the sentence start
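    # Hypothetical illustration: with "the cat sat" before the cursor,
    # get_previous() returns ['cat', 'sat']; with only "the" typed, it
    # returns ['[$]', 'the'], the marker standing in for the sentence start.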
    def make_num_opt_menu(self, parent, dictionary, title):
        panel = Frame(parent)
        Label(panel, text=title).pack()
        ref_list = []
        # Radiobuttons must share a Tkinter variable object; the original
        # assigned a plain int, which does not group the buttons.
        var = IntVar()
        for key in dictionary:
            b = Radiobutton(panel, text=key, variable=var, value=dictionary[key],
                            command=self.setNumOptions)
            ref_list.append(b)
            b.pack(anchor=W)
        return panel
    def get_next(self):
        # Slicing already handles lines with fewer than two words ahead.
        next_words = self.textbox.get(INSERT, 'insert lineend').split()
        return next_words[0:2]
    def setNumOptions(self):
        # NOTE: self.opt_box is never assigned in this class; this assumes the
        # options widget is attached elsewhere.
        for c in self.channels:
            c.num_options = self.opt_box.get()
def onDebug(self, event):
for ch in self.channels:
for corpus in ch.corpora:
print corpus.name
print len(corpus.memory)
return 'break'
if __name__ == '__main__':
    Editor().mainloop()
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import pickle
import re
import autofill_dataset_converter
import autofill_dataset_generator
import pyauto_functional # Must be imported before pyauto
import pyauto
class AutofillTest(pyauto.PyUITest):
"""Tests that autofill works correctly"""
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
import pprint
pp = pprint.PrettyPrinter(indent=2)
while True:
raw_input('Hit <enter> to dump info.. ')
info = self.GetAutofillProfile()
pp.pprint(info)
def testFillProfile(self):
"""Test filling profiles and overwriting with new profiles."""
profiles = [{'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith', 'ADDRESS_HOME_ZIP': '94043',},
{'EMAIL_ADDRESS': 'sue@example.com',
'COMPANY_NAME': 'Company X',}]
credit_cards = [{'CREDIT_CARD_NUMBER': '6011111111111117',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2011'},
{'CREDIT_CARD_NAME': 'Bob C. Smith'}]
self.FillAutofillProfile(profiles=profiles, credit_cards=credit_cards)
profile = self.GetAutofillProfile()
self.assertEqual(profiles, profile['profiles'])
self.assertEqual(credit_cards, profile['credit_cards'])
    profiles = [{'NAME_FIRST': 'Larry'}]
self.FillAutofillProfile(profiles=profiles)
profile = self.GetAutofillProfile()
self.assertEqual(profiles, profile['profiles'])
self.assertEqual(credit_cards, profile['credit_cards'])
def testFillProfileCrazyCharacters(self):
"""Test filling profiles with unicode strings and crazy characters."""
# Adding autofill profiles.
file_path = os.path.join(self.DataDir(), 'autofill', 'crazy_autofill.txt')
profiles = self.EvalDataFrom(file_path)
self.FillAutofillProfile(profiles=profiles)
self.assertEqual(profiles, self.GetAutofillProfile()['profiles'])
# Adding credit cards.
file_path = os.path.join(self.DataDir(), 'autofill',
'crazy_creditcards.txt')
test_data = self.EvalDataFrom(file_path)
credit_cards_input = test_data['input']
self.FillAutofillProfile(credit_cards=credit_cards_input)
self.assertEqual(test_data['expected'],
self.GetAutofillProfile()['credit_cards'])
def testGetProfilesEmpty(self):
"""Test getting profiles when none have been filled."""
profile = self.GetAutofillProfile()
self.assertEqual([], profile['profiles'])
self.assertEqual([], profile['credit_cards'])
def testAutofillInvalid(self):
"""Test filling in invalid values for profiles."""
# First try profiles with invalid input.
without_invalid = {'NAME_FIRST': u'Will',
'ADDRESS_HOME_CITY': 'Sunnyvale',
'ADDRESS_HOME_STATE': 'CA',
'ADDRESS_HOME_ZIP': 'my_zip',
'ADDRESS_HOME_COUNTRY': 'United States'}
# Add some invalid fields.
with_invalid = without_invalid.copy()
with_invalid['PHONE_HOME_WHOLE_NUMBER'] = 'Invalid_Phone_Number'
with_invalid['PHONE_FAX_WHOLE_NUMBER'] = 'Invalid_Fax_Number'
self.FillAutofillProfile(profiles=[with_invalid])
self.assertEqual([without_invalid],
self.GetAutofillProfile()['profiles'])
def testAutofillPrefsStringSavedAsIs(self):
"""Test invalid credit card numbers typed in prefs should be saved as-is."""
credit_card = {'CREDIT_CARD_NUMBER': 'Not_0123-5Checked'}
self.FillAutofillProfile(credit_cards=[credit_card])
self.assertEqual([credit_card],
self.GetAutofillProfile()['credit_cards'],
msg='Credit card number in prefs not saved as-is.')
def _LuhnCreditCardNumberValidator(self, number):
"""Validates whether a number is valid or invalid using the Luhn test.
Validation example:
1. Example number: 49927398716
2. Reverse the digits: 61789372994
3. Sum the digits in the odd-numbered position for s1:
6 + 7 + 9 + 7 + 9 + 4 = 42
4. Take the digits in the even-numbered position: 1, 8, 3, 2, 9
4.1. Two times each digit in the even-numbered position: 2, 16, 6, 4, 18
4.2. For each resulting value that is now 2 digits, add the digits
together: 2, 7, 6, 4, 9
(0 + 2 = 2, 1 + 6 = 7, 0 + 6 = 6, 0 + 4 = 4, 1 + 8 = 9)
4.3. Sum together the digits for s2: 2 + 7 + 6 + 4 + 9 = 28
    5. Sum together s1 + s2; if the sum ends in zero, the number passes the
       Luhn test: 42 + 28 = 70, so 49927398716 is a valid number.
Args:
number: the credit card number being validated, as a string.
    Returns:
      True if the credit card number is valid, False otherwise.
"""
# Filters out non-digit characters.
number = re.sub('[^0-9]', '', number)
reverse = [int(ch) for ch in str(number)][::-1]
    # divmod splits each doubled digit into its tens and ones digits, ready
    # for summing.
return ((sum(reverse[0::2]) + sum(sum(divmod(d*2, 10))
for d in reverse[1::2])) % 10 == 0)
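  # A quick sanity check of the validator (hypothetical usage sketch; the
  # first number is the worked example from the docstring above, the second
  # is the known-invalid number used in the test below):
  #   self._LuhnCreditCardNumberValidator('49927398716')          # -> True
  #   self._LuhnCreditCardNumberValidator('4408 0412 3456 7890')  # -> False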
def testInvalidCreditCardNumberIsNotAggregated(self):
"""Test credit card info with an invalid number is not aggregated.
When filling out a form with an invalid credit card number (one that
does not pass the Luhn test) the credit card info should not be saved into
Autofill preferences.
"""
invalid_cc_info = {'CREDIT_CARD_NAME': 'Bob Smith',
'CREDIT_CARD_NUMBER': '4408 0412 3456 7890',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'}
cc_number = invalid_cc_info['CREDIT_CARD_NUMBER']
self.assertFalse(self._LuhnCreditCardNumberValidator(cc_number),
msg='This test requires an invalid credit card number.')
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'autofill_creditcard_form.html'))
self.NavigateToURL(url)
for key, value in invalid_cc_info.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("cc_submit").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
# Wait until the form is submitted and the page completes loading.
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
cc_infobar = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
self.assertFalse(
cc_infobar, msg='Save credit card infobar offered to save CC info.')
def testWhitespacesAndSeparatorCharsStrippedForValidCCNums(self):
"""Test whitespaces and separator chars are stripped for valid CC numbers.
The credit card numbers used in this test pass the Luhn test.
For reference: http://www.merriampark.com/anatomycc.htm
"""
credit_card_info = [{'CREDIT_CARD_NAME': 'Bob Smith',
'CREDIT_CARD_NUMBER': '4408 0412 3456 7893',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'},
{'CREDIT_CARD_NAME': 'Jane Doe',
'CREDIT_CARD_NUMBER': '4417-1234-5678-9113',
'CREDIT_CARD_EXP_MONTH': '10',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2013'}]
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'autofill_creditcard_form.html'))
for cc_info in credit_card_info:
self.NavigateToURL(url)
for key, value in cc_info.iteritems():
cc_number = cc_info['CREDIT_CARD_NUMBER']
self.assertTrue(self._LuhnCreditCardNumberValidator(cc_number),
msg='This test requires a valid credit card number.')
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("cc_submit").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
# Wait until form is submitted and page completes loading.
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
self.PerformActionOnInfobar('accept', infobar_index=0)
# Verify the filled-in credit card number against the aggregated number.
aggregated_cc_1 = (
self.GetAutofillProfile()['credit_cards'][0]['CREDIT_CARD_NUMBER'])
aggregated_cc_2 = (
self.GetAutofillProfile()['credit_cards'][1]['CREDIT_CARD_NUMBER'])
self.assertFalse((' ' in aggregated_cc_1 or ' ' in aggregated_cc_2 or
'-' in aggregated_cc_1 or '-' in aggregated_cc_2),
msg='Whitespaces or separator chars not stripped.')
def testProfilesNotAggregatedWithNoAddress(self):
"""Test Autofill does not aggregate profiles with no address info."""
profile = {'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith',
'EMAIL_ADDRESS': 'bsmith@example.com',
'COMPANY_NAME': 'Company X',
'PHONE_HOME_WHOLE_NUMBER': '650-123-4567',}
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'duplicate_profiles_test.html'))
self.NavigateToURL(url)
for key, value in profile.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("merge_dup").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
self.assertFalse(self.GetAutofillProfile()['profiles'],
msg='Profile with no address info was aggregated.')
def testProfilesNotAggregatedWithInvalidEmail(self):
"""Test Autofill does not aggregate profiles with an invalid email."""
profile = {'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith',
'EMAIL_ADDRESS': 'garbage',
'ADDRESS_HOME_LINE1': '1234 H St.',
'ADDRESS_HOME_CITY': 'San Jose',
'ADDRESS_HOME_STATE': 'CA',
'ADDRESS_HOME_ZIP': '95110',
'COMPANY_NAME': 'Company X',
'PHONE_HOME_WHOLE_NUMBER': '408-123-4567',}
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'duplicate_profiles_test.html'))
self.NavigateToURL(url)
for key, value in profile.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("merge_dup").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
self.assertFalse(self.GetAutofillProfile()['profiles'],
msg='Profile with invalid email was aggregated.')
def _SendKeyEventsToPopulateForm(self, tab_index=0, windex=0):
"""Send key events to populate a web form with Autofill profile data.
Args:
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
"""
TAB_KEYPRESS = 0x09 # Tab keyboard key press.
DOWN_KEYPRESS = 0x28 # Down arrow keyboard key press.
RETURN_KEYPRESS = 0x0D # Return keyboard key press.
self.SendWebkitKeypressEvent(TAB_KEYPRESS, tab_index, windex)
self.SendWebkitKeypressEvent(DOWN_KEYPRESS, tab_index, windex)
self.SendWebkitKeypressEvent(DOWN_KEYPRESS, tab_index, windex)
self.SendWebkitKeypressEvent(RETURN_KEYPRESS, tab_index, windex)
def testComparePhoneNumbers(self):
"""Test phone fields parse correctly from a given profile.
The high level key presses execute the following: Select the first text
field, invoke the autofill popup list, select the first profile within the
list, and commit to the profile to populate the form.
"""
profile_path = os.path.join(self.DataDir(), 'autofill',
'phone_pinput_autofill.txt')
profile_expected_path = os.path.join(self.DataDir(), 'autofill',
'phone_pexpected_autofill.txt')
profiles = self.EvalDataFrom(profile_path)
profiles_expected = self.EvalDataFrom(profile_expected_path)
self.FillAutofillProfile(profiles=profiles)
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'form_phones.html'))
for profile_expected in profiles_expected:
self.NavigateToURL(url)
self._SendKeyEventsToPopulateForm()
form_values = {}
for key, value in profile_expected.iteritems():
js_returning_field_value = (
'var field_value = document.getElementById("%s").value;'
'window.domAutomationController.send(field_value);'
) % key
form_values[key] = self.ExecuteJavascript(
js_returning_field_value, 0, 0)
self.assertEqual(
form_values[key], value,
msg=('Original profile not equal to expected profile at key: "%s"\n'
'Expected: "%s"\nReturned: "%s"' % (
key, value, form_values[key])))
def testCCInfoNotStoredWhenAutocompleteOff(self):
"""Test CC info not offered to be saved when autocomplete=off for CC field.
If the credit card number field has autocomplete turned off, then the credit
card infobar should not offer to save the credit card info. The credit card
number must be a valid Luhn number.
"""
credit_card_info = {'CREDIT_CARD_NAME': 'Bob Smith',
'CREDIT_CARD_NUMBER': '4408041234567893',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2014'}
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'cc_autocomplete_off_test.html'))
self.NavigateToURL(url)
for key, value in credit_card_info.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
js_code = """
document.getElementById("cc_submit").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
"""
self.ExecuteJavascript(js_code, 0, 0)
# Wait until form is submitted and page completes loading.
self.WaitUntil(
lambda: self.GetDOMValue('document.readyState'),
expect_retval='complete')
cc_infobar = self.GetBrowserInfo()['windows'][0]['tabs'][0]['infobars']
self.assertFalse(cc_infobar,
msg='Save credit card infobar offered to save CC info.')
def testNoAutofillForReadOnlyFields(self):
"""Test that Autofill does not fill in read-only fields."""
profile = {'NAME_FIRST': 'Bob',
'NAME_LAST': 'Smith',
'EMAIL_ADDRESS': 'bsmith@gmail.com',
'ADDRESS_HOME_LINE1': '1234 H St.',
'ADDRESS_HOME_CITY': 'San Jose',
'ADDRESS_HOME_STATE': 'CA',
'ADDRESS_HOME_ZIP': '95110',
'COMPANY_NAME': 'Company X',
'PHONE_HOME_WHOLE_NUMBER': '408-123-4567',}
self.FillAutofillProfile(profiles=[profile])
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'read_only_field_test.html'))
self.NavigateToURL(url)
self._SendKeyEventsToPopulateForm()
js_return_readonly_field = (
'var field_value = document.getElementById("email").value;'
'window.domAutomationController.send(field_value);')
readonly_field_value = self.ExecuteJavascript(
js_return_readonly_field, 0, 0)
js_return_addrline1_field = (
'var field_value = document.getElementById("address").value;'
'window.domAutomationController.send(field_value);')
addrline1_field_value = self.ExecuteJavascript(
js_return_addrline1_field, 0, 0)
self.assertNotEqual(
readonly_field_value, profile['EMAIL_ADDRESS'],
'Autofill filled in value "%s" for a read-only field.'
% readonly_field_value)
self.assertEqual(
addrline1_field_value, profile['ADDRESS_HOME_LINE1'],
'Unexpected value "%s" in the Address field.' % addrline1_field_value)
def FormFillLatencyAfterSubmit(self):
"""Test latency time on form submit with lots of stored Autofill profiles.
This test verifies when a profile is selected from the Autofill dictionary
that consists of thousands of profiles, the form does not hang after being
submitted.
The high level key presses execute the following: Select the first text
field, invoke the autofill popup list, select the first profile within the
list, and commit to the profile to populate the form.
    This test is partially automated. The bulk of the work is automated:
    generating 1500-plus profiles, inserting them into Autofill, and
    selecting a profile from the list. The tester then needs to click the
    submit button and check whether the browser hangs.
"""
# HTML file needs to be run from a http:// url.
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'latency_after_submit_test.html'))
# Run the generator script to generate the dictionary list needed for the
# profiles.
gen = autofill_dataset_generator.DatasetGenerator(
logging_level=logging.ERROR)
list_of_dict = gen.GenerateDataset(num_of_dict_to_generate=1501)
self.FillAutofillProfile(profiles=list_of_dict)
self.NavigateToURL(url)
self._SendKeyEventsToPopulateForm()
# TODO(dyu): add automated form hang or crash verification.
raw_input(
'Verify the test manually. Test hang time after submitting the form.')
def AutofillCrowdsourcing(self):
"""Test able to send POST request of web form to Autofill server.
The Autofill server processes the data offline, so it can take a few days
for the result to be detectable. Manual verification is required.
"""
# HTML file needs to be run from a specific http:// url to be able to verify
# the results a few days later by visiting the same url.
url = 'http://www.corp.google.com/~dyu/autofill/crowdsourcing-test.html'
# Adding crowdsourcing Autofill profile.
file_path = os.path.join(self.DataDir(), 'autofill',
'crowdsource_autofill.txt')
profiles = self.EvalDataFrom(file_path)
self.FillAutofillProfile(profiles=profiles)
    # The Autofill server samples roughly 2.5% of the data posted, so looping
    # 1000 times yields about 25 captured submissions, a safe margin above
    # the server's noise threshold.
for i in range(1000):
fname = self.GetAutofillProfile()['profiles'][0]['NAME_FIRST']
lname = self.GetAutofillProfile()['profiles'][0]['NAME_LAST']
email = self.GetAutofillProfile()['profiles'][0]['EMAIL_ADDRESS']
# Submit form to collect crowdsourcing data for Autofill.
self.NavigateToURL(url, 0, 0)
fname_field = ('document.getElementById("fn").value = "%s"; '
'window.domAutomationController.send("done");') % fname
lname_field = ('document.getElementById("ln").value = "%s"; '
'window.domAutomationController.send("done");') % lname
email_field = ('document.getElementById("em").value = "%s"; '
'window.domAutomationController.send("done");') % email
      self.ExecuteJavascript(fname_field, 0, 0)
      self.ExecuteJavascript(lname_field, 0, 0)
      self.ExecuteJavascript(email_field, 0, 0)
self.ExecuteJavascript('document.getElementById("frmsubmit").submit();'
'window.domAutomationController.send("done");',
0, 0)
def MergeDuplicateProfilesInAutofill(self):
"""Test Autofill ability to merge duplicate profiles and throw away junk."""
# HTML file needs to be run from a http:// url.
url = self.GetHttpURLForDataPath(
os.path.join('autofill', 'duplicate_profiles_test.html'))
# Run the parser script to generate the dictionary list needed for the
# profiles.
c = autofill_dataset_converter.DatasetConverter(
os.path.join(self.DataDir(), 'autofill', 'dataset.txt'),
logging_level=logging.INFO) # Set verbosity to INFO, WARNING, ERROR.
list_of_dict = c.Convert()
for profile in list_of_dict:
self.NavigateToURL(url)
for key, value in profile.iteritems():
script = ('document.getElementById("%s").value = "%s"; '
'window.domAutomationController.send("done");') % (key, value)
self.ExecuteJavascript(script, 0, 0)
self.ExecuteJavascript('document.getElementById("merge_dup").submit();'
'window.domAutomationController.send("done");',
0, 0)
# Verify total number of inputted profiles is greater than the final number
# of profiles after merging.
self.assertTrue(
len(list_of_dict) > len(self.GetAutofillProfile()['profiles']))
# Write profile dictionary to a file.
merged_profile = os.path.join(self.DataDir(), 'autofill',
'merged-profiles.txt')
profile_dict = self.GetAutofillProfile()['profiles']
output = open(merged_profile, 'wb')
pickle.dump(profile_dict, output)
output.close()
if __name__ == '__main__':
pyauto_functional.Main()
import os
import sys
import shutil
import logging
import time
try:
    # Python 3.x
    import queue
except ImportError:
    # Python 2.x
    import Queue as queue
try:
# Python 3.x
from urllib.parse import quote
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib.error import URLError
from urllib.request import HTTPSHandler
from urllib.request import HTTPPasswordMgrWithDefaultRealm
from urllib.request import HTTPBasicAuthHandler
from urllib.request import build_opener
from urllib.request import install_opener
except ImportError:
# Python 2.x
from urllib import urlencode
from urllib2 import quote
from urllib2 import urlopen
from urllib2 import URLError
from urllib2 import HTTPSHandler
from urllib2 import HTTPPasswordMgrWithDefaultRealm
from urllib2 import HTTPBasicAuthHandler
from urllib2 import build_opener
from urllib2 import install_opener
import tarfile
import fhem
"""
FhemSelfTester implements necessary functionality for automatic testing of FHEM
with the Python API.
This module can automatically download, install and run a clean FHEM server.
"""
class FhemSelfTester:
def __init__(self):
self.log = logging.getLogger('SelfTester')
def download(self, filename, urlpath):
"""
Download an FHEM tar.gz file, if not yet available locally.
"""
if os.path.exists(filename):
return True
try:
dat = urlopen(urlpath).read()
except Exception as e:
self.log.error("Failed to download {}, {}".format(urlpath, e))
return False
        try:
            with open(filename, 'wb') as f:
                f.write(dat)
        except Exception as e:
            self.log.error("Failed to write {}, {}".format(filename, e))
            return False  # the original fell through to 'return True' here
        return True
    def install(self, archivename, destination, sanity_check_file):
        """
        Install a NEW, DEFAULT FHEM server.
        WARNING: the directory tree at destination is ERASED! To prevent
        accidental erasure, the destination path must contain 'fhem' and the
        fhem.pl file at sanity_check_file must exist.
        OLD INSTALLATIONS ARE DELETED!
        """
if not archivename.endswith("tar.gz"):
self.log.error(
"Archive needs to be of type *.tar.gz: {}".format(archivename))
return False
if not os.path.exists(archivename):
self.log.error("Archive {} not found.".format(archivename))
return False
if "fhem" not in destination or (os.path.exists(destination) and not os.path.exists(sanity_check_file)):
self.log.error(
"Dangerous or inconsistent fhem install-path: {}, need destination with 'fhem' in name.".format(destination))
self.log.error(
"Or {} exists and sanity-check-file {} doesn't exist.".format(destination, sanity_check_file))
return False
        if os.path.exists(destination):
            try:
                shutil.rmtree(destination)
            except Exception as e:
                self.log.error(
                    "Failed to remove existing installation at {}, {}".format(
                        destination, e))
                return False
        try:
            tar = tarfile.open(archivename, "r:gz")
            tar.extractall(destination)
            tar.close()
        except Exception as e:
            self.log.error("Failed to extract {}, {}".format(archivename, e))
            return False  # the original fell through to 'return True' here
        return True
    def is_running(self, fhem_url='localhost', protocol='http', port=8083):
        """
        Check if an FHEM server is already running.
        Returns the server's version string if one answers, otherwise None.
        """
fh = fhem.Fhem(fhem_url, protocol=protocol, port=port)
ver = fh.send_cmd('version')
if ver is not None:
fh.close()
return ver
return None
def shutdown(self, fhem_url='localhost', protocol='http', port=8083):
"""
        Shutdown a running FHEM server.
"""
fh = fhem.Fhem(fhem_url, protocol=protocol, port=port)
fh.log.level = logging.CRITICAL
        try:
            self.log.warning("Shutting down fhem at {}".format(fhem_url))
            fh.send_cmd("shutdown")
        except Exception:
            # The server drops the connection on shutdown, so errors here
            # are expected.
            pass
self.log.warning("Fhem shutdown complete.")
def set_reading(fhi, name, reading, value):
fhi.send_cmd("setreading {} {} {}".format(name, reading, value))
def create_device(fhi, name, readings):
    fhi.send_cmd("define {} dummy".format(name))
    fhi.send_cmd("attr {} setList state:on,off".format(name))
    fhi.send_cmd("set {} on".format(name))
    # readingList is a space-separated list of the reading names.
    reading_list = " ".join(readings)
    fhi.send_cmd("attr {} readingList {}".format(name, reading_list))
    for rd in readings:
        set_reading(fhi, name, rd, readings[rd])
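# Hypothetical usage, matching the test devices defined in __main__ below:
#   create_device(fh, "clima_sensor1", {"temperature": 18.2, "humidity": 88.2})
# defines a dummy device, declares its readingList, and seeds both readings.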
if __name__ == '__main__':
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s.%(msecs)03d %(levelname)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
print("Start FhemSelfTest")
st = FhemSelfTester()
print("State 1: Object created.")
config = {
'archivename': "./fhem-5.9.tar.gz",
'urlpath': "https://fhem.de/fhem-5.9.tar.gz",
'destination': "./fhem",
'fhem_file': "./fhem/fhem-5.9/fhem.pl",
'config_file': "./fhem/fhem-5.9/fhem.cfg",
'fhem_dir': "./fhem/fhem-5.9/",
'exec': "cd fhem/fhem-5.9/ && perl fhem.pl fhem.cfg",
'testhost': 'localhost',
}
if st.is_running(fhem_url=config['testhost'], protocol='http', port=8083) is not None:
print("Fhem is already running!")
st.shutdown(fhem_url=config['testhost'], protocol='http', port=8083)
time.sleep(1)
if st.is_running(fhem_url=config['testhost'], protocol='http', port=8083) is not None:
print("Shutdown failed!")
sys.exit(-3)
print("--------------------")
print("Reinstalling FHEM...")
if not st.download(config['archivename'], config['urlpath']):
print("Download failed.")
sys.exit(-1)
print("Starting fhem installation")
# WARNING! THIS DELETES ANY EXISTING FHEM SERVER at 'destination'!
# All configuration files, databases, logs etc. are DELETED to allow a fresh test install!
if not st.install(config['archivename'], config['destination'], config['fhem_file']):
print("Install failed")
sys.exit(-2)
os.system('cat fhem-config-addon.cfg >> {}'.format(config['config_file']))
certs_dir = os.path.join(config['fhem_dir'], 'certs')
os.system('mkdir {}'.format(certs_dir))
os.system('cd {} && openssl req -newkey rsa:2048 -nodes -keyout server-key.pem -x509 -days 36500 -out server-cert.pem -subj "/C=DE/ST=NRW/L=Earth/O=CompanyName/OU=IT/CN=www.example.com/emailAddress=email@example.com"'.format(certs_dir))
os.system(config['exec'])
time.sleep(1)
if st.is_running(fhem_url=config['testhost'], protocol='http', port=8083) is None:
print("Fhem is NOT running after install and start!")
sys.exit(-4)
print("Install should be ok, Fhem running.")
connections = [
{'protocol': 'http',
'port': 8083},
{'protocol': 'telnet',
'port': 7073,
'use_ssl': True,
'password': 'secretsauce'},
{'protocol': 'telnet',
'port': 7072},
{'protocol': 'https',
'port': 8084},
{'protocol': 'https',
'port': 8085,
'username': 'test',
'password': 'secretsauce'},
]
first = True
print("")
print("----------------- Fhem ------------")
print("Testing python-fhem Fhem():")
for connection in connections:
print('Testing connection to {} via {}'.format(
config['testhost'], connection))
fh = fhem.Fhem(config['testhost'], **connection)
devs = [
{'name': 'clima_sensor1',
'readings': {'temperature': 18.2,
'humidity': 88.2}},
{'name': 'clima_sensor2',
'readings': {'temperature': 19.1,
'humidity': 85.7}}
]
if first is True:
for dev in devs:
create_device(fh, dev['name'], dev['readings'])
first = False
for dev in devs:
for i in range(10):
print("Repetion: {}".format(i+1))
for rd in dev['readings']:
dict_value = fh.get_device_reading(
dev['name'], rd, blocking=False)
                    try:
                        value = dict_value['Value']
                    except (TypeError, KeyError):
                        # A bad reply may be None or lack the 'Value' key.
                        print(
                            'Bad reply reading {} {} -> {}'.format(
                                dev['name'], rd, dict_value))
                        sys.exit(-7)
if value == dev['readings'][rd]:
print(
"Reading-test {},{}={} ok.".format(dev['name'], rd, dev['readings'][rd]))
else:
print("Failed to set and read reading! {},{} {} != {}".format(
dev['name'], rd, value, dev['readings'][rd]))
sys.exit(-5)
num_temps = 0
for dev in devs:
if 'temperature' in dev['readings']:
num_temps += 1
temps = fh.get_readings("temperature", timeout=0.1, blocking=False)
if len(temps) != num_temps:
print("There should have been {} devices with temperature reading, but we got {}. Ans: {}".format(
num_temps, len(temps), temps))
sys.exit(-6)
else:
print("Multiread of all devices with 'temperature' reading: ok.")
states = fh.get_states()
if len(states) < 5:
print("Iconsistent number of states: {}".format(len(states)))
sys.exit(-7)
else:
print("states received: {}, ok.".format(len(states)))
fh.close()
print("")
print("")
print("---------------Queues--------------------------")
print("Testing python-fhem telnet FhemEventQueues():")
for connection in connections:
if connection['protocol'] != 'telnet':
continue
print('Testing connection to {} via {}'.format(
config['testhost'], connection))
fh = fhem.Fhem(config['testhost'], **connections[0])
que = queue.Queue()
        que_events = 0
fq = fhem.FhemEventQueue(config['testhost'], que, **connection)
devs = [
{'name': 'clima_sensor1',
'readings': {'temperature': 18.2,
'humidity': 88.2}},
{'name': 'clima_sensor2',
'readings': {'temperature': 19.1,
'humidity': 85.7}}
]
time.sleep(1.0)
for dev in devs:
for i in range(10):
print("Repetion: {}".format(i+1))
for rd in dev['readings']:
set_reading(fh,dev['name'],rd,18.0+i/0.2)
que_events += 1
time.sleep(0.05)
        time.sleep(3)  # Crucial: give the FhemEventQueue thread time to deliver all events.
ql = 0
has_data = True
        while has_data:
            try:
                que.get(False)
            except queue.Empty:
                has_data = False
                break
            que.task_done()
            ql += 1
print("Queue length: {}".format(ql))
        if ql != que_events:
            print("FhemEventQueue contains {} entries, expected {} entries, "
                  "failure.".format(ql, que_events))
sys.exit(-8)
else:
print("Queue test success, Ok.")
fh.close()
fq.close()
time.sleep(0.5)
print("")
sys.exit(0)
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juergen Brendel, Cisco Systems Inc.
# @author: Abhishek Raut, Cisco Systems Inc.
# @author: Rudrajit Tapadar, Cisco Systems Inc.
from six import moves
from sqlalchemy.orm import exc as s_exc
from testtools import matchers
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as db
from neutron.db import db_base_plugin_v2
from neutron.plugins.cisco.common import cisco_constants
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.db import n1kv_db_v2
from neutron.plugins.cisco.db import n1kv_models_v2
from neutron.tests import base
from neutron.tests.unit import test_db_plugin as test_plugin
PHYS_NET = 'physnet1'
PHYS_NET_2 = 'physnet2'
VLAN_MIN = 10
VLAN_MAX = 19
VXLAN_MIN = 5000
VXLAN_MAX = 5009
SEGMENT_RANGE = '200-220'
SEGMENT_RANGE_MIN_OVERLAP = '210-230'
SEGMENT_RANGE_MAX_OVERLAP = '190-209'
SEGMENT_RANGE_OVERLAP = '190-230'
TEST_NETWORK_ID = 'abcdefghijklmnopqrstuvwxyz'
TEST_NETWORK_ID2 = 'abcdefghijklmnopqrstuvwxy2'
TEST_NETWORK_ID3 = 'abcdefghijklmnopqrstuvwxy3'
TEST_NETWORK_PROFILE = {'name': 'test_profile',
'segment_type': 'vlan',
'physical_network': 'physnet1',
'segment_range': '10-19'}
TEST_NETWORK_PROFILE_2 = {'name': 'test_profile_2',
'segment_type': 'vlan',
'physical_network': 'physnet1',
'segment_range': SEGMENT_RANGE}
TEST_NETWORK_PROFILE_VXLAN = {'name': 'test_profile',
'segment_type': 'overlay',
'sub_type': 'native_vxlan',
'segment_range': '5000-5009',
'multicast_ip_range': '239.0.0.70-239.0.0.80'}
TEST_POLICY_PROFILE = {'id': '4a417990-76fb-11e2-bcfd-0800200c9a66',
'name': 'test_policy_profile'}
TEST_NETWORK_PROFILE_MULTI_SEGMENT = {'name': 'test_profile',
'segment_type': 'multi-segment'}
TEST_NETWORK_PROFILE_VLAN_TRUNK = {'name': 'test_profile',
'segment_type': 'trunk',
'sub_type': 'vlan'}
TEST_NETWORK_PROFILE_VXLAN_TRUNK = {'name': 'test_profile',
'segment_type': 'trunk',
'sub_type': 'overlay'}
def _create_test_network_profile_if_not_there(session,
profile=TEST_NETWORK_PROFILE):
try:
_profile = session.query(n1kv_models_v2.NetworkProfile).filter_by(
name=profile['name']).one()
except s_exc.NoResultFound:
_profile = n1kv_db_v2.create_network_profile(session, profile)
return _profile
def _create_test_policy_profile_if_not_there(session,
profile=TEST_POLICY_PROFILE):
try:
_profile = session.query(n1kv_models_v2.PolicyProfile).filter_by(
name=profile['name']).one()
except s_exc.NoResultFound:
_profile = n1kv_db_v2.create_policy_profile(profile)
return _profile
class VlanAllocationsTest(base.BaseTestCase):
def setUp(self):
super(VlanAllocationsTest, self).setUp()
db.configure_db()
self.session = db.get_session()
self.net_p = _create_test_network_profile_if_not_there(self.session)
n1kv_db_v2.sync_vlan_allocations(self.session, self.net_p)
self.addCleanup(db.clear_db)
def test_sync_vlan_allocations_outside_segment_range(self):
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
self.session,
PHYS_NET,
VLAN_MIN - 1)
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
self.session,
PHYS_NET,
VLAN_MAX + 1)
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
self.session,
PHYS_NET_2,
VLAN_MIN + 20)
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
self.session,
PHYS_NET_2,
VLAN_MIN + 20)
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
self.session,
PHYS_NET_2,
VLAN_MAX + 20)
def test_sync_vlan_allocations_unallocated_vlans(self):
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
PHYS_NET,
VLAN_MIN).allocated)
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
PHYS_NET,
VLAN_MIN + 1).
allocated)
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
PHYS_NET,
VLAN_MAX - 1).
allocated)
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
PHYS_NET,
VLAN_MAX).allocated)
def test_vlan_pool(self):
vlan_ids = set()
for x in moves.xrange(VLAN_MIN, VLAN_MAX + 1):
(physical_network, seg_type,
vlan_id, m_ip) = n1kv_db_v2.reserve_vlan(self.session, self.net_p)
self.assertEqual(physical_network, PHYS_NET)
self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
vlan_ids.add(vlan_id)
self.assertRaises(n_exc.NoNetworkAvailable,
n1kv_db_v2.reserve_vlan,
self.session,
self.net_p)
n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_ids.pop())
physical_network, seg_type, vlan_id, m_ip = (n1kv_db_v2.reserve_vlan(
self.session, self.net_p))
self.assertEqual(physical_network, PHYS_NET)
self.assertThat(vlan_id, matchers.GreaterThan(VLAN_MIN - 1))
self.assertThat(vlan_id, matchers.LessThan(VLAN_MAX + 1))
vlan_ids.add(vlan_id)
for vlan_id in vlan_ids:
n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id)
def test_specific_vlan_inside_pool(self):
vlan_id = VLAN_MIN + 5
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
PHYS_NET,
vlan_id).allocated)
n1kv_db_v2.reserve_specific_vlan(self.session, PHYS_NET, vlan_id)
self.assertTrue(n1kv_db_v2.get_vlan_allocation(self.session,
PHYS_NET,
vlan_id).allocated)
self.assertRaises(n_exc.VlanIdInUse,
n1kv_db_v2.reserve_specific_vlan,
self.session,
PHYS_NET,
vlan_id)
n1kv_db_v2.release_vlan(self.session, PHYS_NET, vlan_id)
self.assertFalse(n1kv_db_v2.get_vlan_allocation(self.session,
PHYS_NET,
vlan_id).allocated)
def test_specific_vlan_outside_pool(self):
vlan_id = VLAN_MAX + 5
self.assertRaises(c_exc.VlanIDNotFound,
n1kv_db_v2.get_vlan_allocation,
self.session,
PHYS_NET,
vlan_id)
self.assertRaises(c_exc.VlanIDOutsidePool,
n1kv_db_v2.reserve_specific_vlan,
self.session,
PHYS_NET,
vlan_id)
class VxlanAllocationsTest(base.BaseTestCase,
n1kv_db_v2.NetworkProfile_db_mixin):
def setUp(self):
super(VxlanAllocationsTest, self).setUp()
db.configure_db()
self.session = db.get_session()
self.net_p = _create_test_network_profile_if_not_there(
self.session, TEST_NETWORK_PROFILE_VXLAN)
n1kv_db_v2.sync_vxlan_allocations(self.session, self.net_p)
self.addCleanup(db.clear_db)
def test_sync_vxlan_allocations_outside_segment_range(self):
self.assertRaises(c_exc.VxlanIDNotFound,
n1kv_db_v2.get_vxlan_allocation,
self.session,
VXLAN_MIN - 1)
self.assertRaises(c_exc.VxlanIDNotFound,
n1kv_db_v2.get_vxlan_allocation,
self.session,
VXLAN_MAX + 1)
def test_sync_vxlan_allocations_unallocated_vxlans(self):
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
VXLAN_MIN).allocated)
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
VXLAN_MIN + 1).
allocated)
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
VXLAN_MAX - 1).
allocated)
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
VXLAN_MAX).allocated)
def test_vxlan_pool(self):
vxlan_ids = set()
for x in moves.xrange(VXLAN_MIN, VXLAN_MAX + 1):
vxlan = n1kv_db_v2.reserve_vxlan(self.session, self.net_p)
vxlan_id = vxlan[2]
self.assertThat(vxlan_id, matchers.GreaterThan(VXLAN_MIN - 1))
self.assertThat(vxlan_id, matchers.LessThan(VXLAN_MAX + 1))
vxlan_ids.add(vxlan_id)
self.assertRaises(n_exc.NoNetworkAvailable,
n1kv_db_v2.reserve_vxlan,
self.session,
self.net_p)
n1kv_db_v2.release_vxlan(self.session, vxlan_ids.pop())
vxlan = n1kv_db_v2.reserve_vxlan(self.session, self.net_p)
vxlan_id = vxlan[2]
self.assertThat(vxlan_id, matchers.GreaterThan(VXLAN_MIN - 1))
self.assertThat(vxlan_id, matchers.LessThan(VXLAN_MAX + 1))
vxlan_ids.add(vxlan_id)
for vxlan_id in vxlan_ids:
n1kv_db_v2.release_vxlan(self.session, vxlan_id)
n1kv_db_v2.delete_network_profile(self.session, self.net_p.id)
def test_specific_vxlan_inside_pool(self):
vxlan_id = VXLAN_MIN + 5
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
vxlan_id).allocated)
n1kv_db_v2.reserve_specific_vxlan(self.session, vxlan_id)
self.assertTrue(n1kv_db_v2.get_vxlan_allocation(self.session,
vxlan_id).allocated)
self.assertRaises(c_exc.VxlanIDInUse,
n1kv_db_v2.reserve_specific_vxlan,
self.session,
vxlan_id)
n1kv_db_v2.release_vxlan(self.session, vxlan_id)
self.assertFalse(n1kv_db_v2.get_vxlan_allocation(self.session,
vxlan_id).allocated)
def test_specific_vxlan_outside_pool(self):
vxlan_id = VXLAN_MAX + 5
self.assertRaises(c_exc.VxlanIDNotFound,
n1kv_db_v2.get_vxlan_allocation,
self.session,
vxlan_id)
self.assertRaises(c_exc.VxlanIDOutsidePool,
n1kv_db_v2.reserve_specific_vxlan,
self.session,
vxlan_id)
class NetworkBindingsTest(test_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
super(NetworkBindingsTest, self).setUp()
db.configure_db()
self.session = db.get_session()
self.addCleanup(db.clear_db)
def test_add_network_binding(self):
with self.network() as network:
TEST_NETWORK_ID = network['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
p = _create_test_network_profile_if_not_there(self.session)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'vlan',
PHYS_NET, 1234, '0.0.0.0', p.id, None)
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'vlan')
self.assertEqual(binding.physical_network, PHYS_NET)
self.assertEqual(binding.segmentation_id, 1234)
def test_create_multi_segment_network(self):
with self.network() as network:
TEST_NETWORK_ID = network['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_MULTI_SEGMENT)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'multi-segment',
None, 0, '0.0.0.0', p.id, None)
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'multi-segment')
self.assertIsNone(binding.physical_network)
self.assertEqual(binding.segmentation_id, 0)
def test_add_multi_segment_binding(self):
with self.network() as network:
TEST_NETWORK_ID = network['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_MULTI_SEGMENT)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'multi-segment',
None, 0, '0.0.0.0', p.id,
[(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'multi-segment')
self.assertIsNone(binding.physical_network)
self.assertEqual(binding.segmentation_id, 0)
ms_binding = (n1kv_db_v2.get_multi_segment_network_binding(
self.session, TEST_NETWORK_ID,
(TEST_NETWORK_ID2, TEST_NETWORK_ID3)))
self.assertIsNotNone(ms_binding)
self.assertEqual(ms_binding.multi_segment_id, TEST_NETWORK_ID)
self.assertEqual(ms_binding.segment1_id, TEST_NETWORK_ID2)
self.assertEqual(ms_binding.segment2_id, TEST_NETWORK_ID3)
ms_members = (n1kv_db_v2.get_multi_segment_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(ms_members,
[(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
self.assertTrue(n1kv_db_v2.is_multi_segment_member(
self.session, TEST_NETWORK_ID2))
self.assertTrue(n1kv_db_v2.is_multi_segment_member(
self.session, TEST_NETWORK_ID3))
n1kv_db_v2.del_multi_segment_binding(
self.session, TEST_NETWORK_ID,
[(TEST_NETWORK_ID2, TEST_NETWORK_ID3)])
ms_members = (n1kv_db_v2.get_multi_segment_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(ms_members, [])
def test_create_vlan_trunk_network(self):
with self.network() as network:
TEST_NETWORK_ID = network['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_VLAN_TRUNK)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'trunk',
None, 0, '0.0.0.0', p.id, None)
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'trunk')
self.assertIsNone(binding.physical_network)
self.assertEqual(binding.segmentation_id, 0)
def test_create_vxlan_trunk_network(self):
with self.network() as network:
TEST_NETWORK_ID = network['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_VXLAN_TRUNK)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'trunk',
None, 0, '0.0.0.0', p.id, None)
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'trunk')
self.assertIsNone(binding.physical_network)
self.assertEqual(binding.segmentation_id, 0)
def test_add_vlan_trunk_binding(self):
with self.network() as network1:
with self.network() as network2:
TEST_NETWORK_ID = network1['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
TEST_NETWORK_ID2 = network2['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID2)
p_v = _create_test_network_profile_if_not_there(self.session)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID2, 'vlan',
PHYS_NET, 1234, '0.0.0.0', p_v.id, None)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_VLAN_TRUNK)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'trunk',
None, 0, '0.0.0.0', p.id, [(TEST_NETWORK_ID2, 0)])
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'trunk')
self.assertEqual(binding.physical_network, PHYS_NET)
self.assertEqual(binding.segmentation_id, 0)
t_binding = (n1kv_db_v2.get_trunk_network_binding(
self.session, TEST_NETWORK_ID,
(TEST_NETWORK_ID2, 0)))
self.assertIsNotNone(t_binding)
self.assertEqual(t_binding.trunk_segment_id, TEST_NETWORK_ID)
self.assertEqual(t_binding.segment_id, TEST_NETWORK_ID2)
self.assertEqual(t_binding.dot1qtag, '0')
t_members = (n1kv_db_v2.get_trunk_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(t_members,
[(TEST_NETWORK_ID2, '0')])
self.assertTrue(n1kv_db_v2.is_trunk_member(
self.session, TEST_NETWORK_ID2))
n1kv_db_v2.del_trunk_segment_binding(
self.session, TEST_NETWORK_ID,
[(TEST_NETWORK_ID2, '0')])
t_members = (n1kv_db_v2.get_multi_segment_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(t_members, [])
def test_add_vxlan_trunk_binding(self):
with self.network() as network1:
with self.network() as network2:
TEST_NETWORK_ID = network1['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID)
TEST_NETWORK_ID2 = network2['network']['id']
self.assertRaises(c_exc.NetworkBindingNotFound,
n1kv_db_v2.get_network_binding,
self.session,
TEST_NETWORK_ID2)
p_v = _create_test_network_profile_if_not_there(
self.session, TEST_NETWORK_PROFILE_VXLAN_TRUNK)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID2, 'overlay',
None, 5100, '224.10.10.10', p_v.id, None)
p = _create_test_network_profile_if_not_there(
self.session,
TEST_NETWORK_PROFILE_VXLAN_TRUNK)
n1kv_db_v2.add_network_binding(
self.session, TEST_NETWORK_ID, 'trunk',
None, 0, '0.0.0.0', p.id,
[(TEST_NETWORK_ID2, 5)])
binding = n1kv_db_v2.get_network_binding(
self.session, TEST_NETWORK_ID)
self.assertIsNotNone(binding)
self.assertEqual(binding.network_id, TEST_NETWORK_ID)
self.assertEqual(binding.network_type, 'trunk')
self.assertIsNone(binding.physical_network)
self.assertEqual(binding.segmentation_id, 0)
t_binding = (n1kv_db_v2.get_trunk_network_binding(
self.session, TEST_NETWORK_ID,
(TEST_NETWORK_ID2, '5')))
self.assertIsNotNone(t_binding)
self.assertEqual(t_binding.trunk_segment_id, TEST_NETWORK_ID)
self.assertEqual(t_binding.segment_id, TEST_NETWORK_ID2)
self.assertEqual(t_binding.dot1qtag, '5')
t_members = (n1kv_db_v2.get_trunk_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(t_members,
[(TEST_NETWORK_ID2, '5')])
self.assertTrue(n1kv_db_v2.is_trunk_member(
self.session, TEST_NETWORK_ID2))
n1kv_db_v2.del_trunk_segment_binding(
self.session, TEST_NETWORK_ID,
[(TEST_NETWORK_ID2, '5')])
t_members = (n1kv_db_v2.get_multi_segment_members(
self.session, TEST_NETWORK_ID))
self.assertEqual(t_members, [])
class NetworkProfileTests(base.BaseTestCase,
n1kv_db_v2.NetworkProfile_db_mixin):
def setUp(self):
super(NetworkProfileTests, self).setUp()
db.configure_db()
self.session = db.get_session()
self.addCleanup(db.clear_db)
def test_create_network_profile(self):
_db_profile = n1kv_db_v2.create_network_profile(self.session,
TEST_NETWORK_PROFILE)
self.assertIsNotNone(_db_profile)
db_profile = (self.session.query(n1kv_models_v2.NetworkProfile).
filter_by(name=TEST_NETWORK_PROFILE['name']).one())
self.assertIsNotNone(db_profile)
self.assertEqual(_db_profile.id, db_profile.id)
self.assertEqual(_db_profile.name, db_profile.name)
self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
self.assertEqual(_db_profile.multicast_ip_index,
db_profile.multicast_ip_index)
self.assertEqual(_db_profile.multicast_ip_range,
db_profile.multicast_ip_range)
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
def test_create_multi_segment_network_profile(self):
_db_profile = (n1kv_db_v2.create_network_profile(
self.session, TEST_NETWORK_PROFILE_MULTI_SEGMENT))
self.assertIsNotNone(_db_profile)
db_profile = (
self.session.query(
n1kv_models_v2.NetworkProfile).filter_by(
name=TEST_NETWORK_PROFILE_MULTI_SEGMENT['name'])
.one())
self.assertIsNotNone(db_profile)
self.assertEqual(_db_profile.id, db_profile.id)
self.assertEqual(_db_profile.name, db_profile.name)
self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
self.assertEqual(_db_profile.multicast_ip_index,
db_profile.multicast_ip_index)
self.assertEqual(_db_profile.multicast_ip_range,
db_profile.multicast_ip_range)
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
def test_create_vlan_trunk_network_profile(self):
_db_profile = (n1kv_db_v2.create_network_profile(
self.session, TEST_NETWORK_PROFILE_VLAN_TRUNK))
self.assertIsNotNone(_db_profile)
db_profile = (self.session.query(n1kv_models_v2.NetworkProfile).
filter_by(name=TEST_NETWORK_PROFILE_VLAN_TRUNK['name']).
one())
self.assertIsNotNone(db_profile)
self.assertEqual(_db_profile.id, db_profile.id)
self.assertEqual(_db_profile.name, db_profile.name)
self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
self.assertEqual(_db_profile.multicast_ip_index,
db_profile.multicast_ip_index)
self.assertEqual(_db_profile.multicast_ip_range,
db_profile.multicast_ip_range)
self.assertEqual(_db_profile.sub_type, db_profile.sub_type)
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
def test_create_vxlan_trunk_network_profile(self):
_db_profile = (n1kv_db_v2.create_network_profile(
self.session, TEST_NETWORK_PROFILE_VXLAN_TRUNK))
self.assertIsNotNone(_db_profile)
db_profile = (self.session.query(n1kv_models_v2.NetworkProfile).
filter_by(name=TEST_NETWORK_PROFILE_VXLAN_TRUNK['name']).
one())
self.assertIsNotNone(db_profile)
self.assertEqual(_db_profile.id, db_profile.id)
self.assertEqual(_db_profile.name, db_profile.name)
self.assertEqual(_db_profile.segment_type, db_profile.segment_type)
self.assertEqual(_db_profile.segment_range, db_profile.segment_range)
self.assertEqual(_db_profile.multicast_ip_index,
db_profile.multicast_ip_index)
self.assertEqual(_db_profile.multicast_ip_range,
db_profile.multicast_ip_range)
self.assertEqual(_db_profile.sub_type, db_profile.sub_type)
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
def test_create_network_profile_overlap(self):
_db_profile = n1kv_db_v2.create_network_profile(self.session,
TEST_NETWORK_PROFILE_2)
ctx = context.get_admin_context()
TEST_NETWORK_PROFILE_2['name'] = 'net-profile-min-overlap'
TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_MIN_OVERLAP
test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2}
self.assertRaises(n_exc.InvalidInput,
self.create_network_profile,
ctx,
test_net_profile)
TEST_NETWORK_PROFILE_2['name'] = 'net-profile-max-overlap'
TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_MAX_OVERLAP
test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2}
self.assertRaises(n_exc.InvalidInput,
self.create_network_profile,
ctx,
test_net_profile)
TEST_NETWORK_PROFILE_2['name'] = 'net-profile-overlap'
TEST_NETWORK_PROFILE_2['segment_range'] = SEGMENT_RANGE_OVERLAP
test_net_profile = {'network_profile': TEST_NETWORK_PROFILE_2}
self.assertRaises(n_exc.InvalidInput,
self.create_network_profile,
ctx,
test_net_profile)
n1kv_db_v2.delete_network_profile(self.session, _db_profile.id)
def test_delete_network_profile(self):
try:
profile = (self.session.query(n1kv_models_v2.NetworkProfile).
filter_by(name=TEST_NETWORK_PROFILE['name']).one())
except s_exc.NoResultFound:
profile = n1kv_db_v2.create_network_profile(self.session,
TEST_NETWORK_PROFILE)
n1kv_db_v2.delete_network_profile(self.session, profile.id)
try:
self.session.query(n1kv_models_v2.NetworkProfile).filter_by(
name=TEST_NETWORK_PROFILE['name']).one()
except s_exc.NoResultFound:
pass
else:
self.fail("Network Profile (%s) was not deleted" %
TEST_NETWORK_PROFILE['name'])
def test_update_network_profile(self):
TEST_PROFILE_1 = {'name': 'test_profile_1'}
profile = _create_test_network_profile_if_not_there(self.session)
updated_profile = n1kv_db_v2.update_network_profile(self.session,
profile.id,
TEST_PROFILE_1)
self.assertEqual(updated_profile.name, TEST_PROFILE_1['name'])
n1kv_db_v2.delete_network_profile(self.session, profile.id)
def test_get_network_profile(self):
profile = n1kv_db_v2.create_network_profile(self.session,
TEST_NETWORK_PROFILE)
got_profile = n1kv_db_v2.get_network_profile(self.session, profile.id)
self.assertEqual(profile.id, got_profile.id)
self.assertEqual(profile.name, got_profile.name)
n1kv_db_v2.delete_network_profile(self.session, profile.id)
def test_get_network_profiles(self):
test_profiles = [{'name': 'test_profile1',
'segment_type': 'vlan',
'physical_network': 'phys1',
'segment_range': '200-210'},
{'name': 'test_profile2',
'segment_type': 'vlan',
'physical_network': 'phys1',
'segment_range': '211-220'},
{'name': 'test_profile3',
'segment_type': 'vlan',
'physical_network': 'phys1',
'segment_range': '221-230'},
{'name': 'test_profile4',
'segment_type': 'vlan',
'physical_network': 'phys1',
'segment_range': '231-240'},
{'name': 'test_profile5',
'segment_type': 'vlan',
'physical_network': 'phys1',
'segment_range': '241-250'},
{'name': 'test_profile6',
'segment_type': 'vlan',
'physical_network': 'phys1',
'segment_range': '251-260'},
{'name': 'test_profile7',
'segment_type': 'vlan',
'physical_network': 'phys1',
'segment_range': '261-270'}]
[n1kv_db_v2.create_network_profile(self.session, p)
for p in test_profiles]
# TODO(abhraut): Fix this test to work with real tenant_id
profiles = n1kv_db_v2._get_network_profiles(db_session=self.session)
self.assertEqual(len(test_profiles), len(list(profiles)))
class PolicyProfileTests(base.BaseTestCase):
def setUp(self):
super(PolicyProfileTests, self).setUp()
db.configure_db()
self.session = db.get_session()
self.addCleanup(db.clear_db)
def test_create_policy_profile(self):
_db_profile = n1kv_db_v2.create_policy_profile(TEST_POLICY_PROFILE)
self.assertIsNotNone(_db_profile)
db_profile = (self.session.query(n1kv_models_v2.PolicyProfile).
filter_by(name=TEST_POLICY_PROFILE['name']).one())
self.assertIsNotNone(db_profile)
self.assertEqual(_db_profile.id, db_profile.id)
self.assertEqual(_db_profile.name, db_profile.name)
def test_delete_policy_profile(self):
profile = _create_test_policy_profile_if_not_there(self.session)
n1kv_db_v2.delete_policy_profile(profile.id)
try:
self.session.query(n1kv_models_v2.PolicyProfile).filter_by(
name=TEST_POLICY_PROFILE['name']).one()
except s_exc.NoResultFound:
pass
else:
self.fail("Policy Profile (%s) was not deleted" %
TEST_POLICY_PROFILE['name'])
def test_update_policy_profile(self):
TEST_PROFILE_1 = {'name': 'test_profile_1'}
profile = _create_test_policy_profile_if_not_there(self.session)
updated_profile = n1kv_db_v2.update_policy_profile(self.session,
profile.id,
TEST_PROFILE_1)
self.assertEqual(updated_profile.name, TEST_PROFILE_1['name'])
def test_get_policy_profile(self):
profile = _create_test_policy_profile_if_not_there(self.session)
got_profile = n1kv_db_v2.get_policy_profile(self.session, profile.id)
self.assertEqual(profile.id, got_profile.id)
self.assertEqual(profile.name, got_profile.name)
class ProfileBindingTests(base.BaseTestCase,
n1kv_db_v2.NetworkProfile_db_mixin,
db_base_plugin_v2.CommonDbMixin):
def setUp(self):
super(ProfileBindingTests, self).setUp()
db.configure_db()
self.session = db.get_session()
self.addCleanup(db.clear_db)
def _create_test_binding_if_not_there(self, tenant_id, profile_id,
profile_type):
try:
_binding = (self.session.query(n1kv_models_v2.ProfileBinding).
filter_by(profile_type=profile_type,
tenant_id=tenant_id,
profile_id=profile_id).one())
except s_exc.NoResultFound:
_binding = n1kv_db_v2.create_profile_binding(self.session,
tenant_id,
profile_id,
profile_type)
return _binding
def test_create_profile_binding(self):
test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66"
test_profile_type = "network"
n1kv_db_v2.create_profile_binding(self.session,
test_tenant_id,
test_profile_id,
test_profile_type)
try:
self.session.query(n1kv_models_v2.ProfileBinding).filter_by(
profile_type=test_profile_type,
tenant_id=test_tenant_id,
profile_id=test_profile_id).one()
except s_exc.MultipleResultsFound:
self.fail("Bindings must be unique")
except s_exc.NoResultFound:
self.fail("Could not create Profile Binding")
def test_get_profile_binding(self):
test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66"
test_profile_type = "network"
self._create_test_binding_if_not_there(test_tenant_id,
test_profile_id,
test_profile_type)
binding = n1kv_db_v2.get_profile_binding(self.session,
test_tenant_id,
test_profile_id)
self.assertEqual(binding.tenant_id, test_tenant_id)
self.assertEqual(binding.profile_id, test_profile_id)
self.assertEqual(binding.profile_type, test_profile_type)
def test_get_profile_binding_not_found(self):
self.assertRaises(
c_exc.ProfileTenantBindingNotFound,
n1kv_db_v2.get_profile_binding, self.session, "123", "456")
def test_delete_profile_binding(self):
test_tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
test_profile_id = "dd7b9741-76ec-11e2-bcfd-0800200c9a66"
test_profile_type = "network"
self._create_test_binding_if_not_there(test_tenant_id,
test_profile_id,
test_profile_type)
n1kv_db_v2.delete_profile_binding(self.session,
test_tenant_id,
test_profile_id)
q = (self.session.query(n1kv_models_v2.ProfileBinding).filter_by(
profile_type=test_profile_type,
tenant_id=test_tenant_id,
profile_id=test_profile_id))
self.assertFalse(q.count())
def test_default_tenant_replace(self):
ctx = context.get_admin_context()
ctx.tenant_id = "d434dd90-76ec-11e2-bcfd-0800200c9a66"
test_profile_id = "AAAAAAAA-76ec-11e2-bcfd-0800200c9a66"
test_profile_type = "policy"
n1kv_db_v2.create_profile_binding(self.session,
cisco_constants.TENANT_ID_NOT_SET,
test_profile_id,
test_profile_type)
network_profile = {"network_profile": TEST_NETWORK_PROFILE}
self.create_network_profile(ctx, network_profile)
binding = n1kv_db_v2.get_profile_binding(self.session,
ctx.tenant_id,
test_profile_id)
self.assertRaises(
c_exc.ProfileTenantBindingNotFound,
n1kv_db_v2.get_profile_binding,
self.session,
cisco_constants.TENANT_ID_NOT_SET,
test_profile_id)
self.assertNotEqual(binding.tenant_id,
cisco_constants.TENANT_ID_NOT_SET)
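# Illustrative sketch (not an actual test): the profile-binding lifecycle
# exercised above, collapsed into one helper. The n1kv_db_v2 calls are the
# same ones the tests use; the session is assumed to come from
# db.get_session() as in setUp().
def _binding_lifecycle_sketch(session, tenant_id, profile_id):
    n1kv_db_v2.create_profile_binding(session, tenant_id, profile_id,
                                      "network")
    # get_profile_binding raises ProfileTenantBindingNotFound when missing.
    binding = n1kv_db_v2.get_profile_binding(session, tenant_id, profile_id)
    assert binding.profile_type == "network"
    n1kv_db_v2.delete_profile_binding(session, tenant_id, profile_id)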
|
|
"""
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input='', srs_type='user'):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
The input may be a string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
if srs_type == 'wkt':
self.ptr = capi.new_srs(c_char_p(b''))
self.import_wkt(srs_input)
return
elif isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr and capi:
capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984", ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
>>> print(srs['UNIT|AUTHORITY']) # For the units authority, use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name, automatically
determining whether to return the linear or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = force_text(name)
return (units, name)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr and capi:
capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
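# A minimal usage sketch, assuming GDAL and its EPSG tables are available
# (32140 is the code used in the module docstring above). Integer input is
# routed through the 'epsg' branch of SpatialReference.__init__.
def _coord_transform_example():
    wgs84 = SpatialReference('WGS84')
    texas = SpatialReference(32140)
    ct = CoordTransform(wgs84, texas)
    # -> 'Transform from "WGS 84" to "NAD83 / Texas South Central"'
    return str(ct)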
|
|
"""Metrics to perform pairwise computation."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import numpy as np
from scipy.spatial import distance_matrix
from sklearn.base import BaseEstimator
from sklearn.utils import check_consistent_length
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
class ValueDifferenceMetric(BaseEstimator):
r"""Class implementing the Value Difference Metric.
This metric computes the distance between samples containing only
categorical features. The distance between feature values of two samples is
defined as:
.. math::
\delta(x, y) = \sum_{c=1}^{C} |p(c|x_{f}) - p(c|y_{f})|^{k} \ ,
where :math:`x` and :math:`y` are two samples, :math:`f` is a given
feature, :math:`C` is the number of classes, :math:`p(c|x_{f})` is the
conditional probability that the output class is :math:`c` given that
feature :math:`f` takes the value :math:`x_{f}`, and :math:`k` is an
exponent usually set to 1 or 2.
The distance for the feature vectors :math:`X` and :math:`Y` is
subsequently defined as:
.. math::
\Delta(X, Y) = \sum_{f=1}^{F} \delta(X_{f}, Y_{f})^{r} \ ,
where :math:`F` is the number of features and :math:`r` is an exponent
usually set to 1 or 2.
The definition of this distance was proposed in [1]_.
Read more in the :ref:`User Guide <vdm>`.
.. versionadded:: 0.8
Parameters
----------
n_categories : "auto" or array-like of shape (n_features,), default="auto"
The number of unique categories per feature. If `"auto"`, the number
of categories will be computed from `X` at `fit`. Otherwise, you can
provide an array-like of such counts to avoid computation. You can use
the fitted attribute `categories_` of the
:class:`~sklearn.preprocessing.OrdinalEncoder` to deduce these counts.
k : int, default=1
Exponent used to compute the distance between feature values.
r : int, default=2
Exponent used to compute the distance between feature vectors.
Attributes
----------
n_categories_ : ndarray of shape (n_features,)
The number of categories per feature.
proba_per_class_ : list of ndarray of shape (n_categories, n_classes)
List of length `n_features` containing the conditional probabilities
for each category given a class.
Notes
-----
The input data `X` is expected to be encoded by an
:class:`~sklearn.preprocessing.OrdinalEncoder` and the data type used
should be `np.int32`. If other data types are given, `X` will be
converted to `np.int32`.
References
----------
.. [1] Stanfill, Craig, and David Waltz. "Toward memory-based reasoning."
Communications of the ACM 29.12 (1986): 1213-1228.
Examples
--------
>>> import numpy as np
>>> X = np.array(["green"] * 10 + ["red"] * 10 + ["blue"] * 10).reshape(-1, 1)
>>> y = [1] * 8 + [0] * 5 + [1] * 7 + [0] * 9 + [1]
>>> from sklearn.preprocessing import OrdinalEncoder
>>> encoder = OrdinalEncoder(dtype=np.int32)
>>> X_encoded = encoder.fit_transform(X)
>>> from imblearn.metrics.pairwise import ValueDifferenceMetric
>>> vdm = ValueDifferenceMetric().fit(X_encoded, y)
>>> pairwise_distance = vdm.pairwise(X_encoded)
>>> pairwise_distance.shape
(30, 30)
>>> X_test = np.array(["green", "red", "blue"]).reshape(-1, 1)
>>> X_test_encoded = encoder.transform(X_test)
>>> vdm.pairwise(X_test_encoded)
array([[ 0. , 0.04, 1.96],
[ 0.04, 0. , 1.44],
[ 1.96, 1.44, 0. ]])
"""
def __init__(self, *, n_categories="auto", k=1, r=2):
self.n_categories = n_categories
self.k = k
self.r = r
def fit(self, X, y):
"""Compute the necessary statistics from the training set.
Parameters
----------
X : ndarray of shape (n_samples, n_features), dtype=np.int32
The input data. The data are expected to be encoded with a
:class:`~sklearn.preprocessing.OrdinalEncoder`.
y : ndarray of shape (n_samples,)
The target.
Returns
-------
self
"""
check_consistent_length(X, y)
X, y = self._validate_data(X, y, reset=True, dtype=np.int32)
if isinstance(self.n_categories, str) and self.n_categories == "auto":
# categories are expected to be encoded from 0 to n_categories - 1
self.n_categories_ = X.max(axis=0) + 1
else:
if len(self.n_categories) != self.n_features_in_:
raise ValueError(
f"The length of n_categories is not consistent with the "
f"number of feature in X. Got {len(self.n_categories)} "
f"elements in n_categories and {self.n_features_in_} in "
f"X."
)
self.n_categories_ = np.array(self.n_categories, copy=False)
classes = unique_labels(y)
# list of length n_features of ndarray (n_categories, n_classes)
# compute the counts
self.proba_per_class_ = [
np.empty(shape=(n_cat, len(classes)), dtype=np.float64)
for n_cat in self.n_categories_
]
for feature_idx in range(self.n_features_in_):
for klass_idx, klass in enumerate(classes):
self.proba_per_class_[feature_idx][:, klass_idx] = np.bincount(
X[y == klass, feature_idx],
minlength=self.n_categories_[feature_idx],
)
# normalize counts by the per-category total over the classes
with np.errstate(invalid="ignore"):
# silence potential warning due to in-place division by zero
for feature_idx in range(self.n_features_in_):
self.proba_per_class_[feature_idx] /= (
self.proba_per_class_[feature_idx].sum(axis=1).reshape(-1, 1)
)
np.nan_to_num(self.proba_per_class_[feature_idx], copy=False)
return self
def pairwise(self, X, Y=None):
"""Compute the VDM distance pairwise.
Parameters
----------
X : ndarray of shape (n_samples, n_features), dtype=np.int32
The input data. The data are expected to be encoded with a
:class:`~sklearn.preprocessing.OrdinalEncoder`.
Y : ndarray of shape (n_samples, n_features), dtype=np.int32
The input data. The data are expected to be encoded with a
:class:`~sklearn.preprocessing.OrdinalEncoder`.
Returns
-------
distance_matrix : ndarray of shape (n_samples_X, n_samples_Y)
The VDM pairwise distance matrix.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False, dtype=np.int32)
n_samples_X = X.shape[0]
if Y is not None:
Y = self._validate_data(Y, reset=False, dtype=np.int32)
n_samples_Y = Y.shape[0]
else:
n_samples_Y = n_samples_X
distance = np.zeros(shape=(n_samples_X, n_samples_Y), dtype=np.float64)
for feature_idx in range(self.n_features_in_):
proba_feature_X = self.proba_per_class_[feature_idx][X[:, feature_idx]]
if Y is not None:
proba_feature_Y = self.proba_per_class_[feature_idx][Y[:, feature_idx]]
else:
proba_feature_Y = proba_feature_X
distance += (
distance_matrix(proba_feature_X, proba_feature_Y, p=self.k) ** self.r
)
return distance
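# Worked check of the docstring example (a sketch; the conditional
# probabilities below are the class frequencies implied by y):
# p(class 1 | green) = 0.8, p(1 | red) = 0.7, p(1 | blue) = 0.1.
def _vdm_hand_check():
    from math import isclose
    # (p(class 0 | value), p(class 1 | value)) for the single feature
    p = {"green": (0.2, 0.8), "red": (0.3, 0.7), "blue": (0.9, 0.1)}
    def dist(a, b, k=1, r=2):
        # delta(a, b) = sum_c |p(c|a) - p(c|b)|^k, then raised to r
        return sum(abs(pa - pb) ** k for pa, pb in zip(p[a], p[b])) ** r
    assert isclose(dist("green", "red"), 0.04)   # matches pairwise[0, 1]
    assert isclose(dist("green", "blue"), 1.96)  # matches pairwise[0, 2]
    assert isclose(dist("red", "blue"), 1.44)    # matches pairwise[1, 2]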
|
|
import csv
import json
import logging
import numpy as np
import os
import yaml
from typing import Iterable, TYPE_CHECKING, Dict, List, Optional, TextIO, Type
import ray.cloudpickle as cloudpickle
from ray.tune.callback import Callback
from ray.tune.utils.util import SafeFallbackEncoder
from ray.util.debug import log_once
from ray.tune.result import (
TRAINING_ITERATION,
TIME_TOTAL_S,
TIMESTEPS_TOTAL,
EXPR_PARAM_FILE,
EXPR_PARAM_PICKLE_FILE,
EXPR_PROGRESS_FILE,
EXPR_RESULT_FILE,
)
from ray.tune.utils import flatten_dict
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.tune.trial import Trial # noqa: F401
logger = logging.getLogger(__name__)
tf = None
VALID_SUMMARY_TYPES = [int, float, np.float32, np.float64, np.int32, np.int64]
class Logger:
"""Logging interface for ray.tune.
By default, the UnifiedLogger implementation is used which logs results in
multiple formats (TensorBoard, rllab/viskit, plain json, custom loggers)
at once.
Arguments:
config: Configuration passed to all logger creators.
logdir: Directory for all logger creators to log to.
trial (Trial): Trial object for the logger to access.
"""
def __init__(self, config: Dict, logdir: str, trial: Optional["Trial"] = None):
self.config = config
self.logdir = logdir
self.trial = trial
self._init()
def _init(self):
pass
def on_result(self, result):
"""Given a result, appends it to the existing log."""
raise NotImplementedError
def update_config(self, config):
"""Updates the config for logger."""
pass
def close(self):
"""Releases all resources used by this logger."""
pass
def flush(self):
"""Flushes all disk writes to storage."""
pass
class NoopLogger(Logger):
def on_result(self, result):
pass
class JsonLogger(Logger):
"""Logs trial results in json format.
Also writes to a results file and param.json file when results or
configurations are updated. Experiments must be executed with the
JsonLogger to be compatible with the ExperimentAnalysis tool.
"""
def _init(self):
self.update_config(self.config)
local_file = os.path.join(self.logdir, EXPR_RESULT_FILE)
self.local_out = open(local_file, "a")
def on_result(self, result: Dict):
json.dump(result, self, cls=SafeFallbackEncoder)
self.write("\n")
self.local_out.flush()
def write(self, b):
self.local_out.write(b)
def flush(self):
if not self.local_out.closed:
self.local_out.flush()
def close(self):
self.local_out.close()
def update_config(self, config: Dict):
self.config = config
config_out = os.path.join(self.logdir, EXPR_PARAM_FILE)
with open(config_out, "w") as f:
json.dump(self.config, f, indent=2, sort_keys=True, cls=SafeFallbackEncoder)
config_pkl = os.path.join(self.logdir, EXPR_PARAM_PICKLE_FILE)
with open(config_pkl, "wb") as f:
cloudpickle.dump(self.config, f)
class CSVLogger(Logger):
"""Logs results to progress.csv under the trial directory.
Automatically flattens nested dicts in the result dict before writing
to csv:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
def _init(self):
"""CSV outputted with Headers as first set of results."""
progress_file = os.path.join(self.logdir, EXPR_PROGRESS_FILE)
self._continuing = os.path.exists(progress_file)
self._file = open(progress_file, "a")
self._csv_out = None
def on_result(self, result: Dict):
tmp = result.copy()
if "config" in tmp:
del tmp["config"]
result = flatten_dict(tmp, delimiter="/")
if self._csv_out is None:
self._csv_out = csv.DictWriter(self._file, result.keys())
if not self._continuing:
self._csv_out.writeheader()
self._csv_out.writerow(
{k: v for k, v in result.items() if k in self._csv_out.fieldnames}
)
self._file.flush()
def flush(self):
if not self._file.closed:
self._file.flush()
def close(self):
self._file.close()
class TBXLogger(Logger):
"""TensorBoardX Logger.
Note that hparams will be written only after a trial has terminated.
This logger automatically flattens nested dicts to show on TensorBoard:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
VALID_HPARAMS = (str, bool, int, float, list, type(None))
VALID_NP_HPARAMS = (np.bool8, np.float32, np.float64, np.int32, np.int64)
def _init(self):
try:
from tensorboardX import SummaryWriter
except ImportError:
if log_once("tbx-install"):
logger.info('pip install "ray[tune]" to see TensorBoard files.')
raise
self._file_writer = SummaryWriter(self.logdir, flush_secs=30)
self.last_result = None
def on_result(self, result: Dict):
step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
tmp = result.copy()
for k in ["config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION]:
if k in tmp:
del tmp[k] # not useful to log these
flat_result = flatten_dict(tmp, delimiter="/")
path = ["ray", "tune"]
valid_result = {}
for attr, value in flat_result.items():
full_attr = "/".join(path + [attr])
if isinstance(value, tuple(VALID_SUMMARY_TYPES)) and not np.isnan(value):
valid_result[full_attr] = value
self._file_writer.add_scalar(full_attr, value, global_step=step)
elif (isinstance(value, list) and len(value) > 0) or (
isinstance(value, np.ndarray) and value.size > 0
):
valid_result[full_attr] = value
# Must be video
if isinstance(value, np.ndarray) and value.ndim == 5:
self._file_writer.add_video(
full_attr, value, global_step=step, fps=20
)
continue
try:
self._file_writer.add_histogram(full_attr, value, global_step=step)
# In case TensorboardX still doesn't think it's a valid value
# (e.g. `[[]]`), warn and move on.
except (ValueError, TypeError):
if log_once("invalid_tbx_value"):
logger.warning(
"You are trying to log an invalid value ({}={}) "
"via {}!".format(full_attr, value, type(self).__name__)
)
self.last_result = valid_result
self._file_writer.flush()
def flush(self):
if self._file_writer is not None:
self._file_writer.flush()
def close(self):
if self._file_writer is not None:
if self.trial and self.trial.evaluated_params and self.last_result:
flat_result = flatten_dict(self.last_result, delimiter="/")
scrubbed_result = {
k: value
for k, value in flat_result.items()
if isinstance(value, tuple(VALID_SUMMARY_TYPES))
}
self._try_log_hparams(scrubbed_result)
self._file_writer.close()
def _try_log_hparams(self, result):
# TBX currently errors if the hparams value is None.
flat_params = flatten_dict(self.trial.evaluated_params)
scrubbed_params = {
k: v for k, v in flat_params.items() if isinstance(v, self.VALID_HPARAMS)
}
np_params = {
k: v.tolist()
for k, v in flat_params.items()
if isinstance(v, self.VALID_NP_HPARAMS)
}
scrubbed_params.update(np_params)
removed = {
k: v
for k, v in flat_params.items()
if not isinstance(v, self.VALID_HPARAMS + self.VALID_NP_HPARAMS)
}
if removed:
logger.info(
"Removed the following hyperparameter values when "
"logging to tensorboard: %s",
str(removed),
)
from tensorboardX.summary import hparams
try:
experiment_tag, session_start_tag, session_end_tag = hparams(
hparam_dict=scrubbed_params, metric_dict=result
)
self._file_writer.file_writer.add_summary(experiment_tag)
self._file_writer.file_writer.add_summary(session_start_tag)
self._file_writer.file_writer.add_summary(session_end_tag)
except Exception:
logger.exception(
"TensorboardX failed to log hparams. "
"This may be due to an unsupported type "
"in the hyperparameter values."
)
DEFAULT_LOGGERS = (JsonLogger, CSVLogger, TBXLogger)
class UnifiedLogger(Logger):
"""Unified result logger for TensorBoard, rllab/viskit, plain json.
Arguments:
config: Configuration passed to all logger creators.
logdir: Directory for all logger creators to log to.
loggers (list): List of logger creators. Defaults to CSV, Tensorboard,
and JSON loggers.
"""
def __init__(
self,
config: Dict,
logdir: str,
trial: Optional["Trial"] = None,
loggers: Optional[List[Type[Logger]]] = None,
):
if loggers is None:
self._logger_cls_list = DEFAULT_LOGGERS
else:
self._logger_cls_list = loggers
if JsonLogger not in self._logger_cls_list:
if log_once("JsonLogger"):
logger.warning(
"JsonLogger not provided. The ExperimentAnalysis tool is "
"disabled."
)
super(UnifiedLogger, self).__init__(config, logdir, trial)
def _init(self):
self._loggers = []
for cls in self._logger_cls_list:
try:
self._loggers.append(cls(self.config, self.logdir, self.trial))
except Exception as exc:
if log_once(f"instantiate:{cls.__name__}"):
logger.warning(
"Could not instantiate %s: %s.", cls.__name__, str(exc)
)
def on_result(self, result):
for _logger in self._loggers:
_logger.on_result(result)
def update_config(self, config):
for _logger in self._loggers:
_logger.update_config(config)
def close(self):
for _logger in self._loggers:
_logger.close()
def flush(self):
for _logger in self._loggers:
_logger.flush()
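# A minimal usage sketch (assumptions: `logdir` already exists, and the
# result dict carries the keys the chosen loggers expect; TBXLogger is left
# out here to avoid the tensorboardX dependency):
def _unified_logger_example(config, logdir):
    unified = UnifiedLogger(config, logdir, loggers=[JsonLogger, CSVLogger])
    unified.on_result({TRAINING_ITERATION: 1, "episode_reward_mean": 10.0})
    unified.close()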
@PublicAPI
class LoggerCallback(Callback):
"""Base class for experiment-level logger callbacks
This base class defines a general interface for logging events,
like trial starts, restores, ends, checkpoint saves, and receiving
trial results.
Callbacks implementing this interface should make sure that logging
utilities are cleaned up properly on trial termination, i.e. when
``log_trial_end`` is received. This includes e.g. closing files.
"""
def log_trial_start(self, trial: "Trial"):
"""Handle logging when a trial starts.
Args:
trial (Trial): Trial object.
"""
pass
def log_trial_restore(self, trial: "Trial"):
"""Handle logging when a trial restores.
Args:
trial (Trial): Trial object.
"""
pass
def log_trial_save(self, trial: "Trial"):
"""Handle logging when a trial saves a checkpoint.
Args:
trial (Trial): Trial object.
"""
pass
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
"""Handle logging when a trial reports a result.
Args:
trial (Trial): Trial object.
result (dict): Result dictionary.
"""
pass
def log_trial_end(self, trial: "Trial", failed: bool = False):
"""Handle logging when a trial ends.
Args:
trial (Trial): Trial object.
failed (bool): True if the trial failed (e.g. when it raised an
    exception), False if it finished gracefully.
"""
pass
def on_trial_result(
self,
iteration: int,
trials: List["Trial"],
trial: "Trial",
result: Dict,
**info,
):
self.log_trial_result(iteration, trial, result)
def on_trial_start(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self.log_trial_start(trial)
def on_trial_restore(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self.log_trial_restore(trial)
def on_trial_save(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self.log_trial_save(trial)
def on_trial_complete(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self.log_trial_end(trial, failed=False)
def on_trial_error(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
self.log_trial_end(trial, failed=True)
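# A minimal LoggerCallback sketch (hypothetical subclass, not part of Ray):
# only the log_trial_* hooks of interest are overridden; the on_trial_*
# dispatch above routes Tune events into them automatically.
class _PrintLoggerCallback(LoggerCallback):
    def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
        print(f"{trial}: iter {iteration}, "
              f"reward={result.get('episode_reward_mean')}")
    def log_trial_end(self, trial: "Trial", failed: bool = False):
        print(f"{trial} ended (failed={failed})")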
class LegacyLoggerCallback(LoggerCallback):
"""Supports logging to trial-specific `Logger` classes.
Previously, Ray Tune logging was handled via `Logger` classes that have
been instantiated per-trial. This callback is a fallback to these
`Logger`-classes, instantiating each `Logger` class for each trial
and logging to them.
Args:
logger_classes (Iterable[Type[Logger]]): Logger classes that should
be instantiated for each trial.
"""
def __init__(self, logger_classes: Iterable[Type[Logger]]):
self.logger_classes = list(logger_classes)
self._class_trial_loggers: Dict[Type[Logger], Dict["Trial", Logger]] = {}
def log_trial_start(self, trial: "Trial"):
trial.init_logdir()
for logger_class in self.logger_classes:
trial_loggers = self._class_trial_loggers.get(logger_class, {})
if trial not in trial_loggers:
logger = logger_class(trial.config, trial.logdir, trial)
trial_loggers[trial] = logger
self._class_trial_loggers[logger_class] = trial_loggers
def log_trial_restore(self, trial: "Trial"):
for logger_class, trial_loggers in self._class_trial_loggers.items():
if trial in trial_loggers:
trial_loggers[trial].flush()
def log_trial_save(self, trial: "Trial"):
for logger_class, trial_loggers in self._class_trial_loggers.items():
if trial in trial_loggers:
trial_loggers[trial].flush()
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
for logger_class, trial_loggers in self._class_trial_loggers.items():
if trial in trial_loggers:
trial_loggers[trial].on_result(result)
def log_trial_end(self, trial: "Trial", failed: bool = False):
for logger_class, trial_loggers in self._class_trial_loggers.items():
if trial in trial_loggers:
trial_loggers[trial].close()
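# Usage sketch: wrap the per-trial Logger classes above so they can be used
# where a Callback is expected (e.g. the callbacks argument of tune.run, an
# assumption about the call site, which lives outside this module):
# LegacyLoggerCallback([JsonLogger, CSVLogger])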
class JsonLoggerCallback(LoggerCallback):
"""Logs trial results in json format.
Also writes to a results file and param.json file when results or
configurations are updated. Experiments must be executed with the
JsonLoggerCallback to be compatible with the ExperimentAnalysis tool.
"""
def __init__(self):
self._trial_configs: Dict["Trial", Dict] = {}
self._trial_files: Dict["Trial", TextIO] = {}
def log_trial_start(self, trial: "Trial"):
if trial in self._trial_files:
self._trial_files[trial].close()
# Update config
self.update_config(trial, trial.config)
# Make sure logdir exists
trial.init_logdir()
local_file = os.path.join(trial.logdir, EXPR_RESULT_FILE)
self._trial_files[trial] = open(local_file, "at")
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
if trial not in self._trial_files:
self.log_trial_start(trial)
json.dump(result, self._trial_files[trial], cls=SafeFallbackEncoder)
self._trial_files[trial].write("\n")
self._trial_files[trial].flush()
def log_trial_end(self, trial: "Trial", failed: bool = False):
if trial not in self._trial_files:
return
self._trial_files[trial].close()
del self._trial_files[trial]
def update_config(self, trial: "Trial", config: Dict):
self._trial_configs[trial] = config
config_out = os.path.join(trial.logdir, EXPR_PARAM_FILE)
with open(config_out, "w") as f:
json.dump(
self._trial_configs[trial],
f,
indent=2,
sort_keys=True,
cls=SafeFallbackEncoder,
)
config_pkl = os.path.join(trial.logdir, EXPR_PARAM_PICKLE_FILE)
with open(config_pkl, "wb") as f:
cloudpickle.dump(self._trial_configs[trial], f)
class CSVLoggerCallback(LoggerCallback):
"""Logs results to progress.csv under the trial directory.
Automatically flattens nested dicts in the result dict before writing
to csv:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
def __init__(self):
self._trial_continue: Dict["Trial", bool] = {}
self._trial_files: Dict["Trial", TextIO] = {}
self._trial_csv: Dict["Trial", csv.DictWriter] = {}
def log_trial_start(self, trial: "Trial"):
if trial in self._trial_files:
self._trial_files[trial].close()
# Make sure logdir exists
trial.init_logdir()
local_file = os.path.join(trial.logdir, EXPR_PROGRESS_FILE)
self._trial_continue[trial] = os.path.exists(local_file)
self._trial_files[trial] = open(local_file, "at")
self._trial_csv[trial] = None
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
if trial not in self._trial_files:
self.log_trial_start(trial)
tmp = result.copy()
tmp.pop("config", None)
result = flatten_dict(tmp, delimiter="/")
if not self._trial_csv[trial]:
self._trial_csv[trial] = csv.DictWriter(
self._trial_files[trial], result.keys()
)
if not self._trial_continue[trial]:
self._trial_csv[trial].writeheader()
self._trial_csv[trial].writerow(
{k: v for k, v in result.items() if k in self._trial_csv[trial].fieldnames}
)
self._trial_files[trial].flush()
def log_trial_end(self, trial: "Trial", failed: bool = False):
if trial not in self._trial_files:
return
del self._trial_csv[trial]
self._trial_files[trial].close()
del self._trial_files[trial]
class TBXLoggerCallback(LoggerCallback):
"""TensorBoardX Logger.
Note that hparams will be written only after a trial has terminated.
This logger automatically flattens nested dicts to show on TensorBoard:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
VALID_HPARAMS = (str, bool, int, float, list, type(None))
VALID_NP_HPARAMS = (np.bool8, np.float32, np.float64, np.int32, np.int64)
def __init__(self):
try:
from tensorboardX import SummaryWriter
self._summary_writer_cls = SummaryWriter
except ImportError:
if log_once("tbx-install"):
logger.info('pip install "ray[tune]" to see TensorBoard files.')
raise
self._trial_writer: Dict["Trial", SummaryWriter] = {}
self._trial_result: Dict["Trial", Dict] = {}
def log_trial_start(self, trial: "Trial"):
if trial in self._trial_writer:
self._trial_writer[trial].close()
trial.init_logdir()
self._trial_writer[trial] = self._summary_writer_cls(
trial.logdir, flush_secs=30
)
self._trial_result[trial] = {}
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
if trial not in self._trial_writer:
self.log_trial_start(trial)
step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
tmp = result.copy()
for k in ["config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION]:
if k in tmp:
del tmp[k] # not useful to log these
flat_result = flatten_dict(tmp, delimiter="/")
path = ["ray", "tune"]
valid_result = {}
for attr, value in flat_result.items():
full_attr = "/".join(path + [attr])
if isinstance(value, tuple(VALID_SUMMARY_TYPES)) and not np.isnan(value):
valid_result[full_attr] = value
self._trial_writer[trial].add_scalar(full_attr, value, global_step=step)
elif (isinstance(value, list) and len(value) > 0) or (
isinstance(value, np.ndarray) and value.size > 0
):
valid_result[full_attr] = value
# Must be video
if isinstance(value, np.ndarray) and value.ndim == 5:
self._trial_writer[trial].add_video(
full_attr, value, global_step=step, fps=20
)
continue
try:
self._trial_writer[trial].add_histogram(
full_attr, value, global_step=step
)
# In case TensorboardX still doesn't think it's a valid value
# (e.g. `[[]]`), warn and move on.
except (ValueError, TypeError):
if log_once("invalid_tbx_value"):
logger.warning(
"You are trying to log an invalid value ({}={}) "
"via {}!".format(full_attr, value, type(self).__name__)
)
self._trial_result[trial] = valid_result
self._trial_writer[trial].flush()
def log_trial_end(self, trial: "Trial", failed: bool = False):
if trial in self._trial_writer:
if trial and trial.evaluated_params and self._trial_result[trial]:
flat_result = flatten_dict(self._trial_result[trial], delimiter="/")
scrubbed_result = {
k: value
for k, value in flat_result.items()
if isinstance(value, tuple(VALID_SUMMARY_TYPES))
}
self._try_log_hparams(trial, scrubbed_result)
self._trial_writer[trial].close()
del self._trial_writer[trial]
del self._trial_result[trial]
def _try_log_hparams(self, trial: "Trial", result: Dict):
# TBX currently errors if the hparams value is None.
flat_params = flatten_dict(trial.evaluated_params)
scrubbed_params = {
k: v for k, v in flat_params.items() if isinstance(v, self.VALID_HPARAMS)
}
np_params = {
k: v.tolist()
for k, v in flat_params.items()
if isinstance(v, self.VALID_NP_HPARAMS)
}
scrubbed_params.update(np_params)
removed = {
k: v
for k, v in flat_params.items()
if not isinstance(v, self.VALID_HPARAMS + self.VALID_NP_HPARAMS)
}
if removed:
logger.info(
"Removed the following hyperparameter values when "
"logging to tensorboard: %s",
str(removed),
)
from tensorboardX.summary import hparams
try:
experiment_tag, session_start_tag, session_end_tag = hparams(
hparam_dict=scrubbed_params, metric_dict=result
)
self._trial_writer[trial].file_writer.add_summary(experiment_tag)
self._trial_writer[trial].file_writer.add_summary(session_start_tag)
self._trial_writer[trial].file_writer.add_summary(session_end_tag)
except Exception:
logger.exception(
"TensorboardX failed to log hparams. "
"This may be due to an unsupported type "
"in the hyperparameter values."
)
# Maintain backwards compatibility.
from ray.tune.integration.mlflow import ( # noqa: E402
MLflowLogger as _MLflowLogger,
)
MLflowLogger = _MLflowLogger
# The capital L is a typo, but needs to remain for backwards compatibility.
MLFLowLogger = _MLflowLogger
def pretty_print(result):
result = result.copy()
result.update(config=None) # drop config from pretty print
result.update(hist_stats=None) # drop hist_stats from pretty print
out = {}
for k, v in result.items():
if v is not None:
out[k] = v
cleaned = json.dumps(out, cls=SafeFallbackEncoder)
return yaml.safe_dump(json.loads(cleaned), default_flow_style=False)
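# Example (a sketch): pretty_print drops config/hist_stats and renders the
# remaining non-None entries as YAML.
def _pretty_print_example():
    sample = {"training_iteration": 1, "config": {"lr": 0.1}}
    return pretty_print(sample)  # -> "training_iteration: 1\n"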
|
|
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PAS5211 scapy structs used for interaction with Ruby
"""
import struct
from scapy.fields import LEShortField, Field, LEIntField, LESignedIntField, FieldLenField, FieldListField, PacketField, \
ByteField, StrFixedLenField, ConditionalField, StrField, MACField, LELongField, LenField, StrLenField
from scapy.layers.l2 import DestMACField, ETHER_ANY, Ether
from scapy.packet import Packet, bind_layers
from scapy.utils import lhex
from scapy.volatile import RandSInt
from scapy.layers.ntp import XLEShortField
from voltha.adapters.microsemi_olt.PAS5211_constants import PON_ENABLE, PON_PORT_PON, PON_FALSE, PON_TRUE
from voltha.extensions.omci.omci_frame import OmciFrame
"""
PAS5211 Constants
"""
# TODO get range from olt_version message
CHANNELS = range(0, 4)
PORTS = range(1, 129)
class XLESignedIntField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<i")
def randval(self):
return RandSInt()
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class LESignedShortField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<h")
class PAS5211FrameHeader(Packet):
name = "PAS5211FrameHeader"
fields_desc = [
LEShortField("part", 1),
LEShortField("total_parts", 1),
LEShortField("size", 0),
XLESignedIntField("magic_number", 0x1234ABCD)
]
class PAS5211MsgHeader(Packet):
name = "PAS5211MsgHeader"
fields_desc = [
LEIntField("sequence_number", 0),
XLEShortField("opcode", 0),
LEShortField("event_type", 0),
LESignedShortField("channel_id", -1),
LESignedShortField("onu_id", -1),
LESignedIntField("onu_session_id", -1)
]
class PAS5211Msg(Packet):
opcode = "Must be filled by subclass"
pass
class PAS5211MsgGetProtocolVersion(PAS5211Msg):
opcode = 2
name = "PAS5211MsgGetProtocolVersion"
fields_desc = []
class PAS5211MsgGetProtocolVersionResponse(PAS5211Msg):
name = "PAS5211MsgGetProtocolVersionResponse"
fields_desc = [
LEShortField("major_hardware_version", 0),
LEShortField("minor_hardware_version", 0),
LEShortField("major_pfi_version", 0),
LEShortField("minor_pfi_version", 0)
]
class PAS5211MsgGetOltVersion(PAS5211Msg):
opcode = 3
name = "PAS5211MsgGetOltVersion"
fields_desc = []
class PAS5211MsgGetOltVersionResponse(PAS5211Msg):
name = "PAS5211MsgGetOltVersionResponse"
fields_desc = [
LEShortField("major_firmware_version", 0),
LEShortField("minor_firmware_version", 0),
LEShortField("build_firmware_version", 0),
LEShortField("maintenance_firmware_version", 0),
LEShortField("major_hardware_version", 0),
LEShortField("minor_hardware_version", 0),
LEIntField("system_port_mac_type", 0),
FieldLenField("channels_supported", 0, fmt="<H"),
LEShortField("onus_supported_per_channel", 0),
LEShortField("ports_supported_per_channel", 0),
LEShortField("alloc_ids_supported_per_channel", 0),
FieldListField("critical_events_counter", [0, 0, 0, 0],
LEIntField("entry", 0),
count_from=lambda pkt: pkt.channels_supported),
FieldListField("non_critical_events_counter", [0, 0, 0, 0],
LEIntField("entry", 0),
count_from=lambda pkt: pkt.channels_supported)
]
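# A construction sketch (assumption: the '/'-composition below matches how
# these messages are framed on the wire; the bind_layers wiring that drives
# dissection lives elsewhere in this module):
def _example_get_olt_version_frame():
    return Ether() / PAS5211FrameHeader() / PAS5211MsgHeader(
        sequence_number=1,
        opcode=PAS5211MsgGetOltVersion.opcode) / PAS5211MsgGetOltVersion()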
class SnrBurstDelay(Packet):
name = "SnrBurstDelay"
fields_desc = [
LEShortField("timer_delay", None),
LEShortField("preamble_delay", None),
LEShortField("delimiter_delay", None),
LEShortField("burst_delay", None)
]
def extract_padding(self, p):
return "", p
class RngBurstDelay(Packet):
name = "SnrBurstDelay"
fields_desc = [
LEShortField("timer_delay", None),
LEShortField("preamble_delay", None),
LEShortField("delimiter_delay", None)
]
def extract_padding(self, p):
return "", p
class BurstTimingCtrl(Packet):
name = "BurstTimingCtrl"
fields_desc = [
PacketField("snr_burst_delay", None, SnrBurstDelay),
PacketField("rng_burst_delay", None, RngBurstDelay),
LEShortField("burst_delay_single", None),
LEShortField("burst_delay_double", None)
]
def extract_padding(self, p):
return "", p
class GeneralOpticsParams(Packet):
name = "GeneralOpticsParams"
fields_desc = [
ByteField("laser_reset_polarity", None),
ByteField("laser_sd_polarity", None),
ByteField("sd_source", None),
ByteField("sd_hold_snr_ranging", None),
ByteField("sd_hold_normal", None),
ByteField("reset_type_snr_ranging", None),
ByteField("reset_type_normal", None),
ByteField("laser_reset_enable", None),
]
def extract_padding(self, p):
return "", p
class ResetValues(Packet):
name = "ResetDataBurst"
fields_desc = [
ByteField("bcdr_reset_d2", None),
ByteField("bcdr_reset_d1", None),
ByteField("laser_reset_d2", None),
ByteField("laser_reset_d1", None)
]
def extract_padding(self, p):
return "", p
class DoubleResetValues(Packet):
name = "ResetDataBurst"
fields_desc = [
ByteField("bcdr_reset_d4", None),
ByteField("bcdr_reset_d3", None),
ByteField("laser_reset_d4", None),
ByteField("laser_reset_d3", None)
]
def extract_padding(self, p):
return "", p
class ResetTimingCtrl(Packet):
name = "ResetTimingCtrl"
fields_desc = [
PacketField("reset_data_burst", None, ResetValues),
PacketField("reset_snr_burst", None, ResetValues),
PacketField("reset_rng_burst", None, ResetValues),
PacketField("single_reset", None, ResetValues),
PacketField("double_reset", None, DoubleResetValues),
]
def extract_padding(self, p):
return "", p
class PreambleParams(Packet):
name = "PreambleParams"
fields_desc = [
ByteField("correlation_preamble_length", None),
ByteField("preamble_length_snr_rng", None),
ByteField("guard_time_data_mode", None),
ByteField("type1_size_data", None),
ByteField("type2_size_data", None),
ByteField("type3_size_data", None),
ByteField("type3_pattern", None),
ByteField("delimiter_size", None),
ByteField("delimiter_byte1", None),
ByteField("delimiter_byte2", None),
ByteField("delimiter_byte3", None)
]
def extract_padding(self, p):
return "", p
class PAS5211MsgSetOltOptics(PAS5211Msg):
opcode = 106
name = "PAS5211MsgSetOltOptics"
fields_desc = [
PacketField("burst_timing_ctrl", None, BurstTimingCtrl),
PacketField("general_optics_params", None, GeneralOpticsParams),
ByteField("reserved1", 0),
ByteField("reserved2", 0),
ByteField("reserved3", 0),
PacketField("reset_timing_ctrl", None, ResetTimingCtrl),
ByteField("voltage_if_mode", None),
PacketField("preamble_params", None, PreambleParams),
ByteField("reserved4", 0),
ByteField("reserved5", 0),
ByteField("reserved6", 0)
]
class PAS5211MsgSetOltOpticsResponse(PAS5211Msg):
name = "PAS5211MsgSetOltOpticsResponse"
fields_desc = []
class PAS5211MsgSetOpticsIoControl(PAS5211Msg):
opcode = 108
name = "PAS5211MsgSetOpticsIoControl"
fields_desc = [
ByteField("i2c_clk", None),
ByteField("i2c_data", None),
ByteField("tx_enable", None),
ByteField("tx_fault", None),
ByteField("tx_enable_polarity", None),
ByteField("tx_fault_polarity", None),
]
class PAS5211MsgSetOpticsIoControlResponse(PAS5211Msg):
name = "PAS5211MsgSetOpticsIoControlResponse"
fields_desc = []
def extract_padding(self, p):
return "", p
class PAS5211MsgStartDbaAlgorithm(PAS5211Msg):
opcode = 55
name = "PAS5211MsgStartDbaAlgorithm"
fields_desc = [
LEShortField("size", 0),
ByteField("initialization_data", None)
]
class PAS5211MsgStartDbaAlgorithmResponse(PAS5211Msg):
name = "PAS5211MsgStartDbaAlgorithmResponse"
opcode = 10295
fields_desc = []
class PAS5211MsgSetGeneralParam(PAS5211Msg):
opcode = 164
name = "PAS5211MsgSetGeneralParam"
fields_desc = [
LEIntField("parameter", None),
LEIntField("reserved", 0),
LEIntField("value", None)
]
class PAS5211MsgSetGeneralParamResponse(PAS5211Msg):
name = "PAS5211MsgSetGeneralParamResponse"
fields_desc = []
class PAS5211MsgGetGeneralParam(PAS5211Msg):
opcode = 165
name = "PAS5211MsgGetGeneralParam"
fields_desc = [
LEIntField("parameter", None),
LEIntField("reserved", 0),
]
class PAS5211MsgGetGeneralParamResponse(PAS5211Msg):
name = "PAS5211MsgGetGeneralParamResponse"
fields_desc = [
LEIntField("parameter", None),
LEIntField("reserved", 0),
LEIntField("value", None)
]
class PAS5211MsgGetDbaMode(PAS5211Msg):
opcode = 57
name = "PAS5211MsgGetDbaMode"
fields_desc = []
class PAS5211MsgGetDbaModeResponse(PAS5211Msg):
name = "PAS5211MsgGetDbaModeResponse"
fields_desc = [
LEIntField("dba_mode", None),
]
class PAS5211MsgAddOltChannel(PAS5211Msg):
opcode = 4
name = "PAS5211MsgAddOltChannel"
fields_desc = [
]
class PAS5211MsgAddOltChannelResponse(PAS5211Msg):
name = "PAS5211MsgAddOltChannelResponse"
fields_desc = [
]
class PAS5211MsgSetAlarmConfig(PAS5211Msg):
opcode = 48
name = "PAS5211MsgSetAlarmConfig"
fields_desc = [
LEShortField("type", None),
LEShortField("activate", None),
LEIntField("parameter1", None),
LEIntField("parameter2", None),
LEIntField("parameter3", None),
LEIntField("parameter4", None)
]
class PAS5211MsgSetOltChannelActivationPeriod(PAS5211Msg):
opcode = 11
name = "PAS5211MsgSetOltChannelActivationPeriod"
fields_desc = [
LEIntField("activation_period", None)
]
class PAS5211MsgSetOltChannelActivationPeriodResponse(PAS5211Msg):
name = "PAS5211MsgSetOltChannelActivationPeriodResponse"
fields_desc = []
class PAS5211MsgSetAlarmConfigResponse(PAS5211Msg):
name = "PAS5211MsgSetAlarmConfigResponse"
fields_desc = []
class PAS5211MsgSendCliCommand(PAS5211Msg):
opcode = 15
name = "PAS5211MsgSendCliCommand"
fields_desc = [
FieldLenField("size", None, fmt="<H", length_of="command"),
StrField("command", "")
]
class PAS5211MsgSwitchToInboundMode(PAS5211Msg):
opcode = 0xec
name = "PAS5211MsgSwitchToInboundMode"
fields_desc = [
MACField("mac", None),
LEShortField("mode", 0)
]
class PAS5211MsgGetActivationAuthMode(PAS5211Msg):
opcode = 145
name = "PAS5211MsgGetActivationAuthMode"
fields_desc = [
LEShortField("nothing", 0) # no idea why this is here
]
class PAS5211MsgGetActivationAuthModeResponse(PAS5211Msg):
opcode = 10385
name = "PAS5211MsgGetActivationAuthModeResponse"
fields_desc = [
LEShortField("mode", 0),
LEShortField("reserved", 0),
]
class PAS5211MsgSetOnuOmciPortId(PAS5211Msg):
opcode = 41
name = "PAS5211MsgSetOnuOmciPortId"
fields_desc = [
LEShortField("port_id", 0),
LEShortField("activate", PON_ENABLE)
]
class PAS5211MsgSetOnuOmciPortIdResponse(PAS5211Msg):
opcode = 10281
name = "PAS5211MsgSetOnuOmciPortIdResponse"
fields_desc = []
class PAS5211MsgGetLogicalObjectStatus(PAS5211Msg):
opcode = 223
name = "PAS5211MsgGetLogicalObjectStatus"
fields_desc = [
LEIntField("type", None),
LEIntField("value", None)
]
class PAS5211MsgGetLogicalObjectStatusResponse(PAS5211Msg):
opcode = 10463
name = "PAS5211MsgGetLogicalObjectStatusResponse"
fields_desc = [
LEIntField("type", None),
LEIntField("value", None),
FieldLenField("return_length", None, fmt="<H", length_of="return_value"),
LEIntField("return_value", "")
]
class PAS5211MsgSetOnuAllocId(PAS5211Msg):
opcode = 8
name = "PAS5211MsgSetOnuAllocId"
fields_desc = [
LEShortField("alloc_id", None),
LEShortField("allocate", None)
]
class PAS5211MsgSetOnuAllocIdResponse(PAS5211Msg):
opcode = 10248
name = "PAS5211MsgSetOnuAllocIdResponse"
fields_desc = []
class PAS5211MsgSendDbaAlgorithmMsg(PAS5211Msg):
opcode = 47
name = "PAS5211MsgSendDbaAlgorithmMsg"
fields_desc = [
# LEShortField("id", None),
FieldLenField("size", None, fmt="<H", length_of="data"),
StrLenField("data", "", length_from=lambda x: x.size)
]
class PAS5211MsgSendDbaAlgorithmMsgResponse(PAS5211Msg):
opcode = 10287
name = "PAS5211MsgSendDbaAlgorithmMsgResponse"
fields_desc = []
class PAS5211MsgSetPortIdConfig(PAS5211Msg):
opcode = 18
name = "PAS5211MsgSetPortIdConfig"
fields_desc = [
LEShortField("port_id", None),
LEShortField("activate", PON_ENABLE),
LEShortField("alloc_id", None),
LEIntField("type", None),
LEIntField("destination", None), # Is this the CNI port
# if yes then values are 0-11 (for ruby)
LEShortField("reserved", None)
]
class PAS5211MsgSetPortIdConfigResponse(PAS5211Msg):
opcode = 10258
name = "PAS5211MsgSetPortIdConfigResponse"
fields_desc = []
class PAS5211MsgGetOnuIdByPortId(PAS5211Msg):
opcode = 196
name = "PAS5211MsgGetOnuIdByPortId"
fields_desc = [
LEShortField("port_id", None),
LEShortField("reserved", 0)
]
class PAS5211MsgGetOnuIdByPortIdResponse(PAS5211Msg):
opcode = 196
name = "PAS5211MsgGetOnuIdByPortIdResponse"
fields_desc = [
LEShortField("valid", None),
LEShortField("onu_id", None)
]
class PAS5211SetVlanUplinkConfiguration(PAS5211Msg):
opcode = 39
name = "PAS5211SetVlanUplinkConfiguration"
fields_desc = [
LEShortField("port_id", None),
LEShortField("pvid_config_enabled", None),
LEShortField("min_cos", None),
LEShortField("max_cos", None),
LEIntField("de_bit", None),
LEShortField("reserved", 0)
]
class PAS5211SetVlanUplinkConfigurationResponse(PAS5211Msg):
opcode = 10279
name = "PAS5211SetVlanUplinkConfigurationResponse"
fields_desc = []
class PAS5211GetOnuAllocs(PAS5211Msg):
opcode = 9
name = "PAS5211GetOnuAllocs"
fields_desc = [
LEShortField("nothing", None) # It's in the PMC code... so yeah.
]
class PAS5211GetOnuAllocsResponse(PAS5211Msg):
opcode = 9
name = "PAS5211GetOnuAllocsResponse"
fields_desc = [
LEShortField("allocs_number", None),
FieldListField("alloc_ids", None, LEShortField("alloc_id", None))
]
class PAS5211GetSnInfo(PAS5211Msg):
opcode = 7
name = "PAS5211GetSnInfo"
fields_desc = [
StrFixedLenField("serial_number", None, 8)
]
class PAS5211GetSnInfoResponse(PAS5211Msg):
opcode = 7
name = "PAS5211GetSnInfoResponse"
fields_desc = [
StrFixedLenField("serial_number", None, 8),
LEShortField("found", None),
LEShortField("type", None),
LEShortField("onu_state", None),
LELongField("equalization_delay", None),
LEShortField("reserved", None)
]
class PAS5211GetOnusRange(PAS5211Msg):
opcode = 116
name = "PAS5211GetOnusRange"
fields_desc = [
LEShortField("nothing", None)
]
class PAS5211GetOnusRangeResponse(PAS5211Msg):
opcode = 116
name = "PAS5211GetOnusRangeResponse"
fields_desc = [
LEIntField("min_distance", None),
LEIntField("max_distance", None),
LEIntField("actual_min_distance", None),
LEIntField("actual_max_distance", None)
]
class PAS5211GetPortIdConfig(PAS5211Msg):
opcode = 19
name = "PAS5211GetPortIdConfig"
fields_desc = [
LEShortField("port_id", None),
LEShortField("reserved", None)
]
class PAS5211GetPortIdConfigResponse(PAS5211Msg):
opcode = 19
name = "PAS5211GetPortIdConfigResponse"
fields_desc = [
LEShortField("activate", None),
LEShortField("encryption_state", None),
LEShortField("alloc_id", None),
LEShortField("type", None),
LEShortField("destination", None),
LEShortField("reserved", None),
]
class PAS5211SetSVlanAtConfig(PAS5211Msg):
opcode = 63
name = "PAS5211SetSVlanAtConfig"
fields_desc = [
LEShortField("svlan_id", None),
LEShortField("forwarding_mode", None),
LEShortField("use_svlan", None),
LEShortField("use_cvlan", None),
LEShortField("use_pbits", None),
LEShortField("discard_unknown", None),
]
class PAS5211SetSVlanAtConfigResponse(PAS5211Msg):
opcode = 63
name = "PAS5211SetSVlanAtConfigResponse"
fields_desc = []
class PAS5211SetUplinkVlanHandl(PAS5211Msg):
opcode = 34
name = "PAS5211SetUplinkVlanHandl"
fields_desc = [
LEShortField("source_port_id", None),
LEShortField("primary_vid", None),
LEShortField("pvid_config_enabled", None),
LEShortField("svlan_tag_operation", None),
LEShortField("cvlan_tag_operation", None),
LEShortField("new_svlan_tag", None),
LEShortField("new_cvlan_tag", None),
LEShortField("destination", None)
]
class PAS5211SetUplinkVlanHandlResponse(PAS5211Msg):
opcode = 34
name = "PAS5211SetUplinkVlanHandlResponse"
fields_desc = []
class PAS5211SetVlanGenConfig(PAS5211Msg):
opcode = 43
name = "PAS5211SetVlanGenConfig"
fields_desc = [
LEShortField("direction", None),
LEShortField("extended_svlan_type", None),
LEShortField("insertion_svlan_ethertype", None),
LEShortField("extended_cvlan_type", None),
LEShortField("insertion_cvlan_ethertype", None),
LEShortField("pon_pcp_code", None),
LEShortField("cni_pcp_code", None),
LEShortField("reserved", None)
]
class PAS5211SetVlanGenConfigResponse(PAS5211Msg):
opcode = 43
name = "PAS5211SetVlanGenConfigResponse"
fields_desc = []
class PAS5211SetVlanDownConfig(PAS5211Msg):
opcode = 32
name = "PAS5211SetVlanDownConfig"
fields_desc = [
LEShortField("svlan_id", None),
LEShortField("double_tag_handling", None),
LEShortField("vlan_priority_handling", None)
]
class PAS5211SetVlanDownConfigResponse(PAS5211Msg):
opcode = 32
name = "PAS5211SetVlanDownConfigResponse"
fields_desc = []
class PAS5211SetDownVlanHandl(PAS5211Msg):
opcode = 27
name = "PAS5211SetDownVlanHandl"
fields_desc = [
LEShortField("svlan_tag", None),
LEShortField("cvlan_tag", None),
LEShortField("double_tag_handling", None),
LEShortField("priority_handling", None),
LEShortField("input_priority", None),
LEShortField("svlan_tag_operation", None),
LEShortField("cvlan_tag_operation", None),
LEShortField("port_id", None),
LEShortField("new_cvlan_tag", None),
LEShortField("destination", None),
LEShortField("output_vlan_prio_handle", None),
LEShortField("output_priority", None)
]
class PAS5211SetDownVlanHandlResponse(PAS5211Msg):
opcode = 27
name = "PAS5211SetDownVlanHandlResponse"
fields_desc = []
class Frame(Packet):
pass
class PAS5211MsgSendFrame(PAS5211Msg):
opcode = 42
name = "PAS5211MsgSendFrame"
fields_desc = [
FieldLenField("length", None, fmt="<H", length_of="frame"),
LEShortField("port_type", PON_PORT_PON),
LEShortField("port_id", 0),
LEShortField("management_frame", PON_FALSE),
ConditionalField(PacketField("frame", None, Packet), lambda pkt: pkt.management_frame == PON_FALSE),
ConditionalField(PacketField("frame", None, OmciFrame), lambda pkt: pkt.management_frame == PON_TRUE)
]
def extract_padding(self, p):
return "", p
class PAS5211MsgSendFrameResponse(PAS5211Msg):
name = "PAS5211MsgSendFrameResponse"
fields_desc = []
class PAS5211Event(PAS5211Msg):
opcode = 12
class PAS5211EventFrameReceived(PAS5211Event):
name = "PAS5211EventFrameReceived"
fields_desc = [
FieldLenField("length", None, length_of="frame", fmt="<H"),
LEShortField("port_type", PON_PORT_PON),
LEShortField("port_id", 0),
LEShortField("management_frame", PON_FALSE),
LEShortField("classification_entity", None),
LEShortField("l3_offset", None),
LEShortField("l4_offset", None),
LEShortField("ignored", 0), # TODO these do receive values, but there is no code in PMC using it
ConditionalField(PacketField("frame", None, Packet), lambda pkt: pkt.management_frame == PON_FALSE),
ConditionalField(PacketField("frame", None, OmciFrame), lambda pkt: pkt.management_frame == PON_TRUE)
]
class PAS5211EventDbaAlgorithm(PAS5211Event):
name = "PAS5211EventDbaAlgorithm"
fields_desc = [
FieldLenField("size", None, fmt="<H", length_of="data"),
StrLenField("data", "", length_from=lambda x: x.size)
]
class PAS5211EventOnuActivation(PAS5211Event):
name = "PAS5211EventOnuActivation"
event_type = 1
fields_desc = [
StrFixedLenField("serial_number", None, length=8),
LEIntField("equalization_period", None)
]
class PAS5211EventOnuDeactivation(PAS5211Event):
name = "PAS5211EventOnuDeactivation"
event_type = 2
fields_desc = [
LEShortField("code", None)
]
class PAS5211EventLogMsg(PAS5211Event):
name = "PAS5211EventLogMsg"
event_type = 3
fields_desc = []
class PAS5211EventFWGeneralPrint(PAS5211Event):
name = "PAS5211EventFWGeneralPrint"
event_type = 4
fields_desc = []
class PAS5211EventFWTracePrint(PAS5211Event):
name = "PAS5211EventFWTracePrint"
event_type = 5
fields_desc = []
class PAS5211EventStartEncryption(PAS5211Event):
name = "PAS5211EventStartEncryption"
event_type = 6
fields_desc = []
class PAS5211EventStopEncryption(PAS5211Event):
name = "PAS5211EventStopEncryption"
event_type = 7
fields_desc = []
class PAS5211EventUpdateEncryption(PAS5211Event):
name = "PAS5211EventUpdateEncryption"
event_type = 8
fields_desc = []
class PAS5211EventAlarmNotification(PAS5211Event):
name = "PAS5211EventAlarmNotification"
event_type = 9
fields_desc = [
LEShortField("code", None),
LEIntField("parameter1", None),
LEIntField("parameter2", None),
LEIntField("parameter3", None),
LEIntField("parameter4", None)
]
class PAS5211EventDBAAlgorithmEvent(PAS5211Event):
name = "PAS5211EventDBAAlgorithmEvent"
event_type = 11
fields_desc = []
class PAS5211EventOLTReset(PAS5211Event):
name = "PAS5211EventOLTReset"
event_type = 12
fields_desc = []
class PAS5211EventOnuSleepMode(PAS5211Event):
name = "PAS5211EventOnuSleepMode"
event_type = 13
fields_desc = []
class PAS5211EventAssignAllocId(PAS5211Event):
name = "PAS5211EventAssignAllocId"
event_type = 14
fields_desc = []
class PAS5211EventConfigOMCIPort(PAS5211Event):
name = "PAS5211EventConfigOMCIPort"
event_type = 15
fields_desc = []
class PAS5211EventPloamMessageReceived(PAS5211Event):
name = "PAS5211EventPloamMessageReceived"
event_type = 17
fields_desc = []
class PAS5211EventLoadOLTBinaryCompleted(PAS5211Event):
name = "PAS5211EventLoadOLTBinaryCompleted"
event_type = 18
fields_desc = []
class PAS5211EventMasterOLTFail(PAS5211Event):
name = "PAS5211EventMasterOLTFail"
event_type = 19
fields_desc = []
class PAS5211EventRedundantSwitchOverStatus(PAS5211Event):
name = "PAS5211EventRedundantSwitchOverStatus"
event_type = 20
fields_desc = []
class PAS5211EventSyncOLTData(PAS5211Event):
name = "PAS5211EventSyncOLTData"
event_type = 21
fields_desc = []
class PAS5211EventEQDChange(PAS5211Event):
name = "PAS5211EventEQDChange"
event_type = 22
fields_desc = []
class PAS5211EventXAUIStatusNotification(PAS5211Event):
name = "PAS5211EventXAUIStatusNotification"
event_type = 23
fields_desc = []
class PAS5211EventUnauthenticatedONU(PAS5211Event):
name = "PAS5211EventUnauthenticatedONU"
event_type = 24
fields_desc = []
class PAS5211EventFalseQFullReported(PAS5211Event):
name = "PAS5211EventFalseQFullReported"
event_type = 25
fields_desc = []
class PAS5211EventOpticalModuleIndication(PAS5211Event):
name = "PAS5211EventOpticalModuleIndication"
event_type = 27
fields_desc = []
class PAS5211EventActivationFailure(PAS5211Event):
name = "PAS5211EventActivationFailure"
event_type = 28
fields_desc = []
class PAS5211EventBipError(PAS5211Event):
name = "PAS5211EventBipError"
event_type = 29
fields_desc = []
class PAS5211EventREIError(PAS5211Event):
name = "PAS5211EventREIError"
event_type = 30
fields_desc = []
class PAS5211EventRDNMultiONUFailure(PAS5211Event):
name = "PAS5211EventRDNMultiONUFailure"
event_type = 31
fields_desc = []
class PAS5211EventUnexpectedSN(PAS5211Event):
name = "PAS5211EventUnexpectedSN"
event_type = 32
fields_desc = []
class PAS5211EventRDNSwitchOverONUResult(PAS5211Event):
name = "PAS5211EventRDNSwitchOverONUResult"
event_type = 33
fields_desc = []
class PAS5211EventGMacMalfunctionSuspected(PAS5211Event):
    name = "PAS5211EventGMacMalfunctionSuspected"
event_type = 34
fields_desc = []
class PAS5211GetPortIdDownstreamPolicingConfig(PAS5211Msg):
opcode = 82
name = "PAS5211GetPortIdDownstreamPolicingConfig"
fields_desc = [
LEShortField("port_id", None),
LEShortField("reserved", None)]
class PAS5211GetPortIdDownstreamPolicingConfigResponse(PAS5211Msg):
opcode = 82
name = "PAS5211GetPortIdDownstreamPolicingConfigResponse"
fields_desc = [
LEIntField("committed_bandwidth", None),
LEIntField("excessive_bandwidth", None),
LEShortField("committed_burst_limit", None),
LEShortField("excessive_burst_limit", None),
LEShortField("ds_policing_config_id", None),
LEShortField("reserved", None)]
class PAS5211RemoveDownstreamPolicingConfig(PAS5211Msg):
opcode = 76
name = "PAS5211RemoveDownstreamPolicingConfig"
fields_desc = [
LEShortField("policing_config_id", None),
LEShortField("reserved", None)]
class PAS5211RemoveDownstreamPolicingConfigResponse(PAS5211Msg):
opcode = 76
name = "PAS5211RemoveDownstreamPolicingConfigResponse"
fields_desc = []
class PAS5211SetPortIdPolicingConfig(PAS5211Msg):
opcode = 80
name = "PAS5211SetPortIdPolicingConfig"
fields_desc = [
LEShortField("direction", None),
LEShortField("port_id", None),
LEShortField("policing_config_id", None),
LEShortField("reserved", None)]
class PAS5211SetPortIdPolicingConfigResponse(PAS5211Msg):
opcode = 80
name = "PAS5211SetPortIdPolicingConfigResponse"
fields_desc = []
class PAS5211UnsetPortIdPolicingConfig(PAS5211Msg):
opcode = 81
name = "PAS5211UnsetSetPortIdPolicingConfig"
fields_desc = [
LEShortField("direction", None),
LEShortField("port_id", None)]
class PAS5211UnsetPortIdPolicingConfigResponse(PAS5211Msg):
opcode = 81
name = "PAS5211UnsetSetPortIdPolicingConfigResponse"
fields_desc = []
class PAS5211SetDownstreamPolicingConfig(PAS5211Msg):
opcode = 74
name = "PAS5211SetDownstreamPolicingConfig"
fields_desc = [
LEIntField("committed_bandwidth", None),
LEIntField("excessive_bandwidth", None),
LEShortField("committed_burst_limit", None),
LEShortField("excessive_burst_limit", None)]
class PAS5211SetDownstreamPolicingConfigResponse(PAS5211Msg):
opcode = 74
name = "PAS5211SetDownstreamPolicingConfigResponse"
fields_desc = [
LEShortField("policing_config_id", None),
LEShortField("reserved", None)]
class PAS5211SetUpstreamPolicingConfig(PAS5211Msg):
opcode = 77
name = "PAS5211SetUpstreamPolicingConfig"
fields_desc = [
LEIntField("bandwidth", None),
LEShortField("burst_limit", None),
LEShortField("reserved", None)]
class PAS5211SetUpstreamPolicingConfigResponse(PAS5211Msg):
opcode = 77
name = "PAS5211SetDownstreamPolicingResponse"
fields_desc = [
LEShortField("policing_config_id", None),
LEShortField("reserved", None)]
class PAS5211Dot3(Packet):
name = "PAS5211Dot3"
fields_desc = [DestMACField("dst"),
MACField("src", ETHER_ANY),
LenField("len", None, "H")]
MIN_FRAME_SIZE = 60
def post_build(self, pkt, payload):
pkt += payload
size = ord(payload[4]) + (ord(payload[5]) << 8)
        length = size + 6  # this is an idiosyncrasy of the PASCOMM protocol
pkt = pkt[:12] + chr(length >> 8) + chr(length & 0xff) + pkt[14:]
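        # Worked example: if the PAS5211 payload declares size 0x0010 at
        # little-endian bytes 4-5, then length = 0x16 and bytes 12-13 of the
        # frame are patched to 0x00 0x16 (big-endian), as 802.3 expects.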
padding = self.MIN_FRAME_SIZE - len(pkt)
if padding > 0:
pkt = pkt + ("\x00" * padding)
return pkt
'''
This is needed in order to force scapy to use PAS5211Dot3
instead of the default Dot3 that the Ether class uses.
'''
@classmethod
def PAS_dispatch_hook(cls, _pkt=None, *args, **kargs):
if _pkt and len(_pkt) >= 14:
if struct.unpack("!H", _pkt[12:14])[0] <= 1500:
return PAS5211Dot3
return cls
Ether.dispatch_hook = PAS_dispatch_hook
# bindings for messages received
# fix for v2 of Microsemi OLT.
bind_layers(Ether, PAS5211FrameHeader, type=0x0a00)
bind_layers(PAS5211Dot3, PAS5211FrameHeader)
bind_layers(PAS5211FrameHeader, PAS5211MsgHeader)
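# Opcode convention used by the bindings below: requests carry the 0x3000
# direction bits and responses/events (with one 0x3800 exception) carry
# 0x2800, with the low bits holding the message code. For example,
# 0x3000 | 2 == 12290 (GetProtocolVersion request) and
# 0x2800 | 2 == 10242 (its response).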
bind_layers(PAS5211MsgHeader, PAS5211MsgGetProtocolVersion, opcode=0x3000 | 2)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetProtocolVersionResponse, opcode=0x2800 | 2)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetOltVersion, opcode=0x3000 | 3)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetOltVersionResponse, opcode=0x3800 | 3)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltOptics, opcode=0x3000 | 106)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltOpticsResponse, opcode=0x2800 | 106)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOpticsIoControl, opcode=0x3000 | 108)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOpticsIoControlResponse, opcode=0x2800 | 108)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetGeneralParam, opcode=0x3000 | 164)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetGeneralParamResponse, opcode=0x2800 | 164)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetGeneralParam, opcode=0x3000 | 165)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetGeneralParamResponse, opcode=0x2800 | 165)
bind_layers(PAS5211MsgHeader, PAS5211MsgAddOltChannel, opcode=0x3000 | 4)
bind_layers(PAS5211MsgHeader, PAS5211MsgAddOltChannelResponse, opcode=0x2800 | 4)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetAlarmConfig, opcode=0x3000 | 48)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetAlarmConfigResponse, opcode=0x2800 | 48)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltChannelActivationPeriod, opcode=0x3000 | 11)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltChannelActivationPeriodResponse, opcode=0x2800 | 11)
bind_layers(PAS5211MsgHeader, PAS5211MsgStartDbaAlgorithm, opcode=0x3000 | 55)
bind_layers(PAS5211MsgHeader, PAS5211MsgStartDbaAlgorithmResponse, opcode=0x2800 | 55)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetDbaMode, opcode=0x3000 | 57)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetDbaModeResponse, opcode=0x2800 | 57)
bind_layers(PAS5211MsgHeader, PAS5211MsgSendFrame, opcode=0x3000 | 42)
bind_layers(PAS5211MsgHeader, PAS5211MsgSendFrameResponse, opcode=0x2800 | 42)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetActivationAuthMode, opcode=0x3000 | 145)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetActivationAuthModeResponse, opcode=0x2800 | 145)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuOmciPortId, opcode=0x3000 | 41)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuOmciPortIdResponse, opcode=0x2800 | 41)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetLogicalObjectStatus, opcode=0x3000 | 223)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetLogicalObjectStatusResponse, opcode=0x2800 | 223)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuAllocId, opcode=0x3000 | 8)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuAllocIdResponse, opcode=0x2800 | 8)
bind_layers(PAS5211MsgHeader, PAS5211MsgSendDbaAlgorithmMsg, opcode=0x3000 | 47)
bind_layers(PAS5211MsgHeader, PAS5211MsgSendDbaAlgorithmMsgResponse, opcode=0x2800 | 47)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetPortIdConfig, opcode=0x3000 | 18)
bind_layers(PAS5211MsgHeader, PAS5211MsgSetPortIdConfigResponse, opcode=0x2800 | 18)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetOnuIdByPortId, opcode=0x3000 | 196)
bind_layers(PAS5211MsgHeader, PAS5211MsgGetOnuIdByPortIdResponse, opcode=0x2800 | 196)
bind_layers(PAS5211MsgHeader, PAS5211SetVlanUplinkConfiguration, opcode=0x3000 | 39)
bind_layers(PAS5211MsgHeader, PAS5211SetVlanUplinkConfigurationResponse, opcode=0x2800 | 39)
bind_layers(PAS5211MsgHeader, PAS5211GetOnuAllocs, opcode=0x3000 | 9)
bind_layers(PAS5211MsgHeader, PAS5211GetOnuAllocsResponse, opcode=0x2800 | 9)
bind_layers(PAS5211MsgHeader, PAS5211GetSnInfo, opcode=0x3000 | 7)
bind_layers(PAS5211MsgHeader, PAS5211GetSnInfoResponse, opcode=0x2800 | 7)
bind_layers(PAS5211MsgHeader, PAS5211GetOnusRange, opcode=0x3000 | 116)
bind_layers(PAS5211MsgHeader, PAS5211GetOnusRangeResponse, opcode=0x2800 | 116)
bind_layers(PAS5211MsgHeader, PAS5211GetPortIdConfig, opcode=0x3000 | 19)
bind_layers(PAS5211MsgHeader, PAS5211GetPortIdConfigResponse, opcode=0x2800 | 19)
bind_layers(PAS5211MsgHeader, PAS5211SetSVlanAtConfig, opcode=0x3000 | 63)
bind_layers(PAS5211MsgHeader, PAS5211SetSVlanAtConfigResponse, opcode=0x2800 | 63)
bind_layers(PAS5211MsgHeader, PAS5211SetUplinkVlanHandl, opcode=0x3000 | 34)
bind_layers(PAS5211MsgHeader, PAS5211SetUplinkVlanHandlResponse, opcode=0x2800 | 34)
bind_layers(PAS5211MsgHeader, PAS5211SetVlanGenConfig, opcode=0x3000 | 43)
bind_layers(PAS5211MsgHeader, PAS5211SetVlanGenConfigResponse, opcode=0x2800 | 43)
bind_layers(PAS5211MsgHeader, PAS5211SetVlanDownConfig, opcode=0x3000 | 32)
bind_layers(PAS5211MsgHeader, PAS5211SetVlanDownConfigResponse, opcode=0x2800 | 32)
bind_layers(PAS5211MsgHeader, PAS5211SetDownVlanHandl, opcode=0x3000 | 27)
bind_layers(PAS5211MsgHeader, PAS5211SetDownVlanHandlResponse, opcode=0x2800 | 27)
bind_layers(PAS5211MsgHeader, PAS5211SetDownstreamPolicingConfig, opcode=0x3000 | 74)
bind_layers(PAS5211MsgHeader, PAS5211SetDownstreamPolicingConfigResponse, opcode=0x2800 | 74)
bind_layers(PAS5211MsgHeader, PAS5211SetUpstreamPolicingConfig, opcode=0x3000 | 77)
bind_layers(PAS5211MsgHeader, PAS5211SetUpstreamPolicingConfigResponse, opcode=0x2800 | 77)
bind_layers(PAS5211MsgHeader, PAS5211SetPortIdPolicingConfig, opcode=0x3000 | 80)
bind_layers(PAS5211MsgHeader, PAS5211SetPortIdPolicingConfigResponse, opcode=0x2800 | 80)
bind_layers(PAS5211MsgHeader, PAS5211UnsetPortIdPolicingConfig, opcode=0x3000 | 81)
bind_layers(PAS5211MsgHeader, PAS5211UnsetPortIdPolicingConfigResponse, opcode=0x2800 | 81)
bind_layers(PAS5211MsgHeader, PAS5211GetPortIdDownstreamPolicingConfig, opcode=0x3000 | 82)
bind_layers(PAS5211MsgHeader, PAS5211GetPortIdDownstreamPolicingConfigResponse, opcode=0x2800 | 82)
bind_layers(PAS5211MsgHeader, PAS5211RemoveDownstreamPolicingConfig, opcode=0x3000 | 76)
bind_layers(PAS5211MsgHeader, PAS5211RemoveDownstreamPolicingConfigResponse, opcode=0x2800 | 76)
# bindings for events received
bind_layers(PAS5211MsgHeader, PAS5211EventOnuActivation, opcode=0x2800 | 12, event_type=1)
bind_layers(PAS5211MsgHeader, PAS5211EventOnuDeactivation, opcode=0x2800 | 12, event_type=2)
bind_layers(PAS5211MsgHeader, PAS5211EventFrameReceived, opcode=0x2800 | 12, event_type=10)
bind_layers(PAS5211MsgHeader, PAS5211EventDbaAlgorithm, opcode=0x2800 | 12, event_type=11)
bind_layers(PAS5211MsgHeader, PAS5211EventAlarmNotification, opcode=0x2800 | 12, event_type=9)
bind_layers(PAS5211MsgHeader, PAS5211Event, opcode=0x2800 | 12)
class Display(object):
def __init__(self, pkts):
self.pkts = pkts
def show(self, seq):
self.pkts[seq].show()
def __getitem__(self, key):
self.show(key)
def walk(self, index=0):
while index < len(self.pkts):
self.show(index)
try:
input("(current packet - %s) Next packet?" % index)
except Exception as e:
pass
index += 1
if __name__ == '__main__':
from scapy.utils import rdpcap
import sys
import code
packets = rdpcap(sys.argv[1])
p = Display(packets)
def walk(index=0, interactive=True, channel=-1):
        if not interactive:
            for packet in packets:
                if PAS5211MsgHeader in packet:
                    if PAS5211MsgGetOltVersion not in packet and PAS5211MsgGetOltVersionResponse not in packet:
                        if channel != -1:
if packet[PAS5211MsgHeader].channel_id == channel:
packet.show()
else:
packet.show()
else:
p.walk(index=index)
code.interact(local=locals())
|
|
"""Graph mode TF policy built using build_tf_policy()."""
from collections import OrderedDict
import logging
import numpy as np
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.debug import log_once, summarize
from ray.rllib.utils.tracking_dict import UsageTrackingDict
tf = try_import_tf()
logger = logging.getLogger(__name__)
class DynamicTFPolicy(TFPolicy):
"""A TFPolicy that auto-defines placeholders dynamically at runtime.
Initialization of this class occurs in two phases.
* Phase 1: the model is created and model variables are initialized.
* Phase 2: a fake batch of data is created, sent to the trajectory
postprocessor, and then used to create placeholders for the loss
function. The loss and stats functions are initialized with these
placeholders.
Initialization defines the static graph.
Attributes:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): config of the policy
        model (ModelV2): TF model instance
dist_class (type): TF action distribution class
"""
def __init__(self,
obs_space,
action_space,
config,
loss_fn,
stats_fn=None,
grad_stats_fn=None,
before_loss_init=None,
make_model=None,
action_sampler_fn=None,
existing_inputs=None,
existing_model=None,
get_batch_divisibility_req=None,
obs_include_prev_action_reward=True):
"""Initialize a dynamic TF policy.
Arguments:
            obs_space (gym.Space): Observation space of the policy.
action_space (gym.Space): Action space of the policy.
config (dict): Policy-specific configuration data.
            loss_fn (func): function that returns a loss tensor given the
                policy, model, action distribution class, and a train batch
                of experience tensor placeholders
stats_fn (func): optional function that returns a dict of
TF fetches given the policy and batch input tensors
grad_stats_fn (func): optional function that returns a dict of
TF fetches given the policy and loss gradient tensors
before_loss_init (Optional[callable]): Optional function to run
prior to loss init that takes the same arguments as __init__.
make_model (func): optional function that returns a ModelV2 object
given (policy, obs_space, action_space, config).
All policy variables should be created in this function. If not
specified, a default model will be created.
action_sampler_fn (func): optional function that returns a
tuple of action and action logp tensors given
(policy, model, input_dict, obs_space, action_space, config).
If not specified, a default action distribution will be used.
existing_inputs (OrderedDict): When copying a policy, this
specifies an existing dict of placeholders to use instead of
defining new ones
existing_model (ModelV2): when copying a policy, this specifies
an existing model to clone and share weights with
get_batch_divisibility_req (func): optional function that returns
the divisibility requirement for sample batches
obs_include_prev_action_reward (bool): whether to include the
previous action and reward in the model input
"""
self.config = config
self._loss_fn = loss_fn
self._stats_fn = stats_fn
self._grad_stats_fn = grad_stats_fn
self._obs_include_prev_action_reward = obs_include_prev_action_reward
# Setup standard placeholders
prev_actions = None
prev_rewards = None
if existing_inputs is not None:
obs = existing_inputs[SampleBatch.CUR_OBS]
if self._obs_include_prev_action_reward:
prev_actions = existing_inputs[SampleBatch.PREV_ACTIONS]
prev_rewards = existing_inputs[SampleBatch.PREV_REWARDS]
else:
obs = tf.placeholder(
tf.float32,
shape=[None] + list(obs_space.shape),
name="observation")
if self._obs_include_prev_action_reward:
prev_actions = ModelCatalog.get_action_placeholder(
action_space)
prev_rewards = tf.placeholder(
tf.float32, [None], name="prev_reward")
self._input_dict = {
SampleBatch.CUR_OBS: obs,
SampleBatch.PREV_ACTIONS: prev_actions,
SampleBatch.PREV_REWARDS: prev_rewards,
"is_training": self._get_is_training_placeholder(),
}
self._seq_lens = tf.placeholder(
dtype=tf.int32, shape=[None], name="seq_lens")
# Setup model
if action_sampler_fn:
if not make_model:
raise ValueError(
"make_model is required if action_sampler_fn is given")
self.dist_class = None
else:
self.dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"])
if existing_model:
self.model = existing_model
elif make_model:
self.model = make_model(self, obs_space, action_space, config)
else:
self.model = ModelCatalog.get_model_v2(
obs_space,
action_space,
logit_dim,
self.config["model"],
framework="tf")
if existing_inputs:
self._state_in = [
v for k, v in existing_inputs.items()
if k.startswith("state_in_")
]
if self._state_in:
self._seq_lens = existing_inputs["seq_lens"]
else:
self._state_in = [
tf.placeholder(shape=(None, ) + s.shape, dtype=s.dtype)
for s in self.model.get_initial_state()
]
model_out, self._state_out = self.model(self._input_dict,
self._state_in, self._seq_lens)
# Setup action sampler
if action_sampler_fn:
action_sampler, action_logp = action_sampler_fn(
self, self.model, self._input_dict, obs_space, action_space,
config)
else:
action_dist = self.dist_class(model_out, self.model)
action_sampler = action_dist.sample()
action_logp = action_dist.sampled_action_logp()
# Phase 1 init
sess = tf.get_default_session() or tf.Session()
if get_batch_divisibility_req:
batch_divisibility_req = get_batch_divisibility_req(self)
else:
batch_divisibility_req = 1
TFPolicy.__init__(
self,
obs_space,
action_space,
config,
sess,
obs_input=obs,
action_sampler=action_sampler,
action_logp=action_logp,
loss=None, # dynamically initialized on run
loss_inputs=[],
model=self.model,
state_inputs=self._state_in,
state_outputs=self._state_out,
prev_action_input=prev_actions,
prev_reward_input=prev_rewards,
seq_lens=self._seq_lens,
max_seq_len=config["model"]["max_seq_len"],
batch_divisibility_req=batch_divisibility_req)
# Phase 2 init.
if before_loss_init is not None:
before_loss_init(self, obs_space, action_space, config)
if not existing_inputs:
self._initialize_loss()
@override(TFPolicy)
def copy(self, existing_inputs):
"""Creates a copy of self using existing input placeholders."""
        # Note that there might be RNN state inputs at the end of the list,
        # plus one extra slot for the seq_lens tensor (hence the +1 below).
        if self._state_inputs:
            num_state_inputs = len(self._state_inputs) + 1
else:
num_state_inputs = 0
if len(self._loss_inputs) + num_state_inputs != len(existing_inputs):
raise ValueError("Tensor list mismatch", self._loss_inputs,
self._state_inputs, existing_inputs)
for i, (k, v) in enumerate(self._loss_inputs):
if v.shape.as_list() != existing_inputs[i].shape.as_list():
raise ValueError("Tensor shape mismatch", i, k, v.shape,
existing_inputs[i].shape)
# By convention, the loss inputs are followed by state inputs and then
# the seq len tensor
rnn_inputs = []
for i in range(len(self._state_inputs)):
rnn_inputs.append(("state_in_{}".format(i),
existing_inputs[len(self._loss_inputs) + i]))
if rnn_inputs:
rnn_inputs.append(("seq_lens", existing_inputs[-1]))
input_dict = OrderedDict(
[(k, existing_inputs[i])
for i, (k, _) in enumerate(self._loss_inputs)] + rnn_inputs)
instance = self.__class__(
self.observation_space,
self.action_space,
self.config,
existing_inputs=input_dict,
existing_model=self.model)
instance._loss_input_dict = input_dict
loss = instance._do_loss_init(input_dict)
loss_inputs = [(k, existing_inputs[i])
for i, (k, _) in enumerate(self._loss_inputs)]
TFPolicy._initialize_loss(instance, loss, loss_inputs)
if instance._grad_stats_fn:
instance._stats_fetches.update(
instance._grad_stats_fn(instance, input_dict, instance._grads))
return instance
@override(Policy)
def get_initial_state(self):
if self.model:
return self.model.get_initial_state()
else:
return []
def _initialize_loss(self):
def fake_array(tensor):
shape = tensor.shape.as_list()
shape = [s if s is not None else 1 for s in shape]
return np.zeros(shape, dtype=tensor.dtype.as_numpy_dtype)
dummy_batch = {
SampleBatch.CUR_OBS: fake_array(self._obs_input),
SampleBatch.NEXT_OBS: fake_array(self._obs_input),
SampleBatch.DONES: np.array([False], dtype=np.bool),
SampleBatch.ACTIONS: fake_array(
ModelCatalog.get_action_placeholder(self.action_space)),
SampleBatch.REWARDS: np.array([0], dtype=np.float32),
}
if self._obs_include_prev_action_reward:
dummy_batch.update({
SampleBatch.PREV_ACTIONS: fake_array(self._prev_action_input),
SampleBatch.PREV_REWARDS: fake_array(self._prev_reward_input),
})
state_init = self.get_initial_state()
state_batches = []
for i, h in enumerate(state_init):
dummy_batch["state_in_{}".format(i)] = np.expand_dims(h, 0)
dummy_batch["state_out_{}".format(i)] = np.expand_dims(h, 0)
state_batches.append(np.expand_dims(h, 0))
if state_init:
dummy_batch["seq_lens"] = np.array([1], dtype=np.int32)
for k, v in self.extra_compute_action_fetches().items():
dummy_batch[k] = fake_array(v)
# postprocessing might depend on variable init, so run it first here
self._sess.run(tf.global_variables_initializer())
postprocessed_batch = self.postprocess_trajectory(
SampleBatch(dummy_batch))
# model forward pass for the loss (needed after postprocess to
# overwrite any tensor state from that call)
self.model(self._input_dict, self._state_in, self._seq_lens)
if self._obs_include_prev_action_reward:
train_batch = UsageTrackingDict({
SampleBatch.PREV_ACTIONS: self._prev_action_input,
SampleBatch.PREV_REWARDS: self._prev_reward_input,
SampleBatch.CUR_OBS: self._obs_input,
})
loss_inputs = [
(SampleBatch.PREV_ACTIONS, self._prev_action_input),
(SampleBatch.PREV_REWARDS, self._prev_reward_input),
(SampleBatch.CUR_OBS, self._obs_input),
]
else:
train_batch = UsageTrackingDict({
SampleBatch.CUR_OBS: self._obs_input,
})
loss_inputs = [
(SampleBatch.CUR_OBS, self._obs_input),
]
for k, v in postprocessed_batch.items():
if k in train_batch:
continue
elif v.dtype == np.object:
continue # can't handle arbitrary objects in TF
elif k == "seq_lens" or k.startswith("state_in_"):
continue
shape = (None, ) + v.shape[1:]
dtype = np.float32 if v.dtype == np.float64 else v.dtype
placeholder = tf.placeholder(dtype, shape=shape, name=k)
train_batch[k] = placeholder
for i, si in enumerate(self._state_in):
train_batch["state_in_{}".format(i)] = si
train_batch["seq_lens"] = self._seq_lens
if log_once("loss_init"):
logger.debug(
"Initializing loss function with dummy input:\n\n{}\n".format(
summarize(train_batch)))
self._loss_input_dict = train_batch
loss = self._do_loss_init(train_batch)
for k in sorted(train_batch.accessed_keys):
if k != "seq_lens" and not k.startswith("state_in_"):
loss_inputs.append((k, train_batch[k]))
TFPolicy._initialize_loss(self, loss, loss_inputs)
if self._grad_stats_fn:
self._stats_fetches.update(
self._grad_stats_fn(self, train_batch, self._grads))
self._sess.run(tf.global_variables_initializer())
def _do_loss_init(self, train_batch):
loss = self._loss_fn(self, self.model, self.dist_class, train_batch)
if self._stats_fn:
self._stats_fetches.update(self._stats_fn(self, train_batch))
# override the update ops to be those of the model
self._update_ops = self.model.update_ops()
return loss
|
|
# -*- coding: utf-8 -*-
import datetime, string, random, decimal
import json
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
import settings as cart_settings
import helpers
class CartProductInterface(object):
"""Minimal CartProductInterface implementation, raising NotImplementedError
for all required methods."""
def get_thumbnail(self, options={}):
"""Returns an image instance for the product"""
raise NotImplementedError()
def get_price(self, quantity, options={}):
"""Returns product price, given the quantity added and options"""
raise NotImplementedError()
# optional methods
@staticmethod
def complete_purchase(orderlines, order):
"""Called on purchase completion for each item in the order."""
pass
class DefaultCartProductInterface(CartProductInterface):
"""CartProductInterface interface implementation, giving sensible defaults for
all required methods."""
def get_thumbnail(self, options={}):
return None
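# Hedged example of a concrete product hooking into the cart; the model and
# field names here are illustrative, not part of this app.
#
#   class Book(models.Model, DefaultCartProductInterface):
#       title = models.CharField(max_length=255)
#       unit_price = models.DecimalField(max_digits=10, decimal_places=2)
#
#       def get_price(self, quantity, options={}):
#           # total price for the given quantity
#           return self.unit_price * quantity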
class Order(models.Model):
"""Stores information that should apply to every purchase."""
hash = models.CharField(max_length=16, unique=True, editable=False)
name = models.CharField(max_length=255, default='', blank=True)
email = models.EmailField(default='', blank=True)
phone = models.CharField(max_length=20, default='', blank=True)
street_address = models.CharField(max_length=255, blank=True, default='')
suburb = models.CharField(max_length=255, blank=True, default='')
city = models.CharField(max_length=255, blank=True, default='')
post_code = models.CharField(max_length=20, blank=True, default='')
country = models.CharField(max_length=255, blank=True, default='')
status = models.CharField(max_length=20, choices=cart_settings.ORDER_STATUSES, default='pending')
payment_successful = models.BooleanField(default=False)
notification_sent = models.BooleanField(default=False, editable=False)
acknowledgement_sent = models.BooleanField(default=False, editable=False)
creation_date = models.DateTimeField(auto_now_add=True, editable=False)
payment_date = models.DateTimeField(null=True, blank=True, editable=False)
completion_date = models.DateTimeField(null=True, blank=True, editable=False)
session_id = models.CharField(max_length=32, editable=False)
shipping_cost = models.DecimalField(max_digits=10, decimal_places=2, default=0)
class Meta:
ordering = ('-creation_date',)
def __unicode__(self):
return "Order #%s - %s, %s" % (self.pk, self.name, self.total())
def save(self, *args, **kwargs):
if (not self.completion_date) and self.status == 'shipped':
self.completion_date = datetime.datetime.now()
if (not self.payment_date) and self.payment_successful:
self.payment_date = datetime.datetime.now()
self.complete_purchase()
super(Order, self).save(*args, **kwargs)
def complete_purchase(self):
groups = {}
for line in self.orderline_set.all():
ctype = ContentType.objects.get_for_model(line.product)
if not groups.get(ctype.pk, None):
groups[ctype.pk] = []
groups[ctype.pk].append(line)
for ctype_pk in groups:
cls = ContentType.objects.get(pk=ctype_pk).model_class()
if hasattr(cls, 'complete_purchase'):
cls.complete_purchase(groups[ctype_pk], self)
def total(self):
total = sum(line.price for line in self.orderline_set.all()) + self.shipping_cost
detail_cls = helpers.get_order_detail()
if detail_cls:
try:
detail = self.get_detail()
except detail_cls.DoesNotExist:
pass
else:
total += getattr(detail, 'additional_total', lambda: 0)()
return total
def total_str(self, prefix='$'):
return "%s%.2f" % (prefix, self.total())
total_str.short_description = "Total"
def total_quantity(self):
return sum(line.quantity for line in self.orderline_set.all())
@models.permalink
def get_absolute_url(self):
return ('cart.views.complete', (self.hash,))
@models.permalink
def get_admin_url(self):
return ('admin:cart_order_change', (self.pk,))
def get_detail(self):
"""Returns extra detail as defined by get_order_detail()"""
if not hasattr(self, '_detail_cache'):
model_cls = helpers.get_order_detail()
if model_cls:
self._detail_cache = model_cls._default_manager.using(self._state.db).get(order__id__exact=self.id)
self._detail_cache.order = self
else:
self._detail_cache = None
return self._detail_cache
class OrderLine(models.Model):
"""Stores information about a single product/options combination for an order."""
order = models.ForeignKey(Order)
product_content_type = models.ForeignKey(ContentType)
product_object_id = models.PositiveIntegerField()
product = generic.GenericForeignKey('product_content_type', 'product_object_id')
quantity = models.IntegerField(default=1)
# total price for the line, not per-unit
price = models.DecimalField(max_digits=10, decimal_places=2)
options = models.TextField(blank=True, default='', editable=False)
def options_text(self):
if self.options:
options = json.loads(self.options)
return ", ".join([options[key] for key in options])
else:
return ''
class Meta:
pass
def __unicode__(self):
return unicode(self.product) + (" (%s)" % self.options_text() if self.options else '')
    def latest_payment_attempt(self):
        # PaymentAttempt's ForeignKey points at Order, so Django's default
        # reverse accessor is paymentattempt_set.
        if self.paymentattempt_set.count():
            return self.paymentattempt_set.order_by('-creation_date')[0]
        else:
            return None
class PaymentAttempt(models.Model):
"""Stores information about each attempted payment for an order."""
order = models.ForeignKey(Order)
hash = models.CharField(max_length=16, unique=True, editable=False)
result = models.TextField(default='', blank=True)
user_message = models.TextField(default='', blank=True)
transaction_ref = models.CharField(max_length=32, blank=True, default='')
amount = models.DecimalField(max_digits=10, decimal_places=2, default=0)
success = models.BooleanField(default=False)
creation_date = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return "Payment attempt #%s on Order #%s" % (self.pk, self.order.pk)
class Meta:
ordering = ('-creation_date',)
CHARS = string.digits + string.letters
def create_hash(sender, **kwargs):
while not kwargs['instance'].hash or sender.objects.filter(hash=kwargs['instance'].hash).exclude(pk=kwargs['instance'].pk):
hash = ''.join(random.choice(CHARS) for i in xrange(8))
kwargs['instance'].hash = hash
models.signals.pre_save.connect(create_hash, sender=PaymentAttempt)
models.signals.pre_save.connect(create_hash, sender=Order)
|
|
"""
sentry.utils.email
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import toronado
from django.conf import settings
from django.core.mail import get_connection, EmailMultiAlternatives
from django.core.signing import Signer
from django.utils.encoding import force_bytes
from django.utils.functional import cached_property
from email.utils import parseaddr
from sentry.web.helpers import render_to_string
from sentry.utils import metrics
from sentry.utils.safe import safe_execute
signer = Signer()
SMTP_HOSTNAME = getattr(settings, 'SENTRY_SMTP_HOSTNAME', 'localhost')
ENABLE_EMAIL_REPLIES = getattr(settings, 'SENTRY_ENABLE_EMAIL_REPLIES', False)
def email_to_group_id(address):
"""
Email address should be in the form of:
{group_id}+{signature}@example.com
"""
address = address.split('@', 1)[0]
signed_data = address.replace('+', ':')
return int(force_bytes(signer.unsign(signed_data)))
def group_id_to_email(group_id):
signed_data = signer.sign(str(group_id))
return '@'.join((signed_data.replace(':', '+'), SMTP_HOSTNAME))
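# Hedged round-trip example (the signature below is illustrative; the real
# one depends on settings.SECRET_KEY):
#   group_id_to_email(42)                   -> '42+c2lnbmF0dXJl@localhost'
#   email_to_group_id('42+c2lnbmF0dXJl@localhost') -> 42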
def email_id_for_model(model):
return '<%s/%s@%s>' % (type(model).__name__.lower(), model.pk, FROM_EMAIL_DOMAIN)
def domain_from_email(email):
email = parseaddr(email)[1]
try:
return email.split('@', 1)[1]
    except IndexError:
        # No '@' in the address; it is likely malformed, so return it as-is.
        return email
FROM_EMAIL_DOMAIN = domain_from_email(settings.DEFAULT_FROM_EMAIL)
class MessageBuilder(object):
def __init__(self, subject, context=None, template=None, html_template=None,
body=None, html_body=None, headers=None, reference=None,
reply_reference=None):
assert not (body and template)
assert not (html_body and html_template)
assert context or not (template or html_template)
self.subject = subject
self.context = context or {}
self.template = template
self.html_template = html_template
self._txt_body = body
self._html_body = html_body
self.headers = headers
self.reference = reference # The object that generated this message
self.reply_reference = reply_reference # The object this message is replying about
self._send_to = set()
@cached_property
def html_body(self):
html_body = None
if self.html_template:
html_body = render_to_string(self.html_template, self.context)
else:
html_body = self._html_body
if html_body is not None:
return inline_css(html_body)
@cached_property
def txt_body(self):
if self.template:
return render_to_string(self.template, self.context)
return self._txt_body
@cached_property
def message_id(self):
if self.reference is not None:
return email_id_for_model(self.reference)
@cached_property
def reply_to_id(self):
if self.reply_reference is not None:
return email_id_for_model(self.reply_reference)
def add_users(self, user_ids, project=None):
from sentry.models import User, UserOption
email_list = set()
user_ids = set(user_ids)
# XXX: It's possible that options have been set to an empty value
if project:
queryset = UserOption.objects.filter(
project=project,
user__in=user_ids,
key='mail:email',
)
for option in (o for o in queryset if o.value):
user_ids.remove(option.user_id)
email_list.add(option.value)
if user_ids:
queryset = UserOption.objects.filter(
user__in=user_ids,
key='alert_email',
)
for option in (o for o in queryset if o.value):
try:
user_ids.remove(option.user_id)
email_list.add(option.value)
except KeyError:
# options.user_id might not exist in user_ids set
pass
if user_ids:
email_list |= set(filter(bool, User.objects.filter(
pk__in=user_ids, is_active=True,
).values_list('email', flat=True)))
self._send_to.update(email_list)
def build(self, to, reply_to=()):
if self.headers is None:
headers = {}
else:
headers = self.headers.copy()
if ENABLE_EMAIL_REPLIES and 'X-Sentry-Reply-To' in headers:
reply_to = headers['X-Sentry-Reply-To']
else:
reply_to = set(reply_to)
            reply_to.discard(to)  # discard() avoids a KeyError when 'to' is absent
reply_to = ', '.join(reply_to)
if reply_to:
headers.setdefault('Reply-To', reply_to)
if self.message_id is not None:
headers.setdefault('Message-Id', self.message_id)
subject = self.subject
if self.reply_to_id is not None:
headers.setdefault('In-Reply-To', self.reply_to_id)
headers.setdefault('References', self.reply_to_id)
subject = 'Re: %s' % subject
msg = EmailMultiAlternatives(
subject,
self.txt_body,
settings.SERVER_EMAIL,
(to,),
headers=headers
)
if self.html_body:
msg.attach_alternative(self.html_body, 'text/html')
return msg
def get_built_messages(self, to=None):
send_to = set(to or ())
send_to.update(self._send_to)
return [self.build(to=email, reply_to=send_to) for email in send_to]
def send(self, to=None, fail_silently=False):
messages = self.get_built_messages(to)
self.send_all(messages, fail_silently=fail_silently)
def send_all(self, messages, fail_silently=False):
connection = get_connection(fail_silently=fail_silently)
metrics.incr('email.sent', len(messages))
return connection.send_messages(messages)
def send_async(self, to=None):
from sentry.tasks.email import send_email
messages = self.get_built_messages(to)
for message in messages:
safe_execute(send_email.delay, message=message)
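# Hedged usage sketch; the template paths and variables are illustrative:
#
#   msg = MessageBuilder(
#       subject='[Sentry] New event',
#       template='sentry/emails/alert.txt',
#       html_template='sentry/emails/alert.html',
#       context={'group': group},
#       reference=group,
#   )
#   msg.add_users(member_ids, project=project)
#   msg.send_async()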
def inline_css(html):
return toronado.from_string(html)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake (in-memory) hypervisor+api.
Allows nova testing w/o a hypervisor. This module also documents the
semantics of real hypervisor connections.
"""
from oslo.config import cfg
from nova.compute import power_state
from nova.compute import task_states
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.virt import driver
from nova.virt import virtapi
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
_FAKE_NODES = None
def set_nodes(nodes):
    """Sets FakeDriver's node list.

    It has effect on the following methods:
        get_available_nodes()
        get_available_resource()
        get_host_stats()

    To restore the change, call restore_nodes().
    """
global _FAKE_NODES
_FAKE_NODES = nodes
def restore_nodes():
"""Resets FakeDriver's node list modified by set_nodes().
Usually called from tearDown().
"""
global _FAKE_NODES
_FAKE_NODES = [CONF.host]
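# Hedged usage sketch for tests (node names illustrative):
#
#   set_nodes(['node-a', 'node-b'])
#   try:
#       driver = FakeDriver(FakeVirtAPI())
#       assert driver.get_available_nodes() == ['node-a', 'node-b']
#   finally:
#       restore_nodes()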
class FakeInstance(object):
def __init__(self, name, state):
self.name = name
self.state = state
def __getitem__(self, key):
return getattr(self, key)
class FakeDriver(driver.ComputeDriver):
    """Fake hypervisor driver."""

    capabilities = {
        "has_imagecache": True,
        "supports_recreate": True,
    }
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
self.instances = {}
self.host_status_base = {
'host_name-description': 'Fake Host',
'host_hostname': CONF.host,
'host_memory_total': 8000000000,
'host_memory_overhead': 10000000,
'host_memory_free': 7900000000,
'host_memory_free_computed': 7900000000,
'host_other_config': {},
'host_ip_address': '192.168.1.109',
'host_cpu_info': {},
'disk_available': 500000000000,
'disk_total': 600000000000,
'disk_used': 100000000000,
'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
'host_name_label': 'fake-host',
'hypervisor_hostname': CONF.host,
}
self._mounts = {}
self._interfaces = {}
if not _FAKE_NODES:
set_nodes([CONF.host])
def init_host(self, host):
return
def list_instances(self):
return self.instances.keys()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
pass
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
pass
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
name = instance['name']
state = power_state.RUNNING
fake_instance = FakeInstance(name, state)
self.instances[name] = fake_instance
def live_snapshot(self, context, instance, name, update_task_state):
if instance['name'] not in self.instances:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
update_task_state(task_state=task_states.IMAGE_UPLOADING)
def snapshot(self, context, instance, name, update_task_state):
if instance['name'] not in self.instances:
raise exception.InstanceNotRunning(instance_id=instance['uuid'])
update_task_state(task_state=task_states.IMAGE_UPLOADING)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
pass
@staticmethod
def get_host_ip_addr():
return '192.168.0.1'
def set_admin_password(self, instance, new_pass):
pass
def inject_file(self, instance, b64_path, b64_contents):
pass
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
pass
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
pass
def unrescue(self, instance, network_info):
pass
def poll_rebooting_instances(self, timeout, instances):
pass
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
pass
def finish_revert_migration(self, instance, network_info,
block_device_info=None, power_on=True):
pass
def post_live_migration_at_destination(self, context, instance,
network_info,
block_migration=False,
block_device_info=None):
pass
def power_off(self, instance):
pass
def power_on(self, context, instance, network_info, block_device_info):
pass
def soft_delete(self, instance):
pass
def restore(self, instance):
pass
def pause(self, instance):
pass
def unpause(self, instance):
pass
def suspend(self, instance):
pass
def resume(self, instance, network_info, block_device_info=None):
pass
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
key = instance['name']
if key in self.instances:
del self.instances[key]
else:
LOG.warning(_("Key '%(key)s' not in instances '%(inst)s'") %
{'key': key,
'inst': self.instances}, instance=instance)
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach the disk to the instance at mountpoint using info."""
instance_name = instance['name']
if instance_name not in self._mounts:
self._mounts[instance_name] = {}
self._mounts[instance_name][mountpoint] = connection_info
return True
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach the disk attached to the instance."""
try:
del self._mounts[instance['name']][mountpoint]
except KeyError:
pass
return True
def attach_interface(self, instance, image_meta, network_info):
for (network, mapping) in network_info:
if mapping['vif_uuid'] in self._interfaces:
raise exception.InterfaceAttachFailed('duplicate')
self._interfaces[mapping['vif_uuid']] = mapping
def detach_interface(self, instance, network_info):
for (network, mapping) in network_info:
try:
del self._interfaces[mapping['vif_uuid']]
except KeyError:
raise exception.InterfaceDetachFailed('not attached')
def get_info(self, instance):
if instance['name'] not in self.instances:
raise exception.InstanceNotFound(instance_id=instance['name'])
i = self.instances[instance['name']]
return {'state': i.state,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
'cpu_time': 0}
def get_diagnostics(self, instance_name):
return {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
'vda_read': 262144,
'vda_read_req': 112,
'vda_write': 5778432,
'vda_write_req': 488,
'vnet1_rx': 2070139,
'vnet1_rx_drop': 0,
'vnet1_rx_errors': 0,
'vnet1_rx_packets': 26701,
'vnet1_tx': 140208,
'vnet1_tx_drop': 0,
'vnet1_tx_errors': 0,
'vnet1_tx_packets': 662,
}
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
"""
bw = []
return bw
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
volusage = []
return volusage
def block_stats(self, instance_name, disk_id):
return [0L, 0L, 0L, 0L, None]
def interface_stats(self, instance_name, iface_id):
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
def get_console_output(self, instance):
return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE'
def get_vnc_console(self, instance):
return {'internal_access_path': 'FAKE',
'host': 'fakevncconsole.com',
'port': 6969}
def get_spice_console(self, instance):
return {'internal_access_path': 'FAKE',
'host': 'fakespiceconsole.com',
'port': 6969,
'tlsPort': 6970}
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
return True
def refresh_security_group_members(self, security_group_id):
return True
def refresh_instance_security_rules(self, instance):
return True
def refresh_provider_fw_rules(self):
pass
def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
Since we don't have a real hypervisor, pretend we have lots of
disk and ram.
"""
if nodename not in _FAKE_NODES:
return {}
dic = {'vcpus': 1,
'memory_mb': 8192,
'local_gb': 1028,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': '1.0',
'hypervisor_hostname': nodename,
'cpu_info': '?'}
return dic
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
def get_instance_disk_info(self, instance_name):
return
def live_migration(self, context, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
return
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
return
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
return {}
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
return
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
return
def confirm_migration(self, migration, instance, network_info):
return
def pre_live_migration(self, context, instance_ref, block_device_info,
network_info, migrate_data=None):
return
def unfilter_instance(self, instance_ref, network_info):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
def test_remove_vm(self, instance_name):
"""Removes the named VM, as if it crashed. For testing."""
self.instances.pop(instance_name)
def get_host_stats(self, refresh=False):
"""Return fake Host Status of ram, disk, network."""
stats = []
for nodename in _FAKE_NODES:
host_status = self.host_status_base.copy()
host_status['hypervisor_hostname'] = nodename
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
stats.append(host_status)
if len(stats) == 0:
raise exception.NovaException("FakeDriver has no node")
elif len(stats) == 1:
return stats[0]
else:
return stats
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
return action
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
if not mode:
return 'off_maintenance'
return 'on_maintenance'
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
if enabled:
return 'enabled'
return 'disabled'
def get_disk_available_least(self):
pass
def get_volume_connector(self, instance):
return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'}
def get_available_nodes(self):
return _FAKE_NODES
def instance_on_disk(self, instance):
return False
def list_instance_uuids(self):
return []
def legacy_nwinfo(self):
return True
class FakeVirtAPI(virtapi.VirtAPI):
def instance_update(self, context, instance_uuid, updates):
return db.instance_update_and_get_original(context,
instance_uuid,
updates)
def aggregate_get_by_host(self, context, host, key=None):
return db.aggregate_get_by_host(context, host, key=key)
def aggregate_metadata_add(self, context, aggregate, metadata,
set_delete=False):
return db.aggregate_metadata_add(context, aggregate['id'], metadata,
set_delete=set_delete)
def aggregate_metadata_delete(self, context, aggregate, key):
return db.aggregate_metadata_delete(context, aggregate['id'], key)
def security_group_get_by_instance(self, context, instance):
return db.security_group_get_by_instance(context, instance['uuid'])
def security_group_rule_get_by_security_group(self, context,
security_group):
return db.security_group_rule_get_by_security_group(
context, security_group['id'])
def provider_fw_rule_get_all(self, context):
return db.provider_fw_rule_get_all(context)
def agent_build_get_by_triple(self, context, hypervisor, os, architecture):
return db.agent_build_get_by_triple(context,
hypervisor, os, architecture)
def instance_type_get(self, context, instance_type_id):
return db.instance_type_get(context, instance_type_id)
|
|
#!/usr/bin/python
from __future__ import print_function
import argparse
import hashlib
import json
import logging
import os
import pipes
import sys
READ_SIZE = 1024 * 4
if sys.version_info < (3, 0):
    if not hasattr(sys, 'setdefaultencoding'):
        reload(sys)
    sys.setdefaultencoding('utf8')
class Error(Exception):
pass
class BadRoot(Error):
pass
class CannotCreateCacheFile(Error):
def __init__(self, cache_path):
self.cache_path = cache_path
def __str__(self):
return 'Cannot create cache file "%s"' % self.cache_path
class CacheItem(object):
"""Represents a file on disk with properties evaluated lazily."""
def __init__(self, abspath, mtime=None, size=None, checksum=None):
self.abspath = abspath
self._mtime = mtime
self._size = size
self._checksum = checksum
def __str__(self):
return '%(abspath)s: mtime=%(_mtime)s size=%(_size)s checksum=%(_checksum)s' % (self.__dict__)
def __repr__(self):
return str(self)
@property
def checksum(self):
if self._checksum:
return self._checksum
logging.info('calculating checksum for %s', self.abspath)
m = hashlib.sha1()
with open(self.abspath, 'rb') as f:
while True:
b = f.read(READ_SIZE)
if not b:
break
m.update(b)
self._checksum = m.hexdigest()
return self._checksum
def _stat(self):
stat = os.stat(self.abspath)
logging.debug('statting %s', self.abspath)
self._size = stat.st_size
self._mtime = int(stat.st_mtime)
def verify(self):
"""Verify the cache item hasn't changed.
For speed, only looks at mtime and size."""
if self._size is None or self._mtime is None:
logging.debug('%s does not have stats in the cache', self.abspath)
return False
logging.debug('checking if %s has changed', self.abspath)
stat = os.stat(self.abspath)
return self._size == stat.st_size and self._mtime == int(stat.st_mtime)
@property
def size(self):
if self._size is not None:
return self._size
self._stat()
return self._size
@property
def mtime(self):
if self._mtime is not None:
return self._mtime
self._stat()
return self._mtime
class CacheItemEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, CacheItem):
# Return the serialized item, and avoid evaluation of properties.
return [obj.abspath, obj._mtime, obj._size, obj._checksum]
return json.JSONEncoder.default(self, obj)
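# Hedged example: CacheItemEncoder emits the one-array-per-line format that
# load_cache() parses back into CacheItems:
#
#   json.dumps(CacheItem('/tmp/f', mtime=1, size=2, checksum='ab'),
#              cls=CacheItemEncoder)
#   -> '["/tmp/f", 1, 2, "ab"]'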
def hashwalk(root, cache=None):
    """Walk a tree and create CacheItems for files.

    Args:
        root: directory tree to walk recursively.
        cache: optional dict keyed by abspath, containing CacheItems. Will be
            modified.

    Yields:
        CacheItems found.
    """
if not os.path.exists(root):
raise BadRoot('%s does not exist' % root)
if not os.path.isdir(root):
raise BadRoot('%s is not a directory' % root)
logging.debug('walking %s', root)
for (dirpath, dirnames, filenames) in os.walk(root):
for filename in filenames:
abspath = os.path.join(dirpath, filename)
            if os.path.islink(abspath):
                # Symlinks are skipped entirely; drop any stale cache entry,
                # guarding against the cache-less call where cache is None.
                if cache is not None and abspath in cache:
                    logging.debug('removing cache entry for %s', abspath)
                    del cache[abspath]
continue
# If the item is in the cache and it hasn't changed.
if cache is not None:
if abspath in cache and cache[abspath].verify():
logging.debug('cache hit for %s', abspath)
yield cache[abspath]
else:
logging.debug('cache miss for %s', abspath)
cache[abspath] = CacheItem(abspath)
yield cache[abspath]
else:
yield CacheItem(abspath)
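# Hedged usage sketch: walking a tree while reusing a cache dict, so repeated
# runs skip re-checksumming unchanged files. The root path is hypothetical.
def _hashwalk_example(root='/data/photos'):
    cache = {}
    for item in hashwalk(root, cache=cache):
        logging.debug('%s is %d bytes', item.abspath, item.size)
    return cache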
def get_tree_filesizes(
root, cache, min_filesize=1024*1024*1024, show_source_dupes=True):
"""Walk a tree, get CacheItems.
Args:
root: root directory path
cache: optional dict keyed by abspath, containing CacheItems. Will be
modified.
min_filesize: minimum filesize in bytes to look at (default: 1GB)
show_source_dupes: report dupes in the source tree (default: True)
Returns:
Dictionary keyed by file size, containing lists of CacheItem objects.
"""
logging.info('walking %s...', root)
sizes = {}
num_items = 0
for cacheitem in hashwalk(root, cache=cache):
if cacheitem.size < min_filesize:
logging.info('skipping small file %s (size=%s < min=%s)',
cacheitem.abspath, cacheitem.size, min_filesize)
continue
num_items += 1
if show_source_dupes and cacheitem.size in sizes:
for other in sizes[cacheitem.size]:
logging.debug('found files with same size: %s and %s',
cacheitem.abspath, other.abspath)
if cacheitem.checksum == other.checksum:
# Print collisions in the tree
logging.warning('%s: %s == %s',
root,
cacheitem.abspath.replace(root, '', 1).lstrip('/'),
other.abspath.replace(root, '', 1).lstrip('/'))
sizes.setdefault(cacheitem.size, []).append(cacheitem)
logging.info('saw %d files and %d sizes in %s.', num_items, len(sizes), root)
return sizes
def print_cleanup_command(other, cleanup):
print('ln -sf %s %s' % (pipes.quote(other), pipes.quote(cleanup)))
def create_symlink(other, cleanup):
logging.info('creating link %s -> %s', cleanup, other)
os.unlink(cleanup)
os.symlink(other, cleanup)
def prompt_before_symlinking(other, cleanup):
sys.stdout.write('Create symlink %s -> %s? [Y/n] ' % (cleanup, other))
choice = raw_input().lower()
if choice.startswith('n'):
logging.info('Not creating symlink for %s', cleanup)
else:
create_symlink(other, cleanup)
def cleanup_tree(root, sizes, cache, callback):
logging.info('attempting to clean up %s', root)
num_deduped = 0
for cacheitem in hashwalk(root, cache):
if cacheitem.size not in sizes:
logging.debug('%s is unique', cacheitem)
continue
for other in sizes[cacheitem.size]:
if other.checksum != cacheitem.checksum:
continue
if os.path.realpath(other.abspath) == os.path.realpath(cacheitem.abspath):
logging.error("not symlinking %s to %s: they're the same file",
other.abspath, cacheitem.abspath)
else:
callback(other.abspath, cacheitem.abspath)
num_deduped += 1
break
logging.info('deduped %d files', num_deduped)
def load_cache(cache_path):
"""Read a dict of CacheItems from a JSON-encoded file.
Args:
      cache_path: path of the JSON-encoded cache file to read
Returns:
dictionary keyed by abspath of CacheItems.
"""
cache = {}
if os.path.exists(cache_path):
with open(cache_path, 'r') as f:
for line in f:
values = json.loads(line)
cache_item = CacheItem(
values[0], mtime=values[1], size=values[2],
checksum=values[3])
cache[cache_item.abspath] = cache_item
else:
        if not os.access(os.path.dirname(cache_path) or '.', os.W_OK):
            raise CannotCreateCacheFile(cache_path)
        logging.warning('cache file %s does not exist', cache_path)
return cache
def write_cache(cache_path, cache):
"""Write a dictionary of CacheItems to a JSON-encoded file.
Args:
cache_path: file to write
cache: dictionary of CacheItems
"""
# Write to a temp file, then swap to the final destination.
tmp_output = cache_path + '.tmp'
with open(tmp_output, 'w') as f:
for cache_key in cache:
assert cache[cache_key].abspath == cache_key
print(json.dumps(cache[cache_key], cls=CacheItemEncoder), file=f)
os.rename(tmp_output, cache_path)
logging.info('wrote cache to %s', cache_path)
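# Hedged sketch (illustrative paths only) of a load -> update -> write round
# trip; write_cache swaps a temp file into place, so a crash mid-write cannot
# truncate an existing cache.
def _cache_roundtrip_example(path='/tmp/dedooper.cache'):
    cache = load_cache(path)
    cache['/tmp/example.bin'] = CacheItem(
        '/tmp/example.bin', mtime=0, size=0, checksum='deadbeef')
    write_cache(path, cache)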
def main():
    parser = argparse.ArgumentParser(
        description='Find duplicate files and turn half of them into symlinks.')
parser.add_argument(
'source', help='Directory root containing original files.')
parser.add_argument(
'cleanup',
help='Directory root containing duplicate files '
'to be turned into symlinks.')
parser.add_argument(
'--cache_file',
default=os.path.expanduser('~/.dedooper.cache'),
help='where to store the cache')
parser.add_argument(
'--show_source_dupes',
dest='show_source_dupes',
action='store_true',
help='Show duplicate files in the source directory')
parser.add_argument(
'--noshow_source_dupes',
dest='show_source_dupes',
action='store_false',
help="Don't show duplicate files in the source directory")
parser.set_defaults(show_source_dupes=False)
parser.add_argument(
'--dry_run',
dest='dry_run',
action='store_true',
help="Print commands, don't execute them")
parser.add_argument(
'--nodry_run',
dest='dry_run',
action='store_false',
help='Execute commands')
parser.set_defaults(dry_run=True)
parser.add_argument(
'--prompt',
dest='prompt',
action='store_true',
help='Prompt before creating symlinks')
parser.add_argument(
'--noprompt',
dest='prompt',
action='store_false',
help="Don't prompt before creating symlinks")
parser.set_defaults(prompt=True)
parser.add_argument(
'--log_level',
dest='log_level',
default=logging.INFO,
help='DEBUG, INFO, WARNING, ERROR, or CRITICAL (default: INFO)')
parser.add_argument(
'--min_filesize',
dest='min_filesize',
type=int,
default=1024*1024*300,
help='minimum filesize in bytes to look at (default: 300MB)')
args = parser.parse_args()
logging.basicConfig(
level=args.log_level,
format='%(levelname)-8s: %(message)s')
if os.path.realpath(args.source) == os.path.realpath(args.cleanup):
logging.critical('source dir is the same as cleanup dir')
sys.exit(1)
# Find out what's in the source tree.
cache = {}
try:
cache = load_cache(args.cache_file)
except CannotCreateCacheFile as e:
logging.critical(e)
sys.exit(1)
try:
src_filesizes = get_tree_filesizes(
os.path.abspath(args.source),
cache,
min_filesize=args.min_filesize,
show_source_dupes=args.show_source_dupes)
except BadRoot as e:
logging.critical(e)
sys.exit(1)
finally:
write_cache(args.cache_file, cache)
# Pick a cleanup strategy.
callback = print_cleanup_command
if not args.dry_run:
if args.prompt:
callback = prompt_before_symlinking
else:
callback = create_symlink
# Clean up the cleanup tree.
try:
cleanup_tree(
os.path.abspath(args.cleanup),
src_filesizes,
cache,
callback=callback)
except BadRoot as e:
logging.critical(e)
sys.exit(1)
finally:
write_cache(args.cache_file, cache)
if __name__ == '__main__':
main()
|
|
"""
Created on 17 Dec 2012
@author: Zehan Wang
"""
from __future__ import division
import os
import math
from numpy.lib.stride_tricks import as_strided as ast
from numpy.core.umath import logical_and
import numpy
from spatch.utilities.io import open_image, get_voxel_size
from spatch.utilities.misc import auto_non_background_labels
import mask
from transform import interpolate_to_shape
from spatialcontext import region_dict_from_dt_dict, get_dt_spatial_context_dict, \
EDT, GDT, GENERIC_SPATIAL_INFO_TYPES, get_generic_spatial_info
from transform import zero_out_boundary, image_boundary_expand
from intensity import rescale_data
__all__ = ["PatchMaker", "DataSetPatchMaker", "get_patches"]
class PatchMaker(object):
"""
Creates patches from an image
"""
def __init__(self, imagePath, labelsPath, imageExpand=False, specificLabels=None, boundaryLabels=None,
minValue=None, maxValue=None, dtLabelsPath=None, gdtImagePath=None,
is2D=False, rescaleIntensities=False):
"""
Constructor
if rescale: patches and spatial data will be rescaled to be in range [0, 100]
"""
self.fileName = os.path.basename(imagePath)
self.imagePath = imagePath
self.labelsPath = labelsPath
self.dtLabelsPath = dtLabelsPath
# self.image = open_image(imagePath)
# self.labelsData = numpy.int8(open_image(labelsPath))
self.imageExpand = imageExpand
self.minValue = minValue
self.maxValue = maxValue
self.specificLabels = specificLabels
self.boundaryLabels = boundaryLabels
self.is2D = is2D
self.rescaleIntensities = rescaleIntensities
self.gdtImagePath = gdtImagePath
def _get_labels_data(self):
labelsData = open_image(self.labelsPath)
if self.imageExpand:
labelsData = image_boundary_expand(labelsData, useGradient=False, is2D=self.is2D)
return labelsData
def _get_image_data(self):
imageData = open_image(self.imagePath)
if self.imageExpand:
imageData = image_boundary_expand(imageData, is2D=self.is2D)
return imageData
def _get_dt_image_data(self, spatialInfoType, imageData):
dtImage = None
if spatialInfoType == GDT:
if self.gdtImagePath is not None:
dtImage = open_image(self.gdtImagePath)
elif self.imageExpand:
dtImage = imageData[1:-1, 1:-1, 1:-1]
else:
dtImage = imageData
return dtImage
def _get_dt_voxel_size(self):
if self.gdtImagePath is not None:
return get_voxel_size(self.gdtImagePath)
else:
return get_voxel_size(self.labelsPath)
def get_patch_dict(self, patchSize, spatialWeight=0, boundaryDilation=None, spatialRegionLabels=None,
spatialRegionIndex=None, spatialLabels=None, dtSeeds=None,
labelErosion=0, boundaryClipSize=0, spatialInfoType=EDT, roiMask=None, separateSpatial=False,
includePatchSizeKey=False):
"""
@param patchSize: int or tuple to determine size of patch. if int, assume isotropic
@param spatialWeight: spatial weighting to apply if using spatial info
@param boundaryDilation: boundary around labels (if getting boundary refinement patches)
@param spatialRegionIndex: (index of region -> indexed by edt labels) or counts from 0 if spatial labels not
provided
@param spatialLabels: labels to use to get edt/gdt based spatial info, if none
@param labelErosion:
@param boundaryClipSize:
@param roiMask:
@return: a dictionary of {label: patches}
"""
imageData = self._get_image_data()
labelsData = open_image(self.labelsPath)
# put check in that transformed atlas actually overlaps
if labelsData.max() == 0:
return None
elif spatialLabels is not None:
uniqueLabels = numpy.unique(labelsData)
if set(uniqueLabels).intersection(set(spatialLabels)) == set():
return None
voxelSize = self._get_dt_voxel_size()
getBoundaryPatches = roiMask is None
if self.imageExpand:
try:
boundaryClipSize -= 1
except TypeError:
boundaryClipSize = numpy.asarray(boundaryClipSize) - 1
spatialLabelDict = None
spatialData = None
# get spatial information
if spatialWeight > 0 and spatialInfoType is not None:
if spatialInfoType in GENERIC_SPATIAL_INFO_TYPES:
spatialData = get_generic_spatial_info(imageData, spatialInfoType)
else:
if dtSeeds is not None:
dtLabelsData = dtSeeds
else:
dtLabelsData = labelsData
if self.dtLabelsPath is not None:
dtLabelsData = open_image(self.dtLabelsPath)
if dtLabelsData.shape != labelsData.shape:
dtLabelsData = interpolate_to_shape(dtLabelsData, labelsData.shape)
dtImage = self._get_dt_image_data(spatialInfoType, imageData)
spatialLabelDict = get_dt_spatial_context_dict(dtLabelsData, spatialInfoType,
spatialLabels=spatialLabels,
voxelSize=voxelSize, labelErosion=labelErosion,
boundaryClipSize=boundaryClipSize,
imageData=dtImage, is2D=self.is2D,
imageExpand=self.imageExpand)
spatialData = spatialLabelDict.values()
spatialData = numpy.asarray(spatialData) * spatialWeight
# get regional mask
if roiMask is not None:
roiMask = interpolate_to_shape(roiMask, imageData.shape)
if spatialRegionIndex is not None:
if spatialRegionLabels is None:
spatialRegionLabels = spatialLabels
if spatialLabelDict is None:
dtImage = self._get_dt_image_data(spatialInfoType, imageData)
dtLabelsData = labelsData
if self.dtLabelsPath is not None:
dtLabelsData = open_image(self.dtLabelsPath)
spatialLabelDict = get_dt_spatial_context_dict(dtLabelsData, spatialInfoType,
spatialLabels=spatialLabels,
voxelSize=voxelSize, labelErosion=labelErosion,
boundaryClipSize=boundaryClipSize,
imageData=dtImage, is2D=self.is2D,
imageExpand=self.imageExpand)
regionMask = region_dict_from_dt_dict(dict((l, spatialLabelDict[l]) for l in spatialRegionLabels),
regionalOverlap=boundaryDilation,
specificRegionIndex=spatialRegionIndex, is2D=self.is2D)
if roiMask is None:
roiMask = regionMask
else:
roiMask = logical_and(roiMask, regionMask)
# get overall label and data masks
if self.specificLabels is None:
self.specificLabels = numpy.unique(labelsData)
if self.boundaryLabels is None:
self.boundaryLabels = auto_non_background_labels(labelsData)
if self.imageExpand:
labelsData = image_boundary_expand(labelsData, useGradient=False, is2D=self.is2D)
labelMasks = mask.get_label_masks(labelsData, self.specificLabels)
boundarySize = boundaryDilation
if not getBoundaryPatches:
boundarySize = None
maskData = mask.get_data_mask(imageData, labelsData, boundarySize=boundarySize,
specificLabels=self.boundaryLabels, roiMask=roiMask,
minValue=self.minValue, maxValue=self.maxValue)
if maskData is not None:
labelMasks = [logical_and(m, maskData) for m in labelMasks]
if self.rescaleIntensities:
imageData = rescale_data(imageData, maskData=maskData)
patchDict = dict((self.specificLabels[i], get_patches(imageData, patchSize, labelMasks[i],
spatialData=spatialData, separateSpatial=separateSpatial))
for i in xrange(len(labelMasks)))
if includePatchSizeKey:
patchDict["patchSize"] = patchSize
return patchDict
def patch_view(data, patchSize=(3, 3, 3)):
"""Returns a view of overlapping patches from the data"""
if isinstance(patchSize, int):
patchSize = (patchSize,) * 3
elif len(patchSize) == 1:
patchSize = (patchSize[0],) * 3
else:
patchSize = tuple(patchSize)
shape = tuple(numpy.asarray(data.shape) - numpy.asarray(patchSize) + 1) + patchSize
strides = data.strides + data.strides
patchMatrix = ast(data, shape=shape, strides=strides)
return patchMatrix
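# Hedged sketch: patch_view returns a strided view whose first three axes
# index patch positions and whose last three axes index voxels in a patch.
def _patch_view_example():
    data = numpy.arange(27).reshape((3, 3, 3))
    patches = patch_view(data, patchSize=2)
    return patches.shape  # (2, 2, 2, 2, 2, 2): eight overlapping 2x2x2 patches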
def non_overlapping_patch_view(data, patchSize):
    if isinstance(patchSize, int):
        patchSize = (patchSize,) * 3
    elif len(patchSize) == 1:
        patchSize = (patchSize[0],) * 3
    else:
        patchSize = tuple(patchSize)
    # Integer division: only whole patches that fit inside the data are viewed.
    shape = tuple(numpy.asarray(data.shape) // numpy.asarray(patchSize)) + patchSize
    strides = tuple(numpy.asarray(data.strides) * numpy.asarray(patchSize)) + data.strides
    patchMatrix = ast(data, shape=shape, strides=strides)
    return patchMatrix
def get_patches(imageData, patchSize, maskData=None, spatialData=None, separateSpatial=False,
verbose=False, overlapping=True):
if overlapping:
return get_patches_from_ast_data(patch_view(imageData, patchSize), maskData=maskData,
spatialData=spatialData, verbose=verbose, separateSpatial=separateSpatial)
else:
#TODO may need to trim maskData and spatial data if shape not exact multiple of patchSize
return get_patches_from_ast_data(non_overlapping_patch_view(imageData, patchSize), maskData=maskData,
spatialData=spatialData, verbose=verbose, separateSpatial=separateSpatial)
def get_non_overlapping_patches(imageData, patchSize, maskData=None, spatialData=None, verbose=False):
return get_patches_from_ast_data(non_overlapping_patch_view(imageData, patchSize), maskData=maskData,
spatialData=spatialData, verbose=verbose)
def get_patches_from_ast_data(imageData, maskData=None, spatialData=None, verbose=False, separateSpatial=False):
""""
Fast efficient patch extraction
Assumes imageData is patch_view on data
maskData is of the same shape as original image data (not as patch view)
Assumes spatialData is a list of 3D matrices if provided
If normaliseFeatures: imageData and spatialData will each be normalised to be in range [0, 100]
by max of imageData and spatialData respectively
"""
# align mask to strided data
offsetX = int(math.floor(imageData.shape[3] / 2))
offsetY = int(math.floor(imageData.shape[4] / 2))
offsetZ = int(math.floor(imageData.shape[5] / 2))
if maskData is None:
maskData = numpy.ones(numpy.asarray(imageData.shape[:3]) + numpy.asarray(imageData.shape[3:]) - 1, numpy.bool)
if offsetZ > 0:
alignedMask = maskData[offsetX:-offsetX, offsetY:-offsetY, offsetZ:-offsetZ]
else:
alignedMask = maskData[offsetX:-offsetX, offsetY:-offsetY]
numPatches = numpy.count_nonzero(alignedMask)
patches = imageData[alignedMask].reshape(numPatches, numpy.prod(imageData.shape[3:]))
if spatialData is not None:
# don't include anything outside boundaries
maskData = zero_out_boundary(maskData, (offsetX, offsetY, offsetZ))
spatialData = numpy.asarray(spatialData)
try:
spatialInfo = spatialData[..., maskData]
except:
print "Shapes:", spatialData.shape, maskData.shape
raise
spatialInfo = numpy.atleast_2d(numpy.transpose(spatialInfo))
# assumes returned spatialInfo will be in the same ordering as patches
if separateSpatial:
patches = (patches, spatialInfo)
else:
patches = numpy.append(patches, spatialInfo, 1)
if verbose:
print "Number of patches created:", numPatches
return patches
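# Hedged sketch: with no mask every full patch position is extracted, each
# flattened to one row.
def _get_patches_example():
    data = numpy.random.rand(5, 5, 5)
    patches = get_patches(data, 3)
    return patches.shape  # (27, 27): (5-3+1)**3 patches of 3*3*3 voxels each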
|
|
from collections import OrderedDict
from numpy import array, intersect1d, random, zeros
from pymc import database
from uncertainties import ufloat
from uncertainties.unumpy import uarray, nominal_values, std_devs
from Astro_Libraries.Abundances_Class import Chemical_Analysis
def import_plots_wording(pv):
Titles_dict = OrderedDict()
Colors_dict = OrderedDict()
    Titles_dict['O_Regression'] = ('Oxygen abundance', 'Primordial Helium regression: Oxygen metallicity tracer', r'$\frac{O}{H}$', r'$Y_{\frac{O}{H}}$', "Oxygen", 1e-6)
Titles_dict['N_Regression'] = ('Nitrogen abundance', 'Primordial Helium regression: Nitrogen metallicity tracer', r'$\frac{N}{H}$', r'$Y_{\frac{N}{H}}$', "Nitrogen", 1e-5)
Titles_dict['S_Regression'] = ('Sulfur abundance', 'Primordial Helium regression: Sulfur metallicity tracer', r'$\frac{S}{H}$', r'$Y_{\frac{S}{H}}$', "Sulfur", 1e-5)
Titles_dict['S_ArCorr_Regression'] = ('Sulfur abundance with argon correction', 'Primordial Helium regression: Sulfur metallicity tracer', r'$\frac{S}{H}$', r'$Y_{\frac{S}{H}}$', "Sulfur", 1e-5)
    Titles_dict['O_Regression_Inference'] = ('Oxygen abundance inference', 'Primordial Helium regression: Oxygen metallicity tracer', r'$\frac{O}{H}$', r'$Y_{\frac{O}{H}}$', "Oxygen", 1e-6)
Titles_dict['N_Regression_Inference'] = ('Nitrogen abundance inference', 'Primordial Helium regression: Nitrogen metallicity tracer', r'$\frac{N}{H}$', r'$Y_{\frac{N}{H}}$', "Nitrogen", 1e-5)
Titles_dict['S_Regression_Inference'] = ('Sulfur abundance inference', 'Primordial Helium regression: Sulfur metallicity tracer', r'$\frac{S}{H}$', r'$Y_{\frac{S}{H}}$', "Sulfur", 1e-5)
Titles_dict['S_ArCorr_Regression_Inference'] = ('Sulfur abundance with argon correction inference', 'Primordial Helium regression: Sulfur metallicity tracer', r'$\frac{S}{H}$', r'$Y_{\frac{S}{H}}$', "Sulfur", 1e-5)
Colors_dict['O_Regression'] = 1
Colors_dict['N_Regression'] = 2
Colors_dict['S_Regression'] = 3
Colors_dict['S_ArCorr_Regression'] = 3
Colors_dict['O_Regression_Inference'] = 1
Colors_dict['N_Regression_Inference'] = 2
Colors_dict['S_Regression_Inference'] = 3
Colors_dict['S_ArCorr_Regression_Inference'] = 3
return Titles_dict, Colors_dict
def import_data_from_objLog(FilesList, Objects_Include, pv):
List_Abundances = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr', 'Y_Mass_O', 'Y_Mass_S', 'Y_Inference_O', 'Y_Inference_S']
#List_Abundances = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr', 'Y_Mass_O', 'Y_Mass_S', 'Y_inf_O', 'Y_inf_S']
#Dictionary of dictionaries to store object abundances
Abund_dict = OrderedDict()
for abund in List_Abundances:
Abund_dict[abund] = OrderedDict()
#Loop through files
for i in range(len(FilesList)):
#Analyze file address
CodeName, FileName, FileFolder = pv.Analyze_Address(FilesList[i])
if CodeName in Objects_Include:
#Loop through abundances in the log
for abund in List_Abundances:
Abund_Mag = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter = abund, Assumption = 'float')
                #If the abundance was measured, store it
                if Abund_Mag is not None:
Abund_dict[abund][CodeName] = Abund_Mag
#Dictionary to store objects with abundances pairs for regressions.
#As an initial value for the keys we define the abundances we want to use for the regression
Abundances_Pairs_dict = OrderedDict()
Abundances_Pairs_dict['O_Regression'] = ('OI_HI','Y_Mass_O')
Abundances_Pairs_dict['N_Regression'] = ('NI_HI','Y_Mass_O')
Abundances_Pairs_dict['S_Regression'] = ('SI_HI','Y_Mass_S')
Abundances_Pairs_dict['S_ArCorr_Regression'] = ('SI_HI_ArCorr','Y_Mass_S')
Abundances_Pairs_dict['O_Regression_Inference'] = ('OI_HI','Y_Inference_O')
Abundances_Pairs_dict['N_Regression_Inference'] = ('NI_HI','Y_Inference_O')
Abundances_Pairs_dict['S_Regression_Inference'] = ('SI_HI','Y_Inference_S')
Abundances_Pairs_dict['S_ArCorr_Regression_Inference'] = ('SI_HI_ArCorr','Y_Inference_S')
#Loop through the regression lists and get objects with both abundances observed
for j in range(len(Abundances_Pairs_dict)):
#Get the elements keys for the regression
Vector, Elem_X, Elem_Y = Abundances_Pairs_dict.keys()[j], Abundances_Pairs_dict.values()[j][0], Abundances_Pairs_dict.values()[j][1]
#Determine objects with both abundances observed
Obj_vector = intersect1d(Abund_dict[Elem_X].keys(), Abund_dict[Elem_Y].keys(), assume_unique = True)
X_vector = zeros(len(Obj_vector))
Y_vector = zeros(len(Obj_vector))
X_vector_E = zeros(len(Obj_vector))
Y_vector_E = zeros(len(Obj_vector))
#Generate abundances vectors
for z in range(len(Obj_vector)):
X_vector[z] = nominal_values(Abund_dict[Elem_X][Obj_vector[z]])
X_vector_E[z] = std_devs(Abund_dict[Elem_X][Obj_vector[z]])
Y_vector[z] = nominal_values(Abund_dict[Elem_Y][Obj_vector[z]])
Y_vector_E[z] = std_devs(Abund_dict[Elem_Y][Obj_vector[z]])
Abundances_Pairs_dict[Vector] = (list(Obj_vector), uarray(X_vector, X_vector_E), uarray(Y_vector, Y_vector_E))
return Abundances_Pairs_dict
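#Hedged sketch (illustrative values only) of the uncertainties arrays built
#above: uarray pairs nominal values with standard deviations, recoverable via
#nominal_values() and std_devs().
def _uarray_example():
    x = uarray([8.1e-5, 9.3e-5], [0.4e-5, 0.5e-5])
    return nominal_values(x), std_devs(x)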
def import_data_from_objLog_pyneb(FilesList, Objects_Include, pv):
List_Abundances = ['OI_HI_pn', 'NI_HI_pn', 'SI_HI_pn', 'SI_HI_ArCorr_pn', 'Y_Mass_O_pn', 'Y_Mass_S_pn', 'Y_Inference_O_pn', 'Y_Inference_S_pn']
#List_Abundances = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr', 'Y_Mass_O', 'Y_Mass_S', 'Y_inf_O', 'Y_inf_S']
#Dictionary of dictionaries to store object abundances
Abund_dict = OrderedDict()
for abund in List_Abundances:
Abund_dict[abund] = OrderedDict()
#Loop through files
for i in range(len(FilesList)):
#Analyze file address
CodeName, FileName, FileFolder = pv.Analyze_Address(FilesList[i])
if CodeName in Objects_Include:
#Loop through abundances in the log
for abund in List_Abundances:
Abund_Mag = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter = abund, Assumption = 'float')
                #If the abundance was measured, store it
                if Abund_Mag is not None:
Abund_dict[abund][CodeName] = Abund_Mag
#Dictionary to store objects with abundances pairs for regressions.
#As an initial value for the keys we define the abundances we want to use for the regression
Abundances_Pairs_dict = OrderedDict()
Abundances_Pairs_dict['O_Regression'] = ('OI_HI_pn','Y_Mass_O_pn')
Abundances_Pairs_dict['N_Regression'] = ('NI_HI_pn','Y_Mass_O_pn')
Abundances_Pairs_dict['S_Regression'] = ('SI_HI_pn','Y_Mass_S_pn')
Abundances_Pairs_dict['S_ArCorr_Regression'] = ('SI_HI_ArCorr_pn','Y_Mass_S_pn')
Abundances_Pairs_dict['O_Regression_Inference'] = ('OI_HI_pn','Y_Inference_O_pn')
Abundances_Pairs_dict['N_Regression_Inference'] = ('NI_HI_pn','Y_Inference_O_pn')
Abundances_Pairs_dict['S_Regression_Inference'] = ('SI_HI_pn','Y_Inference_S_pn')
Abundances_Pairs_dict['S_ArCorr_Regression_Inference'] = ('SI_HI_ArCorr_pn','Y_Inference_S_pn')
#Loop through the regression lists and get objects with both abundances observed
for j in range(len(Abundances_Pairs_dict)):
#Get the elements keys for the regression
Vector, Elem_X, Elem_Y = Abundances_Pairs_dict.keys()[j], Abundances_Pairs_dict.values()[j][0], Abundances_Pairs_dict.values()[j][1]
#Determine objects with both abundances observed
Obj_vector = intersect1d(Abund_dict[Elem_X].keys(), Abund_dict[Elem_Y].keys(), assume_unique = True)
X_vector = zeros(len(Obj_vector))
Y_vector = zeros(len(Obj_vector))
X_vector_E = zeros(len(Obj_vector))
Y_vector_E = zeros(len(Obj_vector))
#Generate abundances vectors
for z in range(len(Obj_vector)):
X_vector[z] = nominal_values(Abund_dict[Elem_X][Obj_vector[z]])
X_vector_E[z] = std_devs(Abund_dict[Elem_X][Obj_vector[z]])
Y_vector[z] = nominal_values(Abund_dict[Elem_Y][Obj_vector[z]])
Y_vector_E[z] = std_devs(Abund_dict[Elem_Y][Obj_vector[z]])
Abundances_Pairs_dict[Vector] = (list(Obj_vector), uarray(X_vector, X_vector_E), uarray(Y_vector, Y_vector_E))
return Abundances_Pairs_dict
def import_data_from_objLog_Triple(FilesList, Objects_Include, pv):
List_Abundances = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr', 'Y_Mass_O', 'Y_Mass_S', 'Y_Inference_O', 'Y_Inference_S']
#List_Abundances = ['OI_HI', 'NI_HI', 'SI_HI', 'SI_HI_ArCorr', 'Y_Mass_O', 'Y_Mass_S', 'Y_inf_O', 'Y_inf_S']
#Dictionary of dictionaries to store object abundances
Abund_dict = OrderedDict()
for abund in List_Abundances:
Abund_dict[abund] = OrderedDict()
#Loop through files
for i in range(len(FilesList)):
#Analyze file address
CodeName, FileName, FileFolder = pv.Analyze_Address(FilesList[i])
if CodeName in Objects_Include:
#Loop through abundances in the log
for abund in List_Abundances:
Abund_Mag = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter = abund, Assumption = 'float')
                #If the abundance was measured, store it
                if Abund_Mag is not None:
Abund_dict[abund][CodeName] = Abund_Mag
#Dictionary to store objects with abundances pairs for regressions.
#As an initial value for the keys we define the abundances we want to use for the regression
Abundances_Pairs_dict = OrderedDict()
Abundances_Pairs_dict['O_Regression_Inference'] = ('OI_HI','Y_Inference_O')
Abundances_Pairs_dict['N_Regression_Inference'] = ('NI_HI','Y_Inference_O')
Abundances_Pairs_dict['S_ArCorr_Regression_Inference'] = ('SI_HI_ArCorr','Y_Inference_S')
#Loop through the regression lists and get objects with both abundances observed
for j in range(len(Abundances_Pairs_dict)):
#Get the elements keys for the regression
Vector, Elem_X, Elem_Y = Abundances_Pairs_dict.keys()[j], Abundances_Pairs_dict.values()[j][0], Abundances_Pairs_dict.values()[j][1]
#Determine objects with both abundances observed
Obj_vector = intersect1d(Abund_dict[Elem_X].keys(), Abund_dict[Elem_Y].keys(), assume_unique = True)
X_vector = zeros(len(Obj_vector))
Y_vector = zeros(len(Obj_vector))
X_vector_E = zeros(len(Obj_vector))
Y_vector_E = zeros(len(Obj_vector))
#Generate abundances vectors
for z in range(len(Obj_vector)):
X_vector[z] = nominal_values(Abund_dict[Elem_X][Obj_vector[z]])
X_vector_E[z] = std_devs(Abund_dict[Elem_X][Obj_vector[z]])
Y_vector[z] = nominal_values(Abund_dict[Elem_Y][Obj_vector[z]])
Y_vector_E[z] = std_devs(Abund_dict[Elem_Y][Obj_vector[z]])
Abundances_Pairs_dict[Vector] = (list(Obj_vector), uarray(X_vector, X_vector_E), uarray(Y_vector, Y_vector_E))
return Abundances_Pairs_dict
def Galaxy_sample():
# Objects_Include = ['70', 'SDSS2', '06', '08']
# Objects_Include = ['03', '04v2', '06', '08', '09', '10', '14', '24', '27', '71', 'SDSS1', '70', 'SDSS3', '51959-092', '51991-224', '52235-602', '52319-521', 'J2225', 'SHOC575v1', 'SHOC579']
# Objects_Include = ['03', '04v2', '06', '08', '09', '10', '14', '24', '27', '71', 'SDSS1', '70', 'SDSS3', '51959-092', '51991-224', '52235-602', '52319-521', 'J2225', 'SHOC575v2', 'SHOC579']
Objects_Include = ['03',
# '04v1', #Repeated
'04v2', #Repeated
'06',
'08',
'09',
# '10',
# '11',
'14',
'24',
'27',
                       # '70', #RepeatedSDSS2 - loads poorly; unclear why it fails to load
'71',
'SDSS1',
'SDSS2',
'SDSS3',
# '51959-092',#Repeated11
'51991-224',
'52235-602',
'52319-521',
'52703-612v2', #Repeated24
'J2225',
# 'SHOC036',#Repeated06 No TSIII measurement
# 'SHOC575v1', #Repeated Poorly starlight fitting
# 'SHOC575v2',
# 'SHOC579',
'SHOC588', #RepeatedSDSS3 Helium abundance well fitted but sulfur worse SDSS3
# 'SHOC593', R_S3 = 6
]
return Objects_Include
def WMAP_Coordinates():
WMAP_coordinates = array([ufloat(0.0, 0.0), ufloat(0.24709, 0.00025)])
return WMAP_coordinates
def Get_Traces(pv, Regressiontype, Abundances_dict, catalogue_folder, database_extension):
Y_dist_dict = OrderedDict()
Metal_Dist_dict = OrderedDict()
Candidate_Objects = Abundances_dict[Regressiontype][0]
Element_abundance = Abundances_dict[Regressiontype][1]
Y_abundance = Abundances_dict[Regressiontype][2]
ch_an = Chemical_Analysis()
for i in range(len(Candidate_Objects)):
CodeName = Candidate_Objects[i]
Metallic_elem = Element_abundance[i]
Element = Regressiontype[0:Regressiontype.find('_')]
FileFolder = catalogue_folder + CodeName + '/'
db_Address = FileFolder + 'J0_' + CodeName + database_extension
#SI_HI = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter='SI_HI_ArCorr', Assumption='float')
HeIII_HII = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter='HeIII_HII', Assumption='float')
#Get y_plus trace
pymc_database = database.pickle.load(db_Address)
HeII_HII_trace = pymc_database.trace('He_abud')[:]
pymc_database.close()
        #Resample an array (twice the trace length) from the distribution
HeII_HII_dist = random.choice(HeII_HII_trace, size=len(HeII_HII_trace)*2)
Elemental_dist = random.normal(Metallic_elem.nominal_value, Metallic_elem.std_dev, size = len(HeII_HII_trace)*2)
#Set to zero the HeIII ionic abundance if not observed
        if HeIII_HII is not None:
HeIII_HII_dist = random.normal(HeIII_HII.nominal_value, HeIII_HII.std_dev, size = len(HeII_HII_trace)*2)
else:
HeIII_HII_dist = zeros(len(HeII_HII_trace)*2)
#Calculate the HeI/HI distribution
HeI_dist = HeII_HII_dist + HeIII_HII_dist
#Calculate the Y distribution for the Hydrogen and helium abundance
if Regressiontype in ['S_Regression', 'S_ArCorr_Regression', 'S_Regression_Inference', 'S_ArCorr_Regression_Inference']:
OI_SI_dist = random.normal(ch_an.OI_SI.nominal_value, ch_an.OI_SI.std_dev, size = len(HeII_HII_trace)*2)
Y_dist_dict[CodeName] = (4 * HeI_dist * (1 - 20 * OI_SI_dist * Elemental_dist)) / (1 + 4 * HeI_dist) #WARNING WE ARE NOT TAKING INTO CONSIDERATION THE INCREASE IN ERROR DUE TO THE UNCERTAINTY IN OI_SI
Metal_Dist_dict[CodeName] = Elemental_dist
elif Regressiontype in ['O_Regression', 'O_Regression_Inference']:
Y_dist_dict[CodeName] = (4 * HeI_dist * (1 - 20 * Elemental_dist)) / (1 + 4 * HeI_dist)
Metal_Dist_dict[CodeName] = Elemental_dist
elif Regressiontype in ['N_Regression', 'N_Regression_Inference']:
OI_HI = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter='OI_HI', Assumption='float')
OI_HI_dist = random.normal(OI_HI.nominal_value, OI_HI.std_dev, size = len(HeII_HII_trace)*2)
Y_dist_dict[CodeName] = (4 * HeI_dist * (1 - 20 * OI_HI_dist)) / (1 + 4 * HeI_dist) #WARNING WE ARE NOT TAKING INTO CONSIDERATION THE INCREASE IN ERROR DUE TO THE UNCERTAINTY IN OI_SI
Metal_Dist_dict[CodeName] = Elemental_dist
return Metal_Dist_dict, Y_dist_dict
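#Hedged numeric sketch of the mass-fraction conversion used above,
#Y = 4y(1 - 20*(O/H)) / (1 + 4y), with illustrative values (not real data)
def _Y_mass_fraction_example():
    HeI = 0.085    #He/H ionic abundance
    OI_HI = 8e-5   #O/H abundance
    return (4 * HeI * (1 - 20 * OI_HI)) / (1 + 4 * HeI)   #~0.2533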
def Get_Traces_pn(pv, Regressiontype, Abundances_dict, catalogue_folder, database_extension):
Y_dist_dict = OrderedDict()
Metal_Dist_dict = OrderedDict()
Candidate_Objects = Abundances_dict[Regressiontype][0]
Element_abundance = Abundances_dict[Regressiontype][1]
Y_abundance = Abundances_dict[Regressiontype][2]
ch_an = Chemical_Analysis()
for i in range(len(Candidate_Objects)):
CodeName = Candidate_Objects[i]
Metallic_elem = Element_abundance[i]
Element = Regressiontype[0:Regressiontype.find('_')]
FileFolder = catalogue_folder + CodeName + '/'
db_Address = FileFolder + 'J0_' + CodeName + database_extension
#SI_HI = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter='SI_HI_ArCorr', Assumption='float')
HeIII_HII = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter='HeIII_HII', Assumption='float')
#Get y_plus trace
pymc_database = database.pickle.load(db_Address)
HeII_HII_trace = pymc_database.trace('He_abud')[:]
pymc_database.close()
        #Resample an array (twice the trace length) from the distribution
HeII_HII_dist = random.choice(HeII_HII_trace, size=len(HeII_HII_trace)*2)
Elemental_dist = random.normal(Metallic_elem.nominal_value, Metallic_elem.std_dev, size = len(HeII_HII_trace)*2)
#Set to zero the HeIII ionic abundance if not observed
        if HeIII_HII is not None:
HeIII_HII_dist = random.normal(HeIII_HII.nominal_value, HeIII_HII.std_dev, size = len(HeII_HII_trace)*2)
else:
HeIII_HII_dist = zeros(len(HeII_HII_trace)*2)
#Calculate the HeI/HI distribution
HeI_dist = HeII_HII_dist + HeIII_HII_dist
#Calculate the Y distribution for the Hydrogen and helium abundance
if Regressiontype in ['S_Regression', 'S_ArCorr_Regression', 'S_Regression_Inference', 'S_ArCorr_Regression_Inference']:
OI_SI_dist = random.normal(ch_an.OI_SI.nominal_value, ch_an.OI_SI.std_dev, size = len(HeII_HII_trace)*2)
Y_dist_dict[CodeName] = (4 * HeI_dist * (1 - 20 * OI_SI_dist * Elemental_dist)) / (1 + 4 * HeI_dist) #WARNING WE ARE NOT TAKING INTO CONSIDERATION THE INCREASE IN ERROR DUE TO THE UNCERTAINTY IN OI_SI
Metal_Dist_dict[CodeName] = Elemental_dist
elif Regressiontype in ['O_Regression', 'O_Regression_Inference']:
Y_dist_dict[CodeName] = (4 * HeI_dist * (1 - 20 * Elemental_dist)) / (1 + 4 * HeI_dist)
Metal_Dist_dict[CodeName] = Elemental_dist
elif Regressiontype in ['N_Regression', 'N_Regression_Inference']:
OI_HI = pv.GetParameter_ObjLog(CodeName, FileFolder, Parameter='OI_HI_pn', Assumption='float')
OI_HI_dist = random.normal(OI_HI.nominal_value, OI_HI.std_dev, size = len(HeII_HII_trace)*2)
Y_dist_dict[CodeName] = (4 * HeI_dist * (1 - 20 * OI_HI_dist)) / (1 + 4 * HeI_dist) #WARNING WE ARE NOT TAKING INTO CONSIDERATION THE INCREASE IN ERROR DUE TO THE UNCERTAINTY IN OI_SI
Metal_Dist_dict[CodeName] = Elemental_dist
return Metal_Dist_dict, Y_dist_dict
|
|
from django.db import models
from django.core import validators
from django.contrib.sites.models import Site
__all__ = [
'State', 'Tag', 'Keyword', 'Link',
'PostalAddress', 'PhoneNumber',
'LocalBusiness', 'Website',
]
class State(models.Model):
name = models.CharField(
max_length=25
)
abbreviation = models.CharField(
max_length=2
)
def __str__(self):
return '%s (%s)' % (
self.name,
self.abbreviation
)
class Meta:
ordering = ('name',)
class Tag(models.Model):
name = models.CharField(
max_length=200
)
def __str__(self):
return self.name
class Keyword(models.Model):
name = models.CharField(
max_length=200
)
def __str__(self):
return self.name
class Link(models.Model):
name = models.CharField(
max_length=200
)
tags = models.ManyToManyField(
Tag,
blank=True
)
url = models.URLField(
max_length=500,
verbose_name='url'
)
def __str__(self):
return self.url
class PostalAddress(models.Model):
name = models.CharField(
max_length=200,
blank=True,
)
street = models.CharField(
max_length=200,
verbose_name='streetAddress'
)
locality = models.CharField(
max_length=200,
verbose_name='addressLocality'
)
    state = models.ForeignKey(
        State,
        verbose_name='addressRegion',
        on_delete=models.SET_NULL,
        null=True
    )
postal_code = models.CharField(
max_length=200,
verbose_name='postalCode'
)
def __str__(self):
return "{0}\n{1},{2} {3}".format(
self.street, self.locality,
self.state.abbreviation,
self.postal_code
)
class Meta:
verbose_name_plural = "Postal Addresses"
class PhoneNumber(models.Model):
(MAIN, HOME, CELL,
WORK, FAX, OTHER,) = (
'MA', 'HO', 'CE',
'WO', 'FX', 'OT',
)
TYPE_CHOICES = (
(MAIN, 'Main'), (HOME, 'Home'),
(CELL, 'Cell'), (WORK, 'Work'),
(FAX, 'Fax'), (OTHER, 'Other'),
)
kind = models.CharField(
max_length=2,
choices=TYPE_CHOICES,
default=MAIN,
verbose_name='Type'
)
country_code = models.CharField(
max_length=3,
blank=True,
)
area_code = models.CharField(
max_length=3,
validators=[
validators.MinLengthValidator(3),
]
)
main_digits = models.CharField(
max_length=10,
validators=[
validators.MinLengthValidator(7),
]
)
def __str__(self):
return '({0}) {1}-{2}'.format(
self.area_code,
self.main_digits[:3],
self.main_digits[3:]
)
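# Hedged usage sketch: __str__ formats an unsaved instance without touching
# the database; the digits below are illustrative.
def _phone_number_example():
    number = PhoneNumber(area_code='555', main_digits='5551234')
    return str(number)  # '(555) 555-1234'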
class Schema(models.Model):
context = models.CharField(
max_length=200,
verbose_name='@context',
default="http://schema.org"
)
class Meta:
abstract = True
class Thing(Schema):
name = models.CharField(
max_length=200,
verbose_name='name'
)
alt_name = models.CharField(
max_length=200,
verbose_name='alternateName',
blank=True
)
description = models.TextField(
verbose_name='description',
blank=True
)
links = models.ManyToManyField(
Link,
verbose_name='sameAs',
)
url = models.URLField(
verbose_name='url',
blank=True
)
def __str__(self):
return self.name
class Meta(Schema.Meta):
abstract = True
class Organization(Thing):
    address = models.ForeignKey(
        PostalAddress,
        verbose_name='address',
        on_delete=models.CASCADE
    )
telephone = models.ForeignKey(
PhoneNumber,
on_delete=models.CASCADE
)
logo = models.URLField(
verbose_name='logo',
blank=True
)
founder = models.CharField(
max_length=200,
verbose_name='founder'
)
class Meta(Thing.Meta):
abstract = True
class LocalBusiness(Organization):
email = models.EmailField(
verbose_name='email',
blank=True
)
class Meta:
verbose_name_plural = "Local Businesses"
class Website(models.Model):
site = models.ForeignKey(
Site,
on_delete=models.CASCADE
)
schema = models.ForeignKey(
LocalBusiness,
on_delete=models.CASCADE
)
keywords = models.ManyToManyField(
Keyword,
verbose_name='keywords'
)
def __str__(self):
return self.site.name
|
|
# Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova.db import api as db
from nova import exception
from nova.objects import base
from nova.objects import fields as obj_fields
from nova.virt import hardware
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMACell(base.NovaEphemeralObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Add pagesize field
# Version 1.2: Add cpu_pinning_raw and topology fields
# Version 1.3: Add cpu_policy and cpu_thread_policy fields
# Version 1.4: Add cpuset_reserved field
VERSION = '1.4'
def obj_make_compatible(self, primitive, target_version):
super(InstanceNUMACell, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 4):
primitive.pop('cpuset_reserved', None)
if target_version < (1, 3):
primitive.pop('cpu_policy', None)
primitive.pop('cpu_thread_policy', None)
fields = {
'id': obj_fields.IntegerField(),
'cpuset': obj_fields.SetOfIntegersField(),
'memory': obj_fields.IntegerField(),
'pagesize': obj_fields.IntegerField(nullable=True,
default=None),
'cpu_topology': obj_fields.ObjectField('VirtCPUTopology',
nullable=True),
'cpu_pinning_raw': obj_fields.DictOfIntegersField(nullable=True,
default=None),
'cpu_policy': obj_fields.CPUAllocationPolicyField(nullable=True,
default=None),
'cpu_thread_policy': obj_fields.CPUThreadAllocationPolicyField(
nullable=True, default=None),
# These physical CPUs are reserved for use by the hypervisor
'cpuset_reserved': obj_fields.SetOfIntegersField(nullable=True,
default=None),
}
cpu_pinning = obj_fields.DictProxyField('cpu_pinning_raw')
def __len__(self):
return len(self.cpuset)
@classmethod
def _from_dict(cls, data_dict):
        # NOTE(sahid): Used as legacy; could be renamed to
        # _legacy_from_dict_ in the future to avoid confusion.
cpuset = hardware.parse_cpu_spec(data_dict.get('cpus', ''))
memory = data_dict.get('mem', {}).get('total', 0)
cell_id = data_dict.get('id')
pagesize = data_dict.get('pagesize')
return cls(id=cell_id, cpuset=cpuset,
memory=memory, pagesize=pagesize)
@property
def siblings(self):
cpu_list = sorted(list(self.cpuset))
threads = 0
if ('cpu_topology' in self) and self.cpu_topology:
threads = self.cpu_topology.threads
if threads == 1:
threads = 0
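        # zip(*[iter(seq)] * n) is the standard n-at-a-time grouping idiom:
        # it chunks the sorted CPU list into sibling sets of `threads` CPUs.
        # With threads == 0 (no topology, or one thread per core) the result
        # is an empty list, i.e. no sibling information.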
return list(map(set, zip(*[iter(cpu_list)] * threads)))
@property
def cpu_pinning_requested(self):
return self.cpu_policy == obj_fields.CPUAllocationPolicy.DEDICATED
def pin(self, vcpu, pcpu):
if vcpu not in self.cpuset:
return
pinning_dict = self.cpu_pinning or {}
pinning_dict[vcpu] = pcpu
self.cpu_pinning = pinning_dict
def pin_vcpus(self, *cpu_pairs):
for vcpu, pcpu in cpu_pairs:
self.pin(vcpu, pcpu)
def clear_host_pinning(self):
"""Clear any data related to how this cell is pinned to the host.
Needed for aborting claims as we do not want to keep stale data around.
"""
self.id = -1
self.cpu_pinning = {}
return self
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class InstanceNUMATopology(base.NovaObject,
base.NovaObjectDictCompat):
# Version 1.0: Initial version
# Version 1.1: Takes into account pagesize
# Version 1.2: InstanceNUMACell 1.2
# Version 1.3: Add emulator threads policy
VERSION = '1.3'
def obj_make_compatible(self, primitive, target_version):
super(InstanceNUMATopology, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 3):
primitive.pop('emulator_threads_policy', None)
fields = {
# NOTE(danms): The 'id' field is no longer used and should be
# removed in the future when convenient
'id': obj_fields.IntegerField(),
'instance_uuid': obj_fields.UUIDField(),
'cells': obj_fields.ListOfObjectsField('InstanceNUMACell'),
'emulator_threads_policy': (
obj_fields.CPUEmulatorThreadsPolicyField(nullable=True)),
}
@classmethod
def obj_from_primitive(cls, primitive, context=None):
if 'nova_object.name' in primitive:
obj_topology = super(InstanceNUMATopology, cls).obj_from_primitive(
primitive, context=None)
else:
# NOTE(sahid): This compatibility code needs to stay until we can
# guarantee that there are no cases of the old format stored in
# the database (or forever, if we can never guarantee that).
obj_topology = InstanceNUMATopology._from_dict(primitive)
obj_topology.id = 0
return obj_topology
@classmethod
def obj_from_db_obj(cls, instance_uuid, db_obj):
primitive = jsonutils.loads(db_obj)
obj_topology = cls.obj_from_primitive(primitive)
if 'nova_object.name' not in db_obj:
obj_topology.instance_uuid = instance_uuid
# No benefit to store a list of changed fields
obj_topology.obj_reset_changes()
return obj_topology
# TODO(ndipanov) Remove this method on the major version bump to 2.0
@base.remotable
def create(self):
values = {'numa_topology': self._to_json()}
db.instance_extra_update_by_uuid(self._context, self.instance_uuid,
values)
self.obj_reset_changes()
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
db_extra = db.instance_extra_get_by_instance_uuid(
context, instance_uuid, columns=['numa_topology'])
if not db_extra:
raise exception.NumaTopologyNotFound(instance_uuid=instance_uuid)
if db_extra['numa_topology'] is None:
return None
return cls.obj_from_db_obj(instance_uuid, db_extra['numa_topology'])
def _to_json(self):
return jsonutils.dumps(self.obj_to_primitive())
def __len__(self):
"""Defined so that boolean testing works the same as for lists."""
return len(self.cells)
@classmethod
def _from_dict(cls, data_dict):
        # NOTE(sahid): Used as legacy; could be renamed to _legacy_from_dict_
        # in the future to avoid confusion.
return cls(cells=[
InstanceNUMACell._from_dict(cell_dict)
for cell_dict in data_dict.get('cells', [])])
@property
def cpu_pinning(self):
"""Return a set of all host CPUs this NUMATopology is pinned to."""
return set(itertools.chain.from_iterable([
cell.cpu_pinning.values() for cell in self.cells
if cell.cpu_pinning]))
@property
def cpu_pinning_requested(self):
return all(cell.cpu_pinning_requested for cell in self.cells)
def clear_host_pinning(self):
"""Clear any data related to how instance is pinned to the host.
Needed for aborting claims as we do not want to keep stale data around.
"""
for cell in self.cells:
cell.clear_host_pinning()
return self
@property
def emulator_threads_isolated(self):
"""Determines whether emulator threads should be isolated"""
return (self.obj_attr_is_set('emulator_threads_policy') and
(self.emulator_threads_policy ==
obj_fields.CPUEmulatorThreadsPolicy.ISOLATE))
|
|
#!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Regular expression based JavaScript parsing classes."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import copy
import re
from closure_linter import javascripttokens
from closure_linter.common import matcher
from closure_linter.common import tokenizer
# Shorthand
Type = javascripttokens.JavaScriptTokenType
Matcher = matcher.Matcher
class JavaScriptModes(object):
"""Enumeration of the different matcher modes used for JavaScript."""
TEXT_MODE = 'text'
SINGLE_QUOTE_STRING_MODE = 'single_quote_string'
DOUBLE_QUOTE_STRING_MODE = 'double_quote_string'
BLOCK_COMMENT_MODE = 'block_comment'
DOC_COMMENT_MODE = 'doc_comment'
DOC_COMMENT_LEX_SPACES_MODE = 'doc_comment_spaces'
LINE_COMMENT_MODE = 'line_comment'
PARAMETER_MODE = 'parameter'
FUNCTION_MODE = 'function'
class JavaScriptTokenizer(tokenizer.Tokenizer):
"""JavaScript tokenizer.
Convert JavaScript code in to an array of tokens.
"""
# Useful patterns for JavaScript parsing.
IDENTIFIER_CHAR = r'A-Za-z0-9_$.'
# Number patterns based on:
# http://www.mozilla.org/js/language/js20-2000-07/formal/lexer-grammar.html
MANTISSA = r"""
(\d+(?!\.)) | # Matches '10'
(\d+\.(?!\d)) | # Matches '10.'
(\d*\.\d+) # Matches '.5' or '10.5'
"""
DECIMAL_LITERAL = r'(%s)([eE][-+]?\d+)?' % MANTISSA
HEX_LITERAL = r'0[xX][0-9a-fA-F]+'
NUMBER = re.compile(r"""
((%s)|(%s))
""" % (HEX_LITERAL, DECIMAL_LITERAL), re.VERBOSE)
# Strings come in three parts - first we match the start of the string, then
# the contents, then the end. The contents consist of any character except a
# backslash or end of string, or a backslash followed by any character, or a
# backslash followed by end of line to support correct parsing of multi-line
# strings.
SINGLE_QUOTE = re.compile(r"'")
SINGLE_QUOTE_TEXT = re.compile(r"([^'\\]|\\(.|$))+")
DOUBLE_QUOTE = re.compile(r'"')
DOUBLE_QUOTE_TEXT = re.compile(r'([^"\\]|\\(.|$))+')
START_SINGLE_LINE_COMMENT = re.compile(r'//')
END_OF_LINE_SINGLE_LINE_COMMENT = re.compile(r'//$')
START_DOC_COMMENT = re.compile(r'/\*\*')
START_BLOCK_COMMENT = re.compile(r'/\*')
END_BLOCK_COMMENT = re.compile(r'\*/')
BLOCK_COMMENT_TEXT = re.compile(r'([^*]|\*(?!/))+')
# Comment text is anything that we are not going to parse into another special
# token like (inline) flags or end comments. Complicated regex to match
# most normal characters, and '*', '{', '}', and '@' when we are sure that
# it is safe. Expression [^*{\s]@ must come first, or the other options will
# match everything before @, and we won't match @'s that aren't part of flags
# like in email addresses in the @author tag.
DOC_COMMENT_TEXT = re.compile(r'([^*{}\s]@|[^*{}@]|\*(?!/))+')
DOC_COMMENT_NO_SPACES_TEXT = re.compile(r'([^*{}\s]@|[^*{}@\s]|\*(?!/))+')
# Match the prefix ' * ' that starts every line of jsdoc. Want to include
# spaces after the '*', but nothing else that occurs after a '*', and don't
# want to match the '*' in '*/'.
DOC_PREFIX = re.compile(r'\s*\*(\s+|(?!/))')
START_BLOCK = re.compile('{')
END_BLOCK = re.compile('}')
REGEX_CHARACTER_CLASS = r"""
\[ # Opening bracket
([^\]\\]|\\.)* # Anything but a ] or \,
# or a backslash followed by anything
\] # Closing bracket
"""
# We ensure the regex is followed by one of the above tokens to avoid
# incorrectly parsing something like x / y / z as x REGEX(/ y /) z
POST_REGEX_LIST = [
';', ',', r'\.', r'\)', r'\]', '$', r'\/\/', r'\/\*', ':', '}']
REGEX = re.compile(r"""
/ # opening slash
(?!\*) # not the start of a comment
(\\.|[^\[\/\\]|(%s))* # a backslash followed by anything,
# or anything but a / or [ or \,
# or a character class
/ # closing slash
[gimsx]* # optional modifiers
(?=\s*(%s))
""" % (REGEX_CHARACTER_CLASS, '|'.join(POST_REGEX_LIST)),
re.VERBOSE)
ANYTHING = re.compile(r'.*')
PARAMETERS = re.compile(r'[^\)]+')
CLOSING_PAREN_WITH_SPACE = re.compile(r'\)\s*')
FUNCTION_DECLARATION = re.compile(r'\bfunction\b')
OPENING_PAREN = re.compile(r'\(')
CLOSING_PAREN = re.compile(r'\)')
OPENING_BRACKET = re.compile(r'\[')
CLOSING_BRACKET = re.compile(r'\]')
# We omit these JS keywords from the list:
# function - covered by FUNCTION_DECLARATION.
# delete, in, instanceof, new, typeof - included as operators.
# this - included in identifiers.
# null, undefined - not included, should go in some "special constant" list.
KEYWORD_LIST = ['break', 'case', 'catch', 'continue', 'default', 'do', 'else',
'finally', 'for', 'if', 'return', 'switch', 'throw', 'try', 'var',
'while', 'with']
# Match a keyword string followed by a non-identifier character in order to
# not match something like doSomething as do + Something.
KEYWORD = re.compile('(%s)((?=[^%s])|$)' % (
'|'.join(KEYWORD_LIST), IDENTIFIER_CHAR))
# List of regular expressions to match as operators. Some notes: for our
# purposes, the comma behaves similarly enough to a normal operator that we
# include it here. r'\bin\b' actually matches 'in' surrounded by boundary
# characters - this may not match some very esoteric uses of the in operator.
# Operators that are subsets of larger operators must come later in this list
# for proper matching, e.g., '>>' must come AFTER '>>>'.
OPERATOR_LIST = [',', r'\+\+', '===', '!==', '>>>=', '>>>', '==', '>=', '<=',
'!=', '<<=', '>>=', '<<', '>>', '>', '<', r'\+=', r'\+',
'--', '\^=', '-=', '-', '/=', '/', r'\*=', r'\*', '%=', '%',
'&&', r'\|\|', '&=', '&', r'\|=', r'\|', '=', '!', ':', '\?',
r'\bdelete\b', r'\bin\b', r'\binstanceof\b', r'\bnew\b',
r'\btypeof\b', r'\bvoid\b']
OPERATOR = re.compile('|'.join(OPERATOR_LIST))
WHITESPACE = re.compile(r'\s+')
SEMICOLON = re.compile(r';')
# Technically JavaScript identifiers can't contain '.', but we treat a set of
# nested identifiers as a single identifier.
NESTED_IDENTIFIER = r'[a-zA-Z_$][%s.]*' % IDENTIFIER_CHAR
IDENTIFIER = re.compile(NESTED_IDENTIFIER)
SIMPLE_LVALUE = re.compile(r"""
(?P<identifier>%s) # a valid identifier
(?=\s* # optional whitespace
\= # look ahead to equal sign
      (?!=))                 # not followed by another equal sign
""" % NESTED_IDENTIFIER, re.VERBOSE)
# A doc flag is a @ sign followed by non-space characters that appears at the
# beginning of the line, after whitespace, or after a '{'. The look-behind
# check is necessary to not match someone@google.com as a flag.
DOC_FLAG = re.compile(r'(^|(?<=\s))@(?P<name>[a-zA-Z]+)')
# To properly parse parameter names, we need to tokenize whitespace into a
# token.
DOC_FLAG_LEX_SPACES = re.compile(r'(^|(?<=\s))@(?P<name>%s)\b' %
'|'.join(['param']))
DOC_INLINE_FLAG = re.compile(r'(?<={)@(?P<name>[a-zA-Z]+)')
# Star followed by non-slash, i.e a star that does not end a comment.
# This is used for TYPE_GROUP below.
SAFE_STAR = r'(\*(?!/))'
COMMON_DOC_MATCHERS = [
# Find the end of the comment.
Matcher(END_BLOCK_COMMENT, Type.END_DOC_COMMENT,
JavaScriptModes.TEXT_MODE),
# Tokenize documented flags like @private.
Matcher(DOC_INLINE_FLAG, Type.DOC_INLINE_FLAG),
Matcher(DOC_FLAG_LEX_SPACES, Type.DOC_FLAG,
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE),
# Encountering a doc flag should leave lex spaces mode.
Matcher(DOC_FLAG, Type.DOC_FLAG, JavaScriptModes.DOC_COMMENT_MODE),
# Tokenize braces so we can find types.
Matcher(START_BLOCK, Type.DOC_START_BRACE),
Matcher(END_BLOCK, Type.DOC_END_BRACE),
Matcher(DOC_PREFIX, Type.DOC_PREFIX, None, True)]
  # The token matcher groups work as follows: each is a list of Matcher objects.
# The matchers will be tried in this order, and the first to match will be
# returned. Hence the order is important because the matchers that come first
# overrule the matchers that come later.
JAVASCRIPT_MATCHERS = {
# Matchers for basic text mode.
JavaScriptModes.TEXT_MODE: [
# Check a big group - strings, starting comments, and regexes - all
# of which could be intertwined. 'string with /regex/',
# /regex with 'string'/, /* comment with /regex/ and string */ (and so
# on)
Matcher(START_DOC_COMMENT, Type.START_DOC_COMMENT,
JavaScriptModes.DOC_COMMENT_MODE),
Matcher(START_BLOCK_COMMENT, Type.START_BLOCK_COMMENT,
JavaScriptModes.BLOCK_COMMENT_MODE),
Matcher(END_OF_LINE_SINGLE_LINE_COMMENT,
Type.START_SINGLE_LINE_COMMENT),
Matcher(START_SINGLE_LINE_COMMENT, Type.START_SINGLE_LINE_COMMENT,
JavaScriptModes.LINE_COMMENT_MODE),
Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_START,
JavaScriptModes.SINGLE_QUOTE_STRING_MODE),
Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_START,
JavaScriptModes.DOUBLE_QUOTE_STRING_MODE),
Matcher(REGEX, Type.REGEX),
# Next we check for start blocks appearing outside any of the items
# above.
Matcher(START_BLOCK, Type.START_BLOCK),
Matcher(END_BLOCK, Type.END_BLOCK),
# Then we search for function declarations.
Matcher(FUNCTION_DECLARATION, Type.FUNCTION_DECLARATION,
JavaScriptModes.FUNCTION_MODE),
# Next, we convert non-function related parens to tokens.
Matcher(OPENING_PAREN, Type.START_PAREN),
Matcher(CLOSING_PAREN, Type.END_PAREN),
# Next, we convert brackets to tokens.
Matcher(OPENING_BRACKET, Type.START_BRACKET),
Matcher(CLOSING_BRACKET, Type.END_BRACKET),
# Find numbers. This has to happen before operators because scientific
# notation numbers can have + and - in them.
Matcher(NUMBER, Type.NUMBER),
# Find operators and simple assignments
Matcher(SIMPLE_LVALUE, Type.SIMPLE_LVALUE),
Matcher(OPERATOR, Type.OPERATOR),
# Find key words and whitespace.
Matcher(KEYWORD, Type.KEYWORD),
Matcher(WHITESPACE, Type.WHITESPACE),
# Find identifiers.
Matcher(IDENTIFIER, Type.IDENTIFIER),
# Finally, we convert semicolons to tokens.
Matcher(SEMICOLON, Type.SEMICOLON)],
# Matchers for single quote strings.
JavaScriptModes.SINGLE_QUOTE_STRING_MODE: [
Matcher(SINGLE_QUOTE_TEXT, Type.STRING_TEXT),
Matcher(SINGLE_QUOTE, Type.SINGLE_QUOTE_STRING_END,
JavaScriptModes.TEXT_MODE)],
# Matchers for double quote strings.
JavaScriptModes.DOUBLE_QUOTE_STRING_MODE: [
Matcher(DOUBLE_QUOTE_TEXT, Type.STRING_TEXT),
Matcher(DOUBLE_QUOTE, Type.DOUBLE_QUOTE_STRING_END,
JavaScriptModes.TEXT_MODE)],
# Matchers for block comments.
JavaScriptModes.BLOCK_COMMENT_MODE: [
# First we check for exiting a block comment.
Matcher(END_BLOCK_COMMENT, Type.END_BLOCK_COMMENT,
JavaScriptModes.TEXT_MODE),
      # Match non-comment-ending text.
Matcher(BLOCK_COMMENT_TEXT, Type.COMMENT)],
# Matchers for doc comments.
JavaScriptModes.DOC_COMMENT_MODE: COMMON_DOC_MATCHERS + [
Matcher(DOC_COMMENT_TEXT, Type.COMMENT)],
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: COMMON_DOC_MATCHERS + [
Matcher(WHITESPACE, Type.COMMENT),
Matcher(DOC_COMMENT_NO_SPACES_TEXT, Type.COMMENT)],
# Matchers for single line comments.
JavaScriptModes.LINE_COMMENT_MODE: [
# We greedy match until the end of the line in line comment mode.
Matcher(ANYTHING, Type.COMMENT, JavaScriptModes.TEXT_MODE)],
# Matchers for code after the function keyword.
JavaScriptModes.FUNCTION_MODE: [
# Must match open paren before anything else and move into parameter
# mode, otherwise everything inside the parameter list is parsed
# incorrectly.
Matcher(OPENING_PAREN, Type.START_PARAMETERS,
JavaScriptModes.PARAMETER_MODE),
Matcher(WHITESPACE, Type.WHITESPACE),
Matcher(IDENTIFIER, Type.FUNCTION_NAME)],
# Matchers for function parameters
JavaScriptModes.PARAMETER_MODE: [
# When in function parameter mode, a closing paren is treated specially.
# Everything else is treated as lines of parameters.
Matcher(CLOSING_PAREN_WITH_SPACE, Type.END_PARAMETERS,
JavaScriptModes.TEXT_MODE),
Matcher(PARAMETERS, Type.PARAMETERS, JavaScriptModes.PARAMETER_MODE)]}
# When text is not matched, it is given this default type based on mode.
# If unspecified in this map, the default default is Type.NORMAL.
JAVASCRIPT_DEFAULT_TYPES = {
JavaScriptModes.DOC_COMMENT_MODE: Type.COMMENT,
JavaScriptModes.DOC_COMMENT_LEX_SPACES_MODE: Type.COMMENT
}
def __init__(self, parse_js_doc = True):
"""Create a tokenizer object.
Args:
parse_js_doc: Whether to do detailed parsing of javascript doc comments,
or simply treat them as normal comments. Defaults to parsing JsDoc.
"""
matchers = self.JAVASCRIPT_MATCHERS
if not parse_js_doc:
# Make a copy so the original doesn't get modified.
matchers = copy.deepcopy(matchers)
matchers[JavaScriptModes.DOC_COMMENT_MODE] = matchers[
JavaScriptModes.BLOCK_COMMENT_MODE]
tokenizer.Tokenizer.__init__(self, JavaScriptModes.TEXT_MODE, matchers,
self.JAVASCRIPT_DEFAULT_TYPES)
def _CreateToken(self, string, token_type, line, line_number, values=None):
"""Creates a new JavaScriptToken object.
Args:
string: The string of input the token contains.
token_type: The type of token.
line: The text of the line this token is in.
line_number: The line number of the token.
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
"""
return javascripttokens.JavaScriptToken(string, token_type, line,
line_number, values)
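# A minimal usage sketch (hedged: the tokenizer class name and the
# TokenizeFile entry point come from the Closure Linter this code derives
# from and are not shown in this excerpt, so treat them as assumptions):
#
#   t = JavaScriptTokenizer(parse_js_doc=True)
#   for token in t.TokenizeFile(open('script.js')):
#       print token.type, token.string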
|
|
######################################################################
# CliCon - crf.py #
# #
# Willie Boag wboag@cs.uml.edu #
# #
# Purpose: Implement CRF (using python-crfsuite) #
######################################################################
import sys
import os
import tempfile
import pycrfsuite
from tools import compute_performance_stats
from feature_extraction.read_config import enabled_modules
cliner_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
tmp_dir = os.path.join(cliner_dir, 'data', 'tmp')
def format_features(rows, labels=None):
retVal = []
# For each line
for i,line in enumerate(rows):
# For each word in the line
for j,features in enumerate(line):
# Nonzero dimensions
inds = features.nonzero()[1]
# If label exists
values = []
if labels:
values.append( str(labels[i][j]) )
# Value for each dimension
for k in inds:
values.append( '%d=%d' % (k, features[0,k]) )
retVal.append("\t".join(values).strip())
        # Sentence boundary separator
retVal.append('')
'''
# Sanity check
global count
if labels:
out_f = 'a.txt' + str(count)
start = 0 # 2
else:
out_f = 'b.txt' + str(count)
start = 0
count += 1
with open(out_f, 'w') as f:
for line in retVal:
print >>f, line[start:]
'''
return retVal
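# Illustration of the crfsuite line format produced above (a sketch assuming
# 1xN scipy.sparse rows, matching how this module is called):
#
#   from scipy.sparse import csr_matrix
#   tok = csr_matrix([[0, 2, 0, 1]])            # one token, 4 feature dims
#   format_features([[tok]], labels=[[3]])
#   # -> ['3\t1=2\t3=1', '']   label first, then k=v pairs; the blank line
#   #    ends the sentence. Without labels the leading tag column is omitted.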
def pycrf_instances(fi, labeled):
xseq = []
yseq = []
    # Labeled lines carry the tag in the first field; skip it for features
if labeled:
begin = 1
else:
begin = 0
for line in fi:
line = line.strip('\n')
if not line:
            # An empty line marks the end of a sequence.
if labeled:
yield xseq, tuple(yseq)
else:
yield xseq
xseq = []
yseq = []
continue
# Split the line with TAB characters.
fields = line.split('\t')
# Append the item to the item sequence.
feats = fields[begin:]
xseq.append(feats)
# Append the label to the label sequence.
if labeled:
yseq.append(fields[0])
def train(X, Y, val_X=None, val_Y=None, test_X=None, test_Y=None):
'''
train()
Train a Conditional Random Field for sequence tagging.
@param X. List of sparse-matrix sequences. Each sequence is one sentence.
@param Y. List of sequence tags. Each sequence is the sentence's per-token tags.
@param val_X. More X data, but a heldout dev set.
    @param val_Y.  More Y data, but a heldout dev set.
    @param test_X. More X data, but a heldout test set.
    @param test_Y. More Y data, but a heldout test set.
    @return A tuple (model, scores): the serialized CRF model and a dict of
            per-split fit statistics plus the enabled-module hyperparameters.
'''
# Sanity Check detection: features & label
#with open('a','w') as f:
# for xline,yline in zip(X,Y):
# for x,y in zip(xline,yline):
# print >>f, y, '\t', x.nonzero()[1][0]
# print >>f
    # Format features for crfsuite
feats = format_features(X,Y)
# Create a Trainer object.
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in pycrf_instances(feats, labeled=True):
trainer.append(xseq, yseq)
# Train the model
os_handle,tmp_file = tempfile.mkstemp(dir=tmp_dir, suffix="crf_temp")
trainer.train(tmp_file)
# Read the trained model into a string (so it can be pickled)
model = ''
with open(tmp_file, 'rb') as f:
model = f.read()
os.close(os_handle)
# Remove the temporary file
os.remove(tmp_file)
######################################################################
# information about fitting the model
scores = {}
    # how well does the model fit the training data?
train_pred = predict(model, X)
train_stats = compute_performance_stats('train', train_pred, Y)
scores['train'] = train_stats
if val_X:
val_pred = predict(model, val_X)
val_stats = compute_performance_stats('dev', val_pred, val_Y)
scores['dev'] = val_stats
if test_X:
test_pred = predict(model, test_X)
test_stats = compute_performance_stats('test', test_pred, test_Y)
scores['test'] = test_stats
# keep track of which external modules were used for building this model!
scores['hyperparams'] = {}
enabled_mods = enabled_modules()
for module,enabled in enabled_mods.items():
e = bool(enabled)
scores['hyperparams'][module] = e
return model, scores
def predict(clf, X):
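    '''
    predict()
    Tag unlabeled sequences with a trained CRF model.
    @param clf. A serialized CRF model, as returned by train().
    @param X.   List of sparse-matrix sequences. Each sequence is one sentence.
    @return     List of integer tag sequences, one per sentence.
    '''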
    # Format features for crfsuite
feats = format_features(X)
# Dump the model into a temp file
os_handle,tmp_file = tempfile.mkstemp(dir=tmp_dir, suffix="crf_temp")
with open(tmp_file, 'wb') as f:
clf_byte = bytearray(clf, 'latin1')
f.write(clf_byte)
# Create the Tagger object
tagger = pycrfsuite.Tagger()
tagger.open(tmp_file)
# Remove the temp file
os.close(os_handle)
os.remove(tmp_file)
    # Tag the sequences
    Y = []
    for xseq in pycrf_instances(feats, labeled=False):
        yseq = [ int(n) for n in tagger.tag(xseq) ]
        Y.append(list(yseq))
# Sanity Check detection: feature & label predictions
#with open('a','w') as f:
# for x,y in zip(xseq,Y):
# x = x[0]
# print >>f, y, '\t', x[:-2]
# print >>f
return Y
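# A minimal end-to-end sketch (illustrative shapes only: each token is a 1xN
# sparse feature row and tags are small integers; not a verified test):
#
#   from scipy.sparse import csr_matrix
#   X = [[csr_matrix([[1, 0]]), csr_matrix([[0, 1]])]]   # one 2-token sentence
#   Y = [[0, 1]]
#   model, scores = train(X, Y)
#   print predict(model, X)        # per-sentence tag lists, e.g. [[0, 1]]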
|
|
import os
import subprocess
import re
### Used in both YAN01_00015 and YAN01_00016
objdump_path = "/usr/i386-linux-cgc/bin/objdump"
objdump_options = ["-d", "--insn-width=20"] #we use a long insn width so we can figure out the length of the insn
objdump_header_options = ["-h"]
dump_ext = ".dump"
bin_path = "bin"
build_path = "build"
cb = "YAN01_00015"
cb_path = os.path.join(bin_path, cb)
cb_dump_path = os.path.join(build_path, cb + dump_ext)
patched_path = os.path.join(bin_path, cb + "_patched")
patched_dump_path = os.path.join(build_path, cb + "_patched" + dump_ext)
def getDumps(inFile_path, outFile_path, dumpCmd_path, dumpCmd_options, bForce = False) :
if bForce or not os.path.exists(outFile_path) :
outFile = open(outFile_path, "w")
ret = 0
try :
ret = subprocess.call([dumpCmd_path] + dumpCmd_options + [inFile_path], stdout=outFile)
except OSError :
ret = -1
outFile.close()
if ret != 0 :
os.remove(outFile_path)
def getObjdumps(bForce = False) :
getDumps(cb_path, cb_dump_path, objdump_path, objdump_options, bForce)
getDumps(patched_path, patched_dump_path, objdump_path, objdump_options, bForce)
header_ext = ".header"
cb_header_path = os.path.join(build_path, cb + header_ext)
patched_header_path = os.path.join(build_path, cb + "_patched" + header_ext)
def getObjdumpHeaders(bForce = False) :
getDumps(cb_path, cb_header_path, objdump_path, objdump_header_options, bForce)
getDumps(patched_path, patched_header_path, objdump_path, objdump_header_options, bForce)
diff_ext = ".diff"
diffFile_path = os.path.join(build_path, cb + diff_ext)
def getDiffFile(bForce = False) :
if os.path.exists(diffFile_path) and not bForce:
return
if not os.path.exists(cb_dump_path) or not os.path.exists(patched_dump_path) or bForce:
getObjdumps(bForce)
assert os.path.exists(cb_dump_path) and os.path.exists(patched_dump_path)
outFile = open(diffFile_path, "w")
ret = 0
try :
ret = subprocess.call(["/usr/bin/diff", cb_dump_path, patched_dump_path], stdout=outFile)
except OSError :
ret = -1
outFile.close()
    # diff exits 0 when the files are identical and 1 when they differ;
    # a successful diff of a patched binary should therefore return 1
    if ret != 1 :
os.remove(diffFile_path)
offsets_ext = ".off"
offsets_path = os.path.join(build_path, cb + offsets_ext)
def processDiffToOffsets(out_path, df_path, getDiffFunc, bForce = False) :
if os.path.exists(out_path) and not bForce :
return #nothing to do
if not os.path.exists(df_path) or bForce:
getDiffFunc(bForce)
assert os.path.exists(df_path)
#now that we have the diff file, lets just process it
outFile = open(out_path, "w")
for l in open(df_path) :
mat = re.match("[\<\>]\s+([0-9a-fA-F]+):\s+([0-9a-fA-F ]+)\t", l)
if mat :
print "MATCH: " + l
outFile.write(mat.group(1) + ':' + mat.group(2) + '\n')
else :
print "NO MATCH: " + l
pass
outFile.close()
hexdump_path = "hexdump"
hexdump_options = ['-e', '" %08_ax:" 16/1 " %02x""\t\n"'] #address then colon then the raw bytes -similar to objdump output (notice the \t)
hd_diff_ext = ".hddiff"
hd_ext = ".hexdump"
hd_offsets_ext = ".hdoff"
hd_offsets_path = os.path.join(build_path, cb + hd_offsets_ext)
hd_diffFile_path = os.path.join(build_path, cb + hd_diff_ext)
cb_hd_path = os.path.join(build_path, cb + hd_ext)
patched_hd_path = os.path.join(build_path, cb + "_patched" + hd_ext)
def getHexdumps(bForce = False) :
getDumps(cb_path, cb_hd_path, hexdump_path, hexdump_options, bForce)
getDumps(patched_path, patched_hd_path, hexdump_path, hexdump_options, bForce)
def getHDDiffFile(bForce = False) :
if os.path.exists(hd_diffFile_path) and not bForce :
return
if not os.path.exists(cb_hd_path) or not os.path.exists(patched_hd_path) or bForce :
getHexdumps(bForce)
assert os.path.exists(cb_hd_path) and os.path.exists(patched_hd_path)
outFile = open(hd_diffFile_path, "w")
ret = 0
try :
ret = subprocess.call(["/usr/bin/diff", cb_hd_path, patched_hd_path], stdout=outFile)
except OSError :
ret = -1
outFile.close()
    # diff exits 0 when the files are identical and 1 when they differ;
    # a successful diff of a patched binary should therefore return 1
    if ret != 1 :
os.remove(hd_diffFile_path)
def loadOffsets(off_path, diff_path, getDiffFunc, bForce = False) :
processDiffToOffsets(off_path, diff_path, getDiffFunc, bForce)
try :
if os.path.getsize(off_path) == 0 : #there MUST be differences
processDiffToOffsets(off_path, diff_path, getDiffFunc, True)
except OSError : #we get an OSError if the file doesn't exist
pass #nothing to do because we will check on the file afterwards
assert os.path.exists(off_path)
ret = {}
#now that we have the offsets file, just read it into a dict and return
i = 1
for l in open(off_path) :
elems = l.split(':')
if len(elems) != 2 :
print "WARNING: Can't Process this line @ %u [%s]" % (i, l)
else :
k = elems[0]
v = len("".join(elems[1].split())) / 2
if k not in ret :
ret[k] = v
else :
if v > ret[k] :
ret[k] = v
i += 1
return ret
def loadObjdumpOffsets(bForce = False) :
return loadOffsets(offsets_path, diffFile_path, getDiffFile, bForce)
def loadHexdumpOffsets(bForce = False) :
return loadOffsets(hd_offsets_path, hd_diffFile_path, getHDDiffFile, bForce)
def getHexdumpDifferences(bForce = False) :
processDiffToOffsets(hd_offsets_path, hd_diffFile_path, getHDDiffFile, bForce)
try :
if os.path.getsize(hd_offsets_path) == 0 : #there MUST be differences
processDiffToOffsets(hd_offsets_path, hd_diffFile_path, getHDDiffFile, True)
except OSError : #we get an OSError if the file doesn't exist
pass #nothing to do because we will check on the file afterwards
assert os.path.exists(hd_offsets_path)
ret = {}
curOffsets = {}
#now that we have the offsets file, just read it into a dict and return
i = 1
for l in open(hd_offsets_path) :
elems = l.split(':')
if len(elems) != 2 :
print "WARNING: Can't Process this line @ %u [%s]" % (i, l)
else :
k = elems[0]
v = elems[1]
if k not in curOffsets :
curOffsets[k] = v
else : #since it already exists, lets go through the values to see which bytes are different
ki = int(k,16) #let this except and die if necessary
va = v.split()
ca = curOffsets[k].split()
                assert len(va) == len(ca) # both lines should hold 16 bytes; a short final line is not handled yet
j = 0
bCurDiff = False
baseAddr = ki
endAddr = ki
while j < len(va) :
if va[j] != ca[j] :
if bCurDiff :
#if they are still different then update the end Addr
#endAddr += 1 #already done automatically
pass
else :
#if they are different, but didn't used to be then update the base addr and set the flag
baseAddr = ki + j
bCurDiff = True
else :
#now if they are the same
if bCurDiff :
#if they used to be different, then that means we have new range so add to ret
ret[baseAddr] = endAddr - baseAddr
baseAddr = endAddr
else :
#if they used to be the same, then just update the base addr
baseAddr += 1
bCurDiff = False
j += 1
endAddr += 1
#if we are all the way here then we need to make sure that there wasn't some difference
# we missed at the end of the line
if bCurDiff :
ret[baseAddr] = endAddr - baseAddr
i += 1
return ret
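# Worked example of the coalescing above: suppose the hexdump lines at 0x100
# differ in bytes 2-4 and byte 7:
#   original: 00 11 22 33 44 55 66 77 ...
#   patched : 00 11 aa bb cc 55 66 ff ...
# the loop then records two ranges:
#   ret[0x102] = 3   # bytes 0x102-0x104 changed
#   ret[0x107] = 1   # byte 0x107 changed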
def parseHeaderOffsets(filename) :
temp = {}
for l in open(filename) :
mat = re.match("\s+\d+\s+(.[\w]+)\s+([0-9a-fA-F]+)\s+([0-9a-fA-F]+)\s+[0-9a-fA-F]+\s+([0-9a-fA-F]+)", l)
if mat :
if mat.group(1) in temp :
print "WARNING: Somehow the section [%s] already exists?" % mat.group(1)
#try :
temp[mat.group(1)] = (int(mat.group(2),16), int(mat.group(3),16), int(mat.group(4),16))
#except ValueError : I want the exception to stop things
else :
pass
return temp
def loadObjdumpHeaderOffsets(bForce = False) :
getObjdumpHeaders(bForce)
try :
if os.path.getsize(cb_header_path) == 0 or os.path.getsize(patched_header_path) == 0 :
assert False
except OSError :
assert False
header1 = parseHeaderOffsets(cb_header_path)
header2 = parseHeaderOffsets(patched_header_path)
return header1, header2
'''
objOffsets = loadObjdumpOffsets()
hexOffsets = loadHexdumpOffsets()
print objOffsets
print hexOffsets
h1, h2 = loadObjdumpHeaderOffsets()
print h1
print h2
print getHexdumpDifferences()
'''
#now that we have the dumps (or should have the dumps) we can do a diff
|
|
import atexit
from concurrent.futures import ThreadPoolExecutor
from math import log, ceil
from tempfile import TemporaryFile
import numpy as np
import scipy
import time
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils import check_array, check_random_state, gen_batches
from sklearn.utils.validation import check_is_fitted
from modl.utils import get_sub_slice
from modl.utils.randomkit import RandomState
from modl.utils.randomkit import Sampler
from .dict_fact_fast import _enet_regression_multi_gram, \
_enet_regression_single_gram, _update_G_average, _batch_weight
from ..utils.math.enet import enet_norm, enet_projection, enet_scale
MAX_INT = np.iinfo(np.int64).max
class CodingMixin(TransformerMixin):
def _set_coding_params(self,
n_components,
code_alpha=1,
code_l1_ratio=1,
tol=1e-2,
max_iter=100,
code_pos=False,
random_state=None,
n_threads=1
):
self.n_components = n_components
self.code_l1_ratio = code_l1_ratio
self.code_alpha = code_alpha
self.code_pos = code_pos
self.random_state = random_state
self.tol = tol
self.max_iter = max_iter
self.n_threads = n_threads
if self.n_threads > 1:
self._pool = ThreadPoolExecutor(n_threads)
def transform(self, X):
"""
Compute the codes associated to input matrix X, decomposing it onto
the dictionary
Parameters
----------
X: ndarray, shape = (n_samples, n_features)
Returns
-------
code: ndarray, shape = (n_samples, n_components)
"""
check_is_fitted(self, 'components_')
dtype = self.components_.dtype
X = check_array(X, order='C', dtype=dtype.type)
if X.flags['WRITEABLE'] is False:
X = X.copy()
n_samples, n_features = X.shape
if not hasattr(self, 'G_agg') or self.G_agg != 'full':
G = self.components_.dot(self.components_.T)
else:
G = self.G_
Dx = X.dot(self.components_.T)
code = np.ones((n_samples, self.n_components), dtype=dtype)
sample_indices = np.arange(n_samples)
size_job = ceil(n_samples / self.n_threads)
batches = list(gen_batches(n_samples, size_job))
par_func = lambda batch: _enet_regression_single_gram(
G, Dx[batch], X[batch], code,
get_sub_slice(sample_indices, batch),
self.code_l1_ratio, self.code_alpha, self.code_pos,
self.tol, self.max_iter)
if self.n_threads > 1:
res = self._pool.map(par_func, batches)
_ = list(res)
else:
_enet_regression_single_gram(
G, Dx, X, code,
sample_indices,
self.code_l1_ratio, self.code_alpha, self.code_pos,
self.tol, self.max_iter)
return code
def score(self, X):
"""
Objective function value on test data X
Parameters
----------
X: ndarray, shape=(n_samples, n_features)
Input matrix
Returns
-------
score: float, positive
"""
check_is_fitted(self, 'components_')
code = self.transform(X)
loss = np.sum((X - code.dot(self.components_)) ** 2) / 2
norm1_code = np.sum(np.abs(code))
norm2_code = np.sum(code ** 2)
regul = self.code_alpha * (norm1_code * self.code_l1_ratio
+ (1 - self.code_l1_ratio) * norm2_code / 2)
return (loss + regul) / X.shape[0]
def __getstate__(self):
state = dict(self.__dict__)
state.pop('_pool', None)
return state
def __setstate__(self, state):
self.__dict__ = state
if self.n_threads > 1:
self._pool = ThreadPoolExecutor(self.n_threads)
class DictFact(CodingMixin, BaseEstimator):
def __init__(self,
reduction=1,
learning_rate=1,
sample_learning_rate=0.76,
Dx_agg='masked',
G_agg='masked',
optimizer='variational',
dict_init=None,
code_alpha=1,
code_l1_ratio=1,
comp_l1_ratio=0,
step_size=1,
tol=1e-2,
max_iter=100,
code_pos=False,
comp_pos=False,
random_state=None,
n_epochs=1,
n_components=10,
batch_size=10,
verbose=0,
callback=None,
n_threads=1,
rand_size=True,
replacement=True,
):
"""
Estimator to perform matrix factorization by streaming samples and
subsampling them randomly to increase speed. Solve for
        argmin_{D : comp_l1_ratio * ||D^j||_1
                    + (1 - comp_l1_ratio) * ||D^j||_2^2 <= 1, A}
            1 / 2 * || X - D A ||_2^2
            + code_alpha * (code_l1_ratio * || A ||_1
                            + (1 - code_l1_ratio) * || A ||_2^2 / 2)
References
----------
        'Massive Online Dictionary Learning'
        A. Mensch, J. Mairal, B. Thirion, G. Varoquaux, ICML '16
        'Subsampled Online Matrix Factorization with Convergence Guarantees'
        A. Mensch, J. Mairal, G. Varoquaux, B. Thirion, OPT@NIPS '16
Parameters
----------
reduction: float
Ratio of reduction in accessing the features of the data stream.
            The larger the reduction, the faster the algorithm streams over
            the data. Too large a reduction may lead to slower convergence.
learning_rate: float in ]0.917, 1]
Weights to use in learning the dictionary. 1 means no forgetting,
            lower means forgetting the past faster; 0.917 is the theoretical
            limit for convergence.
sample_learning_rate: float in ]0.75, 3 * learning_rate - 2[
Weights to use in reducing the variance due to the stochastic
subsampling, when Dx_agg == 'average' or G_agg == 'average'.
            Lower means forgetting the past faster
Dx_agg: str in ['full', 'average', 'masked']
Estimator to use in estimating D^T x_t
G_agg: str in ['full', 'average', 'masked']
Estimator to use in estimating the Gram matrix D^T D
code_alpha: float, positive
Penalty applied to the code in the minimization problem
code_l1_ratio: float in [0, 1]
Ratio of l1 penalty for the code in the minimization problem
dict_init: ndarray, shape = (n_components, n_features)
Initial dictionary
n_epochs: int
Number of epochs to perform over data
n_components: int
            Number of components in the dictionary
batch_size: int
Size of mini-batches to use
code_pos: boolean,
Learn a positive code
comp_pos: boolean,
Learn a positive dictionary
random_state: np.random.RandomState or int
Seed randomness in the learning algorithm
comp_l1_ratio: float in [0, 1]
Ratio of l1 in the dictionary constraint
verbose: int, positive
Control the verbosity of the estimator
callback: callable,
Function called from time to time with local variables
n_threads: int
Number of processors to use in the algorithm
tol: float, positive
Tolerance for the elastic-net solver
max_iter: int, positive
Maximum iteration for the elastic-net solver
rand_size: boolean
Whether the masks should have fixed size
replacement: boolean
Whether to compute random or cycling masks
Attributes
----------
self.components_: ndarray, shape = (n_components, n_features)
Current estimation of the dictionary
self.code_: ndarray, shape = (n_samples, n_components)
Current estimation of each sample code
self.C_: ndarray, shape = (n_components, n_components)
For computing D gradient
self.B_: ndarray, shape = (n_components, n_features)
For computing D gradient
self.gradient_: ndarray, shape = (n_components, n_features)
D gradient, to perform block coordinate descent
self.G_: ndarray, shape = (n_components, n_components)
Gram matrix
self.Dx_average_: ndarray, shape = (n_samples, n_components)
Current estimate of D^T X
self.G_average_: ndarray, shape =
(n_samples, n_components, n_components)
Averaged previously seen subsampled Gram matrix. Memory-mapped
self.n_iter_: int
Number of seen samples
self.sample_n_iter_: int
Number of time each sample has been seen
        self.verbose_iter_: list
            Iterations at which verbose output is printed
self.feature_sampler_: Sampler
Generator of masks
"""
self.batch_size = batch_size
self.learning_rate = learning_rate
self.sample_learning_rate = sample_learning_rate
self.Dx_agg = Dx_agg
self.G_agg = G_agg
self.reduction = reduction
self.dict_init = dict_init
self._set_coding_params(n_components,
code_l1_ratio=code_l1_ratio,
code_alpha=code_alpha,
code_pos=code_pos,
random_state=random_state,
tol=tol,
max_iter=max_iter,
n_threads=n_threads)
self.comp_l1_ratio = comp_l1_ratio
self.comp_pos = comp_pos
self.optimizer = optimizer
self.step_size = step_size
self.n_epochs = n_epochs
self.verbose = verbose
self.callback = callback
self.n_threads = n_threads
self.rand_size = rand_size
self.replacement = replacement
def fit(self, X):
"""
Compute the factorisation X ~ code_ x components_, solving for
        D, code_ = argmin_{D : comp_l1_ratio * ||D^j||_1
                               + (1 - comp_l1_ratio) * ||D^j||_2^2 <= 1, A}
            1 / 2 * || X - D A ||_2^2
            + code_alpha * (code_l1_ratio * || A ||_1
                            + (1 - code_l1_ratio) * || A ||_2^2 / 2)
Parameters
----------
X: ndarray, shape= (n_samples, n_features)
Returns
-------
self
"""
X = check_array(X, order='C', dtype=[np.float32, np.float64])
if self.dict_init is None:
dict_init = X
else:
dict_init = check_array(self.dict_init,
dtype=X.dtype.type)
self.prepare(n_samples=X.shape[0], X=dict_init)
# Main loop
for _ in range(self.n_epochs):
self.partial_fit(X)
permutation = self.shuffle()
X = X[permutation]
return self
def partial_fit(self, X, sample_indices=None):
"""
Update the factorization using rows from X
Parameters
----------
X: ndarray, shape (n_samples, n_features)
Input data
sample_indices:
Indices for each row of X. If None, consider that row i index is i
(useful when providing the whole data to the function)
Returns
-------
self
"""
X = check_array(X, dtype=[np.float32, np.float64], order='C')
n_samples, n_features = X.shape
batches = gen_batches(n_samples, self.batch_size)
for batch in batches:
this_X = X[batch]
these_sample_indices = get_sub_slice(sample_indices, batch)
self._single_batch_fit(this_X, these_sample_indices)
return self
def set_params(self, **params):
"""Set the parameters of this estimator.
The optimizer works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
G_agg = params.pop('G_agg', None)
if G_agg == 'full' and self.G_agg != 'full':
if hasattr(self, 'components_'):
self.G_ = self.components_.dot(self.components_.T)
self.G_agg = 'full'
BaseEstimator.set_params(self, **params)
def shuffle(self):
"""
Shuffle regression statistics, code_,
G_average_ and Dx_average_ and return the permutation used
Returns
-------
permutation: ndarray, shape = (n_samples)
Permutation used in shuffling regression statistics
"""
random_seed = self.random_state.randint(MAX_INT)
random_state = RandomState(random_seed)
        arrays = [self.code_]
        if self.G_agg == 'average':
            arrays.append(self.G_average_)
        if self.Dx_agg == 'average':
            arrays.append(self.Dx_average_)
        perm = random_state.shuffle_with_trace(arrays)
self.labels_ = self.labels_[perm]
return perm
def prepare(self, n_samples=None, n_features=None,
dtype=None, X=None):
"""
Init estimator attributes based on input shape and type.
Parameters
----------
n_samples: int,
n_features: int,
dtype: dtype in np.float32, np.float64
to use in the estimator. Override X.dtype if provided
X: ndarray, shape (> n_components, n_features)
Array to use to determine shape and types, and init dictionary if
provided
Returns
-------
self
"""
if X is not None:
X = check_array(X, order='C', dtype=[np.float32, np.float64])
if dtype is None:
dtype = X.dtype
# Transpose to fit usual column streaming
this_n_samples = X.shape[0]
if n_samples is None:
n_samples = this_n_samples
if n_features is None:
n_features = X.shape[1]
else:
if n_features != X.shape[1]:
                    raise ValueError('n_features and X do not match')
else:
if n_features is None or n_samples is None:
                raise ValueError('Either provide '
                                 'shape or data to function prepare.')
if dtype is None:
dtype = np.float64
elif dtype not in [np.float32, np.float64]:
            raise ValueError('dtype should be float32 or float64')
if self.optimizer not in ['variational', 'sgd']:
return ValueError("optimizer should be 'variational' or 'sgd'")
if self.optimizer == 'sgd':
self.reduction = 1
self.G_agg = 'full'
self.Dx_agg = 'full'
# Regression statistics
if self.G_agg == 'average':
            # The backing file must remain open for the lifetime of
            # G_average_, so it is closed in _exit() (registered below)
            # rather than by a context manager here.
            self.G_average_mmap_ = TemporaryFile()
            self.G_average_ = np.memmap(self.G_average_mmap_, mode='w+',
                                        shape=(n_samples,
                                               self.n_components,
                                               self.n_components),
                                        dtype=dtype)
            atexit.register(self._exit)
if self.Dx_agg == 'average':
self.Dx_average_ = np.zeros((n_samples, self.n_components),
dtype=dtype)
# Dictionary statistics
self.C_ = np.zeros((self.n_components, self.n_components), dtype=dtype)
self.B_ = np.zeros((self.n_components, n_features), dtype=dtype)
self.gradient_ = np.zeros((self.n_components, n_features), dtype=dtype,
order='F')
self.random_state = check_random_state(self.random_state)
if X is None:
self.components_ = np.empty((self.n_components,
n_features),
dtype=dtype)
self.components_[:, :] = self.random_state.randn(self.n_components,
n_features)
else:
# random_idx = self.random_state.permutation(this_n_samples)[
# :self.n_components]
self.components_ = check_array(X[:self.n_components],
dtype=dtype.type,
copy=True)
if self.comp_pos:
self.components_[self.components_ <= 0] = \
- self.components_[self.components_ <= 0]
for i in range(self.n_components):
enet_scale(self.components_[i],
l1_ratio=self.comp_l1_ratio,
radius=1)
self.code_ = np.ones((n_samples, self.n_components), dtype=dtype)
self.labels_ = np.arange(n_samples)
self.comp_norm_ = np.zeros(self.n_components, dtype=dtype)
if self.G_agg == 'full':
self.G_ = self.components_.dot(self.components_.T)
self.n_iter_ = 0
self.sample_n_iter_ = np.zeros(n_samples, dtype='int')
self.random_state = check_random_state(self.random_state)
random_seed = self.random_state.randint(MAX_INT)
self.feature_sampler_ = Sampler(n_features, self.rand_size,
self.replacement, random_seed)
if self.verbose:
self.verbose_iter_ = np.linspace(0, n_samples * self.n_epochs,
self.verbose).tolist()
self.time_ = 0
return self
def _callback(self):
if self.callback is not None:
self.callback(self)
def _single_batch_fit(self, X, sample_indices):
"""Fit a single batch X: compute code, update statistics, update the
dictionary"""
if (self.verbose and self.verbose_iter_
and self.n_iter_ >= self.verbose_iter_[0]):
print('Iteration %i' % self.n_iter_)
self.verbose_iter_ = self.verbose_iter_[1:]
self._callback()
if X.flags['WRITEABLE'] is False:
X = X.copy()
t0 = time.perf_counter()
subset = self.feature_sampler_.yield_subset(self.reduction)
batch_size = X.shape[0]
self.n_iter_ += batch_size
self.sample_n_iter_[sample_indices] += 1
this_sample_n_iter = self.sample_n_iter_[sample_indices]
w_sample = np.power(this_sample_n_iter, -self.sample_learning_rate). \
astype(self.components_.dtype)
w = _batch_weight(self.n_iter_, batch_size,
self.learning_rate, 0)
self._compute_code(X, sample_indices, w_sample, subset)
this_code = self.code_[sample_indices]
if self.n_threads == 1:
self._update_stat_and_dict(subset, X, this_code, w)
else:
self._update_stat_and_dict_parallel(subset, X,
this_code, w)
self.time_ += time.perf_counter() - t0
def _update_stat_and_dict(self, subset, X, code, w):
"""For multi-threading"""
self._update_C(code, w)
self._update_B(X, code, w)
self.gradient_[:, subset] = self.B_[:, subset]
self._update_dict(subset, w)
def _update_stat_and_dict_parallel(self, subset, X, this_code, w):
"""For multi-threading"""
self.gradient_[:, subset] = self.B_[:, subset]
dict_thread = self._pool.submit(self._update_stat_partial_and_dict,
subset, X, this_code, w)
B_thread = self._pool.submit(self._update_B, X,
this_code, w)
dict_thread.result()
B_thread.result()
def _update_stat_partial_and_dict(self, subset, X, code, w):
"""For multi-threading"""
self._update_C(code, w)
# Gradient update
batch_size = X.shape[0]
X_subset = X[:, subset]
if self.optimizer == 'variational':
self.gradient_[:, subset] *= 1 - w
self.gradient_[:, subset] += w * code.T.dot(X_subset) / batch_size
else:
self.gradient_[:, subset] = code.T.dot(X_subset) / batch_size
self._update_dict(subset, w)
def _update_B(self, X, code, w):
"""Update B statistics (for updating D)"""
batch_size = X.shape[0]
if self.optimizer == 'variational':
self.B_ *= 1 - w
self.B_ += w * code.T.dot(X) / batch_size
else:
self.B_ = code.T.dot(X) / batch_size
def _update_C(self, this_code, w):
"""Update C statistics (for updating D)"""
batch_size = this_code.shape[0]
if self.optimizer == 'variational':
self.C_ *= 1 - w
self.C_ += w * this_code.T.dot(this_code) / batch_size
else:
self.C_ = this_code.T.dot(this_code) / batch_size
def _compute_code(self, X, sample_indices,
w_sample, subset):
"""Update regression statistics if
necessary and compute code from X[:, subset]"""
batch_size, n_features = X.shape
reduction = self.reduction
if self.n_threads > 1:
size_job = ceil(batch_size / self.n_threads)
batches = list(gen_batches(batch_size, size_job))
if self.Dx_agg != 'full' or self.G_agg != 'full':
components_subset = self.components_[:, subset]
if self.Dx_agg == 'full':
Dx = X.dot(self.components_.T)
else:
X_subset = X[:, subset]
Dx = X_subset.dot(components_subset.T) * reduction
if self.Dx_agg == 'average':
self.Dx_average_[sample_indices] \
*= 1 - w_sample[:, np.newaxis]
self.Dx_average_[sample_indices] \
+= Dx * w_sample[:, np.newaxis]
Dx = self.Dx_average_[sample_indices]
if self.G_agg != 'full':
G = components_subset.dot(components_subset.T) * reduction
if self.G_agg == 'average':
G_average = np.array(self.G_average_[sample_indices],
copy=True)
if self.n_threads > 1:
par_func = lambda batch: _update_G_average(
G_average[batch],
G,
w_sample[batch],
)
res = self._pool.map(par_func, batches)
_ = list(res)
else:
_update_G_average(G_average, G, w_sample)
self.G_average_[sample_indices] = G_average
else:
G = self.G_
if self.n_threads > 1:
if self.G_agg == 'average':
par_func = lambda batch: _enet_regression_multi_gram(
G_average[batch], Dx[batch], X[batch], self.code_,
get_sub_slice(sample_indices, batch),
self.code_l1_ratio, self.code_alpha, self.code_pos,
self.tol, self.max_iter)
else:
par_func = lambda batch: _enet_regression_single_gram(
G, Dx[batch], X[batch], self.code_,
get_sub_slice(sample_indices, batch),
self.code_l1_ratio, self.code_alpha, self.code_pos,
self.tol, self.max_iter)
res = self._pool.map(par_func, batches)
_ = list(res)
else:
if self.G_agg == 'average':
_enet_regression_multi_gram(
G_average, Dx, X, self.code_,
sample_indices,
self.code_l1_ratio, self.code_alpha, self.code_pos,
self.tol, self.max_iter)
else:
_enet_regression_single_gram(
G, Dx, X, self.code_,
sample_indices,
self.code_l1_ratio, self.code_alpha, self.code_pos,
self.tol, self.max_iter)
def _update_dict(self, subset, w):
"""Dictionary update part
Parameters
----------
subset: ndarray,
Subset of features to update.
"""
ger, = scipy.linalg.get_blas_funcs(('ger',), (self.C_,
self.components_))
len_subset = subset.shape[0]
n_components, n_features = self.components_.shape
components_subset = self.components_[:, subset]
atom_temp = np.zeros(len_subset, dtype=self.components_.dtype)
gradient_subset = self.gradient_[:, subset]
if self.G_agg == 'full' and len_subset < n_features / 2.:
self.G_ -= components_subset.dot(components_subset.T)
gradient_subset -= self.C_.dot(components_subset)
order = self.random_state.permutation(n_components)
if self.optimizer == 'variational':
for k in order:
subset_norm = enet_norm(components_subset[k],
self.comp_l1_ratio)
self.comp_norm_[k] += subset_norm
gradient_subset = ger(1.0, self.C_[k], components_subset[k],
a=gradient_subset, overwrite_a=True)
if self.C_[k, k] > 1e-20:
components_subset[k] = gradient_subset[k] / self.C_[k, k]
# Else do not update
if self.comp_pos:
components_subset[components_subset < 0] = 0
enet_projection(components_subset[k],
atom_temp,
self.comp_norm_[k], self.comp_l1_ratio)
components_subset[k] = atom_temp
subset_norm = enet_norm(components_subset[k],
self.comp_l1_ratio)
self.comp_norm_[k] -= subset_norm
gradient_subset = ger(-1.0, self.C_[k], components_subset[k],
a=gradient_subset, overwrite_a=True)
else:
for k in order:
subset_norm = enet_norm(components_subset[k],
self.comp_l1_ratio)
self.comp_norm_[k] += subset_norm
components_subset += w * self.step_size * gradient_subset
for k in range(self.n_components):
enet_projection(components_subset[k],
atom_temp,
self.comp_norm_[k], self.comp_l1_ratio)
components_subset[k] = atom_temp
subset_norm = enet_norm(components_subset[k],
self.comp_l1_ratio)
self.comp_norm_[k] -= subset_norm
self.components_[:, subset] = components_subset
if self.G_agg == 'full':
if len_subset < n_features / 2.:
self.G_ += components_subset.dot(components_subset.T)
else:
self.G_[:] = self.components_.dot(self.components_.T)
def _exit(self):
"""Useful to delete G_average_ memorymap when the algorithm is
interrupted/completed"""
if hasattr(self, 'G_average_mmap_'):
self.G_average_mmap_.close()
class Coder(CodingMixin, BaseEstimator):
def __init__(self, dictionary,
code_alpha=1,
code_l1_ratio=1,
tol=1e-2,
max_iter=100,
code_pos=False,
random_state=None,
n_threads=1
):
self._set_coding_params(dictionary.shape[0],
code_l1_ratio=code_l1_ratio,
code_alpha=code_alpha,
code_pos=code_pos,
random_state=random_state,
tol=tol,
max_iter=max_iter,
n_threads=n_threads)
self.components_ = dictionary
def fit(self, X=None):
return self
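# A minimal usage sketch (assumes modl's compiled extensions are importable;
# shapes and parameter values are illustrative):
#
#   import numpy as np
#   X = np.random.randn(100, 50)
#   mf = DictFact(n_components=10, n_epochs=2, batch_size=20, random_state=0)
#   mf.fit(X)
#   code = mf.transform(X)     # (100, 10) codes
#   D = mf.components_         # (10, 50) dictionary
#   print(mf.score(X))         # penalized reconstruction objective on X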
|
|
import os
import sys
import logging
from c_wrapper import *
from .. import common_utils as msvc_utils
class definition_t(object):
    # represents the definition (file location) of a symbol
def __init__( self, def_id, bsc ):
self.__bsc = bsc
self.__def_id = def_id
@property
def def_id(self):
return self.__def_id
@utils.cached
def location( self ):
module = STRING()
line = LINE()
if not BSCIdefInfo( self.__bsc, self.def_id, byref( module ), byref( line ) ):
raise RuntimeError( "Unable to load information about instance(%s)" % str( self.__def_id ) )
return (module, line)
@utils.cached
def file_name(self):
return self.location[0].value
@utils.cached
def line(self):
return self.location[1].value
def __str__( self ):
return self.file_name + ': %d' % self.line + ' name: %s' % self.as_instance.name
@utils.cached
def as_instance(self):
return self.__bsc.create_instance( BSCIinstFrIdef( self.__bsc, self.def_id) )
class instance_t(object):
    # represents a symbol instance (class, function, variable, ...)
def __init__( self, inst_id, bsc ):
self.__bsc = bsc
self.__inst_id = inst_id
@property
def inst_id(self):
return self.__inst_id
@utils.cached
def name_type_attribute_mangled_name( self ):
name = STRING()
typ = TYP()
attribute = ATR()
if not BSCIinstInfo( self.__bsc, self.inst_id, byref( name ), byref( typ ), byref( attribute ) ):
raise RuntimeError( "Unable to load information about instance(%s)" % str( self.__inst_id ) )
undecorated_name = msvc_utils.undecorate_name( name.value )
if undecorated_name.startswith( ' ?? ' ):
undecorated_name = undecorated_name[4:]
#BSCFormatDname( self.__bsc, name )
return undecorated_name, typ, attribute, name.value
@utils.cached
def mangled_name(self):
return self.name_type_attribute_mangled_name[3]
@utils.cached
def name(self):
return self.name_type_attribute_mangled_name[0]
@utils.cached
def type(self):
return self.name_type_attribute_mangled_name[1].value
@utils.cached
def attribute(self):
return self.name_type_attribute_mangled_name[2].value
@utils.cached
def is_class(self):
return self.type in [ enums.TYPES.STRUCNAM
, enums.TYPES.UNIONNAM
, enums.TYPES.CLASSNAM ]
def __str__( self ):
tmp = []
if enums.TYPES.has_value( self.type ):
tmp.append( 'type( "%s" )' % enums.TYPES.name_of( self.type ) )
if enums.ATTRIBUTES.has_value( self.attribute ):
tmp.append( 'attribute( "%s" )' % enums.ATTRIBUTES.name_of( self.attribute ) )
tmp.append( 'name( "%s" )' % self.name )
tmp.append( 'mangled name( "%s" )' % self.mangled_name )
return ', '.join( tmp )
@utils.cached
def definitions( self ):
definitions_len = ULONG(0)
definitions_ids = pointer( IDEF() )
if not BSCGetDefArray( self.__bsc, self.inst_id, byref( definitions_ids ), byref( definitions_len ) ):
raise RuntimeError( "Unable to call BSCGetDefArray" )
definitions = map( lambda i: definition_t( definitions_ids[i], self.__bsc )
, range( definitions_len.value ) )
BSCDisposeArray( self.__bsc, definitions_ids )
return definitions
@utils.cached
def members( self ):
instances_len = ULONG(0)
instances_ids = pointer( IINST() )
if not BSCGetMembersArray( self.__bsc, self.inst_id, enums.MBF.ALL, byref( instances_ids ), byref( instances_len ) ):
raise RuntimeError( "Unable to call BSCGetMembersArray" )
instances = map( lambda i: self.__bsc.create_instance( instances_ids[i] )
, range( instances_len.value ) )
BSCDisposeArray( self.__bsc, instances_ids )
return instances
@utils.cached
def used_symbols(self):
instances_len = ULONG(0)
instances_ids = pointer( IINST() )
if not BSCGetUsesArray( self.__bsc, self.inst_id, enums.MBF.ALL, byref( instances_ids ), byref( instances_len ) ):
raise RuntimeError( "Unable to call BSCGetUsesArray" )
instances = map( lambda i: self.__bsc.create_instance( instances_ids[i] )
, range( instances_len.value ) )
BSCDisposeArray( self.__bsc, instances_ids )
return instances
@utils.cached
def base_classes(self):
instances_len = ULONG(0)
instances_ids = pointer( IINST() )
if not BSCGetBaseArray( self.__bsc, self.inst_id, byref( instances_ids ), byref( instances_len ) ):
raise RuntimeError( "Unable to call BSCGetBaseArray" )
instances = map( lambda i: self.__bsc.create_instance( instances_ids[i] )
, range( instances_len.value ) )
BSCDisposeArray( self.__bsc, instances_ids )
return instances
@utils.cached
def derived_classes(self):
instances_len = ULONG(0)
instances_ids = pointer( IINST() )
if not BSCGetDervArray( self.__bsc, self.inst_id, byref( instances_ids ), byref( instances_len ) ):
raise RuntimeError( "Unable to call BSCGetDervArray" )
instances = map( lambda i: self.__bsc.create_instance( instances_ids[i] )
, range( instances_len.value ) )
BSCDisposeArray( self.__bsc, instances_ids )
return instances
class module_t(object):
    # represents a source file (module)
def __init__( self, mod_id, bsc ):
self.__bsc = bsc
self.__mod_id = mod_id
@property
def mod_id( self ):
return self.__mod_id
@utils.cached
def path( self ):
name = STRING()
BSCImodInfo(self.__bsc, self.__mod_id, byref(name))
return name.value
@utils.cached
def instances( self ):
instances_len = ULONG(0)
instances_ids = pointer( IINST() )
if not BSCGetModuleContents( self.__bsc, self.mod_id, enums.MBF.ALL, byref( instances_ids ), byref( instances_len ) ):
raise RuntimeError( "Unable to call BSCGetModuleContents" )
instances = map( lambda i: self.__bsc.create_instance( instances_ids[i] )
, range( instances_len.value ) )
BSCDisposeArray( self.__bsc, instances_ids )
return instances
class reader_t( object ):
def __init__( self, bsc_file ):
self.logger = utils.loggers.pdb_reader
self.logger.setLevel(logging.INFO)
self.__bsc_file = bsc_file
self.__bsc = pointer( Bsc() )
if not BSCOpen( self.__bsc_file, byref( self.__bsc ) ):
raise RuntimeError( "Unable to open bsc file '%s'" % self.__bsc_file )
self.__instances_cache = {} #inst id : instance_t
self.__bsc.create_instance = lambda inst_id: self.__create_instance( inst_id )
@utils.cached
def instances(self):
return self.__instances_cache.values()
def __create_instance( self, inst_id ):
try:
return self.__instances_cache[ inst_id ]
except KeyError:
inst = instance_t( inst_id, self.__bsc )
self.__instances_cache[ inst_id ] = inst
return inst
def load_instances( self ):
instances_len = ULONG(0)
instances_ids = pointer( IINST() )
if not BSCGetAllGlobalsArray( self.__bsc, enums.MBF.ALL, byref( instances_ids ), byref( instances_len ) ):
raise RuntimeError( "Unable to load all globals symbols" )
for i in range( instances_len.value ):
self.__create_instance( instances_ids[i] )
BSCDisposeArray( self.__bsc, instances_ids )
@utils.cached
def is_case_sensitive( self ):
return bool( BSCFCaseSensitive( self.__bsc ) )
@utils.cached
def files(self):
module_ids = pointer( IMOD() )
module_len = ULONG()
if not BSCGetAllModulesArray( self.__bsc, module_ids, byref(module_len) ):
raise RuntimeError( "Unable to load all modules" )
modules = map( lambda i: module_t( module_ids[i], self.__bsc )
, range( module_len.value ) )
BSCDisposeArray( self.__bsc, module_ids )
return modules
def print_stat( self ):
stat = BSC_STAT()
BSCGetStatistics( self.__bsc, byref( stat ) )
for f, t in stat._fields_:
print '%s: %s' % ( f, str( getattr( stat, f) ) )
def print_classes(self, file_name=None):
for m in self.files:
if file_name and m.path != file_name:
continue
print 'File: ', m.path
if m.instances:
print '\tInstances:'
for inst in m.instances:
print '\t\t', str(inst)
if inst.definitions:
print '\t\t\tDefinitions:'
for definition in inst.definitions:
print '\t\t\t\t', str( definition )
if inst.members:
print '\t\t\tMembers:'
for member in inst.members:
print '\t\t\t\t', str( member )
if inst.used_symbols:
print '\t\t\tUsed symbols:'
for used_symbol in inst.used_symbols:
print '\t\t\t\t', str( used_symbol )
if inst.base_classes:
print '\t\t\tBase classes:'
for base_class in inst.base_classes:
print '\t\t\t\t', str( base_class )
if inst.derived_classes:
print '\t\t\tDerived classes:'
for derived_class in inst.derived_classes:
print '\t\t\t\t', str( derived_class )
def __del__( self ):
if self.__bsc:
BSCClose( self.__bsc )
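# A minimal usage sketch (the .bsc path is hypothetical; the API is the one
# defined above):
#
#   reader = reader_t(r'C:\project\build\project.bsc')
#   reader.load_instances()
#   reader.print_stat()
#   reader.print_classes()     # optionally print_classes(file_name=...)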
|
|
"""
Unspecific helper classes
@author: Martin Kuemmel, Jonas Haase
@organization: Space Telescope - European Coordinating Facility (ST-ECF)
@license: Gnu Public Licence
@contact: mkuemmel@eso.org
@since: 2005/09/13
$LastChangedBy: mkuemmel $
$LastChangedDate: 2008-07-03 10:27:47 +0200 (Thu, 03 Jul 2008) $
$HeadURL: http://astropy.scipy.org/svn/astrolib/trunk/asciidata/Lib/asciiutils.py $
"""
__version__ = "Version 1.0 $LastChangedRevision: 503 $"
import string, sys, os, types
class NicePrinter(object):
"""
Class to print to I/O-streams
The class is a wrapper around an I/O stream. It offers
methods to format strings and print to a given I/O stream.
    Linend, delimiter and linestart are attributes of the
    class and allow nice formatting of the output.
"""
def __init__(self, stream=None, delimiter=None, linestart=None, linend=None):
"""
Initializes the class
A simple initializer. Most of the class attributes
are given as parameters
@param stream: I/O stream to write to
@type stream: I/O stream
        @param delimiter: optional delimiter
        @type delimiter: string
        @param linestart: optional line start
        @type linestart: string
        @param linend: optional linend
        @type linend: string
"""
#set the stream
self._stream = stream
# set a start value
self._start = ''
# set the delimiter
if delimiter != None:
# self._delimiter = ' '+delimiter+' '
self._delimiter = delimiter
else:
self._delimiter = ' '
# set the linend
if linend != None:
self._linend = linend
else:
self._linend = '\n'
# set the linestart
if linestart != None:
self._start = linestart
else:
            self._start = ''
def print_string(self, hstring):
"""
Prints a string to the stream
This general method prints any string
        to the stream.
        @param hstring: the string to print
@type hstring: string
"""
# that's easy up to now
self._stream.write(hstring)
def print_list(self, strlist):
"""
Prints a list to the stream.
The method combines a string list from the input
to a string which represents a line. Delimiter,
linend and linestart are taken into account.
        The line is sent directly to the I/O stream.
@param strlist: list
@type strlist: [string]
"""
self._stream.write(self._start
+ self._delimiter.join(strlist) + self._linend)
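# Example: with delimiter=',' and the default linend, print_list(['a', 'b',
# 'c']) writes "a,b,c\n" to the stream; linestart, delimiter and linend can
# all be overridden in the constructor.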
class Separator(object):
"""
Class to separate an ascii line into items
    Instances of this class split an ascii line into
    its individual items. A line can be split along a
    delimiter, along whitespace, or according to a fixed
    format given in a file (not yet implemented).
"""
def __init__(self, delimiter=None, file=None):
"""
The class constructor
"""
self._delimiter = delimiter
self._file = file
def separate(self, line):
"""
Separates a line into its items
@param line: the ascii line to be separated
@type line: string
@return: the list of items
@rtype: [string]
"""
# delete the trailing newline
        if line.endswith('\n'):
            line = line[:-1]
# separate either along a delimiter
if self._delimiter != None:
items = self.separate_delim(line)
# or along whitespaces
else:
items = self.separate_white(line)
return items
def separate_white(self, line):
"""
Separates a line along the whitespace
The method transforms a line into the list
        of its space-separated items. The first space after an
        item serves as the delimiter; any further spaces are treated
        as part of the next item and are preserved.
This is advantageous to keep the item length for
string columns with leading spaces.
@param line: the ascii line to be separated
@type line: string
@return: the list of items
@rtype: [string]
"""
# create the item list
witems = []
# split it conventionally
items = line.strip().split()
# go again over the line and identify
# the exact starting position of each
# item, preserving the leading spaces
start=0
for item in items:
pos = line.find(item,start)
if pos > -1:
witems.append(line[start:pos+len(item)])
start = pos+len(item)+1
# return the list
return witems
def separate_delim(self, line):
"""
Separates a line along a delimiter
The method transforms a line into the list
of its delimiter separated items.
@param line: the ascii line to be separated
@type line: string
@return: the list of items
@rtype: [string]
"""
# split the line
items = line.split( self._delimiter)
# return the list
return items
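# Example of the two splitting modes (values follow from the methods above):
#
#   Separator().separate('a   bb c\n')
#   # -> ['a', '  bb', 'c']   one space is consumed as the delimiter,
#   #                         the remaining leading spaces are preserved
#   Separator(delimiter=',').separate('1,2,3\n')
#   # -> ['1', '2', '3']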
class AsciiLenGetIter(object):
"""
    A general-purpose iterator for any class with len() and [] access
"""
def __init__(self, len_get_object):
"""
The class constructor
"""
# store the associated AsciiData object
self._len_get_object = len_get_object
        # set the index of the current item
        self._index = -1
        # set the maximum item index
        self._max_index = len(self._len_get_object) - 1
    def __iter__(self):
"""
Mandatory method for an iterator class
"""
return self
def __next__(self):
"""
Mandatory method for an iterator class
The method gives the next object in the iterator sequence.
        If no next object exists, StopIteration is raised to indicate
        the end of the iterator sequence.
"""
# check whether the next iteration does exist
if self._index >= self._max_index:
# no next iteration, raise exception
raise StopIteration
        # advance the index
self._index += 1
# return the next iteration
return self._len_get_object[self._index]
class AsciiColumnIter(object):
"""
An iterator class for the AsciiData class
"""
def __init__(self, ascii_column):
"""
The class constructor
"""
# store the associated AsciiColumn object
self.ascii_column = ascii_column
# set the index of the actual row
self._row_index = -1
        # set the maximum row index
self._max_index = ascii_column.get_nrows() - 1
    def __iter__(self):
"""
Mandatory method for an iterator class
"""
return self
def __next__(self):
"""
Mandatory method for an iterator class
The method gives the next object in the iterator sequence.
        If no next object exists, StopIteration is raised to indicate
        the end of the iterator sequence.
"""
# check whether the next iteration does exist
if self._row_index >= self._max_index:
# no next iteration, raise exception
raise StopIteration
        # advance the row index
self._row_index += 1
# return the next iteration
return self.ascii_column[self._row_index]
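# A small self-test of the helpers above (a sketch; run the module directly;
# the iteration relies on the __iter__/__next__ protocol, i.e. Python 3):
if __name__ == '__main__':
    printer = NicePrinter(stream=sys.stdout, delimiter=',')
    printer.print_list(['a', 'b', 'c'])           # -> a,b,c
    print(Separator().separate('a   bb c\n'))     # -> ['a', '  bb', 'c']
    for item in AsciiLenGetIter(['r1', 'r2']):
        print(item)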
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.config_types import Path
from recipe_engine import config as recipe_config
import DEPS
CONFIG_CTX = DEPS['chromium'].CONFIG_CTX
@CONFIG_CTX(includes=['android_common', 'ninja', 'static_library'],
config_vars={'TARGET_ARCH': 'arm', 'TARGET_BITS': 32,
'BUILD_CONFIG': 'Debug'})
def base_config(c):
c.compile_py.default_targets=[]
if c.HOST_PLATFORM != 'linux': # pragma: no cover
raise recipe_config.BadConf('Can only build android on linux.')
@CONFIG_CTX(includes=['base_config', 'default_compiler', 'goma'])
def main_builder(c):
if c.TARGET_ARCH != 'arm': # pragma: no cover
raise recipe_config.BadConf(
'Cannot target arm with TARGET_ARCH == %s' % c.TARGET_ARCH)
@CONFIG_CTX(includes=['main_builder', 'mb'])
def main_builder_mb(c):
pass
@CONFIG_CTX(includes=['main_builder_mb'],
config_vars={'BUILD_CONFIG': 'Release'})
def main_builder_rel_mb(c):
pass
@CONFIG_CTX(includes=['base_config', 'clang', 'goma', 'android_findbugs'])
def clang_builder(c):
c.gyp_env.GYP_DEFINES['component'] = 'shared_library'
c.gyp_env.GYP_DEFINES['asan'] = 1
c.gyp_env.GYP_DEFINES['use_allocator'] = 'none'
@CONFIG_CTX(includes=['main_builder'])
def component_builder(c):
c.gyp_env.GYP_DEFINES['component'] = 'shared_library' # pragma: no cover
@CONFIG_CTX(includes=['base_config', 'default_compiler', 'goma'],
config_vars={'TARGET_ARCH': 'intel'})
def x86_builder(c):
if c.TARGET_ARCH != 'intel': # pragma: no cover
raise recipe_config.BadConf(
'Cannot target x86 with TARGET_ARCH == %s' % c.TARGET_ARCH)
@CONFIG_CTX(includes=['x86_builder', 'mb'])
def x86_builder_mb(c):
pass
@CONFIG_CTX(includes=['base_config', 'default_compiler'],
config_vars={'TARGET_ARCH': 'mipsel'})
def mipsel_builder(c):
if c.TARGET_ARCH != 'mipsel': # pragma: no cover
raise recipe_config.BadConf('I dunno what to put in a mips builder!')
@CONFIG_CTX(includes=['mipsel_builder', 'mb'])
def mipsel_builder_mb(c):
pass
@CONFIG_CTX(includes=['main_builder'])
def dartium_builder(c):
c.compile_py.default_targets=['chrome_apk', 'content_shell_apk']
@CONFIG_CTX()
def cronet_builder(c):
c.gyp_env.GYP_DEFINES['disable_brotli_filter'] = 1
c.gyp_env.GYP_DEFINES['disable_file_support'] = 1
c.gyp_env.GYP_DEFINES['disable_ftp_support'] = 1
c.gyp_env.GYP_DEFINES['enable_bidirectional_stream'] = 1
c.gyp_env.GYP_DEFINES['enable_websockets'] = 0
c.gyp_env.GYP_DEFINES['use_platform_icu_alternatives'] = 1
c.compile_py.clobber = True
# TODO(jbudorick): Remove {cronet,net}_unittests_apk targets after
# gn switch is finished.
c.compile_py.default_targets=['cronet_package',
'cronet_sample_test_apk',
'cronet_test_instrumentation_apk',
'cronet_unittests',
'cronet_unittests_apk',
'net_unittests',
'net_unittests_apk',]
@CONFIG_CTX(includes=['main_builder'])
def arm_l_builder(c): # pragma: no cover
pass
@CONFIG_CTX(includes=['arm_l_builder'])
def arm_l_builder_lto(c): # pragma: no cover
c.gyp_env.GYP_DEFINES['use_lto'] = 1
c.gyp_env.GYP_DEFINES['component'] = 'shared_library'
@CONFIG_CTX(includes=['arm_l_builder'],
config_vars={'BUILD_CONFIG': 'Release'})
def arm_l_builder_rel(c): # pragma: no cover
pass
@CONFIG_CTX(includes=['base_config', 'default_compiler', 'goma', 'mb'],
config_vars={'TARGET_ARCH': 'intel', 'TARGET_BITS': 64})
def x64_builder_mb(c):
if c.TARGET_ARCH != 'intel' or c.TARGET_BITS != 64:
raise recipe_config.BadConf(
'Cannot target x64 with TARGET_ARCH == %s, TARGET_BITS == %d'
% (c.TARGET_ARCH, c.TARGET_BITS)) # pragma: no cover
@CONFIG_CTX(includes=['base_config', 'default_compiler', 'goma'],
config_vars={'TARGET_BITS': 64})
def arm64_builder(c):
pass
@CONFIG_CTX(includes=['arm64_builder', 'mb'])
def arm64_builder_mb(c):
pass
@CONFIG_CTX(includes=['arm64_builder'],
config_vars={'BUILD_CONFIG': 'Release'})
def arm64_builder_rel(c): # pragma: no cover
pass
@CONFIG_CTX(includes=['arm64_builder_rel', 'mb'])
def arm64_builder_rel_mb(c):
pass
@CONFIG_CTX(includes=['main_builder'])
def try_builder(c):
pass # pragma: no cover
@CONFIG_CTX(includes=['x86_builder'])
def x86_try_builder(c):
pass # pragma: no cover
@CONFIG_CTX(includes=['base_config'])
def tests_base(c):
pass # pragma: no cover
@CONFIG_CTX(includes=['arm64_builder_rel'])
def tests_arm64(c): # pragma: no cover
pass
@CONFIG_CTX(includes=['x64_builder'])
def tests_x64(c): # pragma: no cover
pass
@CONFIG_CTX(includes=['tests_base'])
def main_tests(c):
pass # pragma: no cover
@CONFIG_CTX(includes=['tests_base'])
def clang_tests(c):
pass # pragma: no cover
@CONFIG_CTX(includes=['tests_base'])
def enormous_tests(c):
pass # pragma: no cover
@CONFIG_CTX(includes=['tests_base'])
def try_instrumentation_tests(c):
pass # pragma: no cover
@CONFIG_CTX(includes=['x86_builder'])
def x86_try_instrumentation_tests(c):
pass # pragma: no cover
@CONFIG_CTX(includes=['main_builder'],
config_vars={'BUILD_CONFIG': 'Debug'})
def coverage_builder_tests(c): # pragma: no cover
gyp_defs = c.gyp_env.GYP_DEFINES
gyp_defs['emma_coverage'] = 1
gyp_defs['emma_filter'] = 'com.google.android.apps.chrome.*, org.chromium.*'
@CONFIG_CTX(includes=['main_builder'])
def incremental_coverage_builder_tests(c):
gyp_defs = c.gyp_env.GYP_DEFINES
gyp_defs['emma_coverage'] = 1
gyp_defs['emma_filter'] = 'org.chromium.*'
@CONFIG_CTX(includes=['main_builder'])
def non_device_wipe_provisioning(c):
pass
# TODO(zty): figure out what perf builder really wants and use that instead.
# e.g. official
@CONFIG_CTX(includes=['main_builder'])
def perf(c):
gyp_defs = c.gyp_env.GYP_DEFINES
gyp_defs['branding'] = 'Chrome'
gyp_defs['buildtype'] = 'Official'
@CONFIG_CTX(includes=['main_builder'])
def webview_perf(c):
gyp_defs = c.gyp_env.GYP_DEFINES
@CONFIG_CTX(includes=['main_builder'])
def cast_builder(c):
c.gyp_env.GYP_DEFINES['chromecast'] = 1
@CONFIG_CTX()
def errorprone(c):
c.gyp_env.GYP_DEFINES['enable_errorprone'] = 1
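# Note on composition: these configs stack through their 'includes' lists.
# For example, main_builder_rel_mb pulls in main_builder_mb, which pulls in
# main_builder and mb; main_builder in turn pulls in base_config,
# default_compiler and goma, and base_config itself includes android_common,
# ninja and static_library. The config_vars at each level (e.g.
# BUILD_CONFIG: 'Release') override the inherited defaults.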
|
|
# Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance Cinder Volume Driver
"""
import ast
import math
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import base64
from oslo_utils import units
import six
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume.drivers.zfssa import zfssarest
from cinder.volume import volume_types
import taskflow.engines
from taskflow.patterns import linear_flow as lf
from taskflow import task
CONF = cfg.CONF
LOG = log.getLogger(__name__)
ZFSSA_OPTS = [
cfg.StrOpt('zfssa_pool',
help='Storage pool name.'),
cfg.StrOpt('zfssa_project',
help='Project name.'),
cfg.StrOpt('zfssa_lun_volblocksize', default='8k',
choices=['512', '1k', '2k', '4k', '8k', '16k', '32k', '64k',
'128k'],
help='Block size.'),
cfg.BoolOpt('zfssa_lun_sparse', default=False,
help='Flag to enable sparse (thin-provisioned): True, False.'),
cfg.StrOpt('zfssa_lun_compression', default='off',
choices=['off', 'lzjb', 'gzip-2', 'gzip', 'gzip-9'],
help='Data compression.'),
cfg.StrOpt('zfssa_lun_logbias', default='latency',
choices=['latency', 'throughput'],
help='Synchronous write bias.'),
cfg.StrOpt('zfssa_initiator_group', default='',
help='iSCSI initiator group.'),
cfg.StrOpt('zfssa_initiator', default='',
help='iSCSI initiator IQNs. (comma separated)'),
cfg.StrOpt('zfssa_initiator_user', default='',
help='iSCSI initiator CHAP user (name).'),
cfg.StrOpt('zfssa_initiator_password', default='',
help='Secret of the iSCSI initiator CHAP user.', secret=True),
cfg.StrOpt('zfssa_initiator_config', default='',
help='iSCSI initiators configuration.'),
cfg.StrOpt('zfssa_target_group', default='tgt-grp',
help='iSCSI target group name.'),
cfg.StrOpt('zfssa_target_user', default='',
help='iSCSI target CHAP user (name).'),
cfg.StrOpt('zfssa_target_password', default='', secret=True,
help='Secret of the iSCSI target CHAP user.'),
cfg.StrOpt('zfssa_target_portal',
help='iSCSI target portal (Data-IP:Port, w.x.y.z:3260).'),
cfg.StrOpt('zfssa_target_interfaces',
help='Network interfaces of iSCSI targets. (comma separated)'),
cfg.IntOpt('zfssa_rest_timeout',
help='REST connection timeout. (seconds)'),
cfg.StrOpt('zfssa_replication_ip', default='',
               help='IP address used for replication data. (may be the same '
                    'as data ip)'),
cfg.BoolOpt('zfssa_enable_local_cache', default=True,
help='Flag to enable local caching: True, False.'),
cfg.StrOpt('zfssa_cache_project', default='os-cinder-cache',
help='Name of ZFSSA project where cache volumes are stored.')
]
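# Example cinder.conf values for this driver (illustrative only; the option
# names come from ZFSSA_OPTS above and the san_opts appended in __init__,
# while the values and section name are made up):
#
#   [zfssa-iscsi]
#   san_ip = 10.0.0.5
#   san_login = openstack
#   san_password = secret
#   zfssa_pool = mypool
#   zfssa_project = cinder
#   zfssa_target_portal = 10.0.0.6:3260
#   zfssa_target_interfaces = e1000g0
#
# zfssa_initiator_config, when set, is parsed with ast.literal_eval (see
# do_setup below) and maps an initiator-group name to a list of dicts with
# 'iqn', 'user' and 'password' keys, e.g.:
#   {'group-a': [{'iqn': 'iqn.1993-08.org.debian:01:abc',
#                 'user': '', 'password': ''}]}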
CONF.register_opts(ZFSSA_OPTS)
ZFSSA_LUN_SPECS = {
'zfssa:volblocksize',
'zfssa:sparse',
'zfssa:compression',
'zfssa:logbias',
}
def factory_zfssa():
return zfssarest.ZFSSAApi()
class ZFSSAISCSIDriver(driver.ISCSIDriver):
"""ZFSSA Cinder iSCSI volume driver.
Version history:
1.0.1:
Backend enabled volume migration.
Local cache feature.
"""
VERSION = '1.0.1'
protocol = 'iSCSI'
def __init__(self, *args, **kwargs):
super(ZFSSAISCSIDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(ZFSSA_OPTS)
self.configuration.append_config_values(san.san_opts)
self.zfssa = None
self.tgt_zfssa = None
self._stats = None
self.tgtiqn = None
def _get_target_alias(self):
"""return target alias."""
return self.configuration.zfssa_target_group
def do_setup(self, context):
"""Setup - create multiple elements.
Project, initiators, initiatorgroup, target and targetgroup.
"""
lcfg = self.configuration
LOG.info(_LI('Connecting to host: %s.'), lcfg.san_ip)
self.zfssa = factory_zfssa()
self.tgt_zfssa = factory_zfssa()
self.zfssa.set_host(lcfg.san_ip, timeout=lcfg.zfssa_rest_timeout)
auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_password)
auth_str = base64.encode_as_text(auth_str)[:-1]
self.zfssa.login(auth_str)
self.zfssa.create_project(lcfg.zfssa_pool, lcfg.zfssa_project,
compression=lcfg.zfssa_lun_compression,
logbias=lcfg.zfssa_lun_logbias)
if lcfg.zfssa_enable_local_cache:
self.zfssa.create_project(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
compression=lcfg.zfssa_lun_compression,
logbias=lcfg.zfssa_lun_logbias)
schemas = [
{'property': 'image_id',
'description': 'OpenStack image ID',
'type': 'String'},
{'property': 'updated_at',
'description': 'Most recent updated time of image',
'type': 'String'}]
self.zfssa.create_schemas(schemas)
if (lcfg.zfssa_initiator_config != ''):
initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
for initiator_group in initiator_config:
zfssa_initiator_group = initiator_group
for zfssa_initiator in initiator_config[zfssa_initiator_group]:
                    self.zfssa.create_initiator(
                        zfssa_initiator['iqn'],
                        zfssa_initiator_group + '-' + zfssa_initiator['iqn'],
                        chapuser=zfssa_initiator['user'],
                        chapsecret=zfssa_initiator['password'])
if (zfssa_initiator_group != 'default'):
self.zfssa.add_to_initiatorgroup(
zfssa_initiator['iqn'],
zfssa_initiator_group)
else:
LOG.warning(_LW('zfssa_initiator_config not found. '
'Using deprecated configuration options.'))
if (lcfg.zfssa_initiator != '' and
(lcfg.zfssa_initiator_group == '' or
lcfg.zfssa_initiator_group == 'default')):
                LOG.warning(_LW('zfssa_initiator: %(ini)s'
                                ' will not be used on '
                                'zfssa_initiator_group= %(inigrp)s.'),
{'ini': lcfg.zfssa_initiator,
'inigrp': lcfg.zfssa_initiator_group})
# Setup initiator and initiator group
if (lcfg.zfssa_initiator != '' and
lcfg.zfssa_initiator_group != '' and
lcfg.zfssa_initiator_group != 'default'):
for initiator in lcfg.zfssa_initiator.split(','):
self.zfssa.create_initiator(
initiator, lcfg.zfssa_initiator_group + '-' +
initiator, chapuser=lcfg.zfssa_initiator_user,
chapsecret=lcfg.zfssa_initiator_password)
self.zfssa.add_to_initiatorgroup(
initiator, lcfg.zfssa_initiator_group)
# Parse interfaces
interfaces = []
for interface in lcfg.zfssa_target_interfaces.split(','):
if interface == '':
continue
interfaces.append(interface)
# Setup target and target group
iqn = self.zfssa.create_target(
self._get_target_alias(),
interfaces,
tchapuser=lcfg.zfssa_target_user,
tchapsecret=lcfg.zfssa_target_password)
self.zfssa.add_to_targetgroup(iqn, lcfg.zfssa_target_group)
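    # Sketch of the zfssa_initiator_config value consumed in do_setup() above.
    # It is parsed with ast.literal_eval, so the option holds a Python dict
    # literal mapping initiator group names to lists of initiator dicts; the
    # keys mirror the lookups above, the values are hypothetical:
    #
    #   zfssa_initiator_config = {
    #       'cluster-grp': [
    #           {'iqn': 'iqn.1993-08.org.debian:01:abc123',
    #            'user': 'chapuser',
    #            'password': 'chapsecret'},
    #       ],
    #   }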
def check_for_setup_error(self):
"""Check that driver can login.
Check also pool, project, initiators, initiatorgroup, target and
targetgroup.
"""
lcfg = self.configuration
self.zfssa.verify_pool(lcfg.zfssa_pool)
self.zfssa.verify_project(lcfg.zfssa_pool, lcfg.zfssa_project)
if (lcfg.zfssa_initiator_config != ''):
initiator_config = ast.literal_eval(lcfg.zfssa_initiator_config)
for initiator_group in initiator_config:
zfssa_initiator_group = initiator_group
for zfssa_initiator in initiator_config[zfssa_initiator_group]:
self.zfssa.verify_initiator(zfssa_initiator['iqn'])
else:
if (lcfg.zfssa_initiator != '' and
lcfg.zfssa_initiator_group != '' and
lcfg.zfssa_initiator_group != 'default'):
for initiator in lcfg.zfssa_initiator.split(','):
self.zfssa.verify_initiator(initiator)
self.zfssa.verify_target(self._get_target_alias())
def _get_provider_info(self, volume, lun=None):
"""Return provider information."""
lcfg = self.configuration
project = lcfg.zfssa_project
if ((lcfg.zfssa_enable_local_cache is True) and
(volume['name'].startswith('os-cache-vol-'))):
project = lcfg.zfssa_cache_project
if lun is None:
lun = self.zfssa.get_lun(lcfg.zfssa_pool,
project,
volume['name'])
if isinstance(lun['number'], list):
lun['number'] = lun['number'][0]
if self.tgtiqn is None:
self.tgtiqn = self.zfssa.get_target(self._get_target_alias())
loc = "%s %s %s" % (lcfg.zfssa_target_portal, self.tgtiqn,
lun['number'])
LOG.debug('_get_provider_info: provider_location: %s', loc)
provider = {'provider_location': loc}
if lcfg.zfssa_target_user != '' and lcfg.zfssa_target_password != '':
provider['provider_auth'] = ('CHAP %s %s' %
(lcfg.zfssa_target_user,
lcfg.zfssa_target_password))
return provider
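    # The provider_location built above is a space-separated triple of
    # portal, target IQN, and LUN number, e.g. (illustrative values):
    #
    #   '10.0.0.11:3260 iqn.1986-03.com.sun:02:tgt-grp 0'
    #
    # initialize_connection() later splits it back into
    # (target_portal, iqn, lun).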
def create_volume(self, volume):
"""Create a volume on ZFSSA."""
        LOG.debug('zfssa.create_volume: volume=%s', volume['name'])
lcfg = self.configuration
volsize = str(volume['size']) + 'g'
specs = self._get_voltype_specs(volume)
self.zfssa.create_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
volsize,
lcfg.zfssa_target_group,
specs)
def delete_volume(self, volume):
"""Deletes a volume with the given volume['name']."""
LOG.debug('zfssa.delete_volume: name=%s', volume['name'])
lcfg = self.configuration
try:
lun2del = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'])
except exception.VolumeBackendAPIException as ex:
# NOTE(jdg): This will log an error and continue
# if for some reason the volume no longer exists
# on the backend
if 'Error Getting Volume' in ex.message:
LOG.error(_LE("Volume ID %s was not found on "
"the zfssa device while attempting "
"delete_volume operation."), volume['id'])
return
# Delete clone temp snapshot. see create_cloned_volume()
if 'origin' in lun2del and 'id' in volume:
if lun2del['nodestroy']:
self.zfssa.set_lun_props(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
nodestroy=False)
tmpsnap = 'tmp-snapshot-%s' % volume['id']
if lun2del['origin']['snapshot'] == tmpsnap:
self.zfssa.delete_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
lun2del['origin']['share'],
lun2del['origin']['snapshot'])
return
self.zfssa.delete_lun(pool=lcfg.zfssa_pool,
project=lcfg.zfssa_project,
lun=volume['name'])
if ('origin' in lun2del and
lun2del['origin']['project'] == lcfg.zfssa_cache_project):
self._check_origin(lun2del, volume['name'])
def create_snapshot(self, snapshot):
"""Creates a snapshot of a volume.
Snapshot name: snapshot['name']
Volume name: snapshot['volume_name']
"""
LOG.debug('zfssa.create_snapshot: snapshot=%s', snapshot['name'])
lcfg = self.configuration
self.zfssa.create_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'])
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.debug('zfssa.delete_snapshot: snapshot=%s', snapshot['name'])
lcfg = self.configuration
numclones = self.zfssa.num_clones(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'])
if numclones > 0:
            LOG.error(_LE('Snapshot %s has clones.'), snapshot['name'])
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
self.zfssa.delete_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'])
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot - clone a snapshot."""
LOG.debug('zfssa.create_volume_from_snapshot: volume=%s',
volume['name'])
LOG.debug('zfssa.create_volume_from_snapshot: snapshot=%s',
snapshot['name'])
if not self._verify_clone_size(snapshot, volume['size'] * units.Gi):
            exception_msg = (_('Error verifying clone size on '
                               'Volume clone: %(clone)s '
                               'Size: %(size)d on '
                               'Snapshot: %(snapshot)s')
% {'clone': volume['name'],
'size': volume['size'],
'snapshot': snapshot['name']})
LOG.error(exception_msg)
raise exception.InvalidInput(reason=exception_msg)
lcfg = self.configuration
self.zfssa.clone_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'],
snapshot['name'],
lcfg.zfssa_project,
volume['name'])
def _update_volume_status(self):
"""Retrieve status info from volume group."""
LOG.debug("Updating volume status")
self._stats = None
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'Oracle'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
lcfg = self.configuration
(avail, total) = self.zfssa.get_project_stats(lcfg.zfssa_pool,
lcfg.zfssa_project)
if avail is None or total is None:
return
host = lcfg.san_ip
pool = lcfg.zfssa_pool
project = lcfg.zfssa_project
auth_str = '%s:%s' % (lcfg.san_login, lcfg.san_password)
auth_str = base64.encode_as_text(auth_str)[:-1]
zfssa_tgt_group = lcfg.zfssa_target_group
repl_ip = lcfg.zfssa_replication_ip
data['location_info'] = "%s:%s:%s:%s:%s:%s" % (host, auth_str, pool,
project,
zfssa_tgt_group,
repl_ip)
data['total_capacity_gb'] = int(total) / units.Gi
data['free_capacity_gb'] = int(avail) / units.Gi
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data
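    # location_info packs six colon-separated fields, e.g. (illustrative
    # values, with auth_str being the base64-encoded login):
    #
    #   '10.0.0.10:YWRtaW46c2VjcmV0:mypool:cinder:tgt-grp:10.0.0.20'
    #
    # migrate_volume() on the source side splits this back into
    # (tgt_host, auth_str, tgt_pool, tgt_project, tgt_tgtgroup, tgt_repl_ip).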
def get_volume_stats(self, refresh=False):
"""Get volume status.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_status()
return self._stats
def create_export(self, context, volume, connector):
pass
def remove_export(self, context, volume):
pass
def ensure_export(self, context, volume):
pass
def extend_volume(self, volume, new_size):
"""Driver entry point to extent volume size."""
LOG.debug('extend_volume: volume name: %s', volume['name'])
lcfg = self.configuration
self.zfssa.set_lun_props(lcfg.zfssa_pool,
lcfg.zfssa_project,
volume['name'],
volsize=new_size * units.Gi)
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the specified volume."""
zfssa_snapshot = {'volume_name': src_vref['name'],
'name': 'tmp-snapshot-%s' % volume['id']}
self.create_snapshot(zfssa_snapshot)
try:
self.create_volume_from_snapshot(volume, zfssa_snapshot)
except exception.VolumeBackendAPIException:
LOG.error(_LE('Clone Volume:'
'%(volume)s failed from source volume:'
'%(src_vref)s'),
{'volume': volume['name'],
'src_vref': src_vref['name']})
# Cleanup snapshot
self.delete_snapshot(zfssa_snapshot)
def clone_image(self, context, volume,
image_location, image_meta,
image_service):
"""Create a volume efficiently from an existing image.
Verify the image ID being used:
(1) If there is no existing cache volume, create one and transfer
image data to it. Take a snapshot.
        (2) If a cache volume already exists, verify whether it has been
        altered or updated. If so, try to remove it, raising an exception
        if removal fails. Then create a new cache volume as in (1).
        Clone a volume from the cache volume and return it to Cinder.
"""
LOG.debug('Cloning image %(image)s to volume %(volume)s',
{'image': image_meta['id'], 'volume': volume['name']})
lcfg = self.configuration
if not lcfg.zfssa_enable_local_cache:
return None, False
# virtual_size is the image's actual size when stored in a volume
# virtual_size is expected to be updated manually through glance
try:
virtual_size = int(image_meta['properties'].get('virtual_size'))
except Exception:
LOG.error(_LE('virtual_size property is not set for the image.'))
return None, False
cachevol_size = int(math.ceil(float(virtual_size) / units.Gi))
if cachevol_size > volume['size']:
            LOG.error(_LE('Image size %(img_size)dGB is larger '
                          'than volume size %(vol_size)dGB.'),
                      {'img_size': cachevol_size,
                       'vol_size': volume['size']})
            return None, False
specs = self._get_voltype_specs(volume)
cachevol_props = {'size': cachevol_size}
try:
cache_vol, cache_snap = self._verify_cache_volume(context,
image_meta,
image_service,
specs,
cachevol_props)
# A cache volume and a snapshot should be ready by now
# Create a clone from the cache volume
self.zfssa.clone_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache_vol,
cache_snap,
lcfg.zfssa_project,
volume['name'])
if cachevol_size < volume['size']:
self.extend_volume(volume, volume['size'])
except exception.VolumeBackendAPIException as exc:
            LOG.error(_LE('Cannot clone image %(image)s to '
                          'volume %(volume)s. Error: %(error)s.'),
                      {'volume': volume['name'],
                       'image': image_meta['id'],
                       'error': exc.message})
            return None, False
return None, True
@utils.synchronized('zfssaiscsi', external=True)
def _verify_cache_volume(self, context, img_meta,
img_service, specs, cachevol_props):
"""Verify if we have a cache volume that we want.
If we don't, create one.
If we do, check if it's been updated:
* If so, delete it and recreate a new volume
* If not, we are good.
If it's out of date, delete it and create a new one.
After the function returns, there should be a cache volume available,
ready for cloning.
There needs to be a file lock here, otherwise subsequent clone_image
requests will fail if the first request is still pending.
"""
lcfg = self.configuration
cachevol_name = 'os-cache-vol-%s' % img_meta['id']
cachesnap_name = 'image-%s' % img_meta['id']
cachevol_meta = {
'cache_name': cachevol_name,
'snap_name': cachesnap_name,
}
cachevol_props.update(cachevol_meta)
cache_vol, cache_snap = None, None
updated_at = six.text_type(img_meta['updated_at'].isoformat())
LOG.debug('Verifying cache volume %s:', cachevol_name)
try:
cache_vol = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cachevol_name)
cache_snap = self.zfssa.get_lun_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cachevol_name,
cachesnap_name)
except exception.VolumeNotFound:
# There is no existing cache volume, create one:
return self._create_cache_volume(context,
img_meta,
img_service,
specs,
cachevol_props)
except exception.SnapshotNotFound:
            exception_msg = (_('Cache volume %(cache_vol)s '
                               'does not have snapshot %(cache_snap)s.')
                             % {'cache_vol': cachevol_name,
                                'cache_snap': cachesnap_name})
            LOG.error(exception_msg)
            raise exception.VolumeBackendAPIException(data=exception_msg)
# A cache volume does exist, check if it's updated:
if ((cache_vol['updated_at'] != updated_at) or
(cache_vol['image_id'] != img_meta['id'])):
# The cache volume is updated, but has clones:
if cache_snap['numclones'] > 0:
                exception_msg = (_('Cannot delete '
                                   'cache volume: %(cachevol_name)s. '
                                   'It was updated at %(updated_at)s '
                                   'and currently has %(numclones)s '
                                   'volume instances.')
                                 % {'cachevol_name': cachevol_name,
                                    'updated_at': updated_at,
                                    'numclones': cache_snap['numclones']})
                LOG.error(exception_msg)
                raise exception.VolumeBackendAPIException(data=exception_msg)
# The cache volume is updated, but has no clone, so we delete it
# and re-create a new one:
self.zfssa.delete_lun(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cachevol_name)
return self._create_cache_volume(context,
img_meta,
img_service,
specs,
cachevol_props)
return cachevol_name, cachesnap_name
def _create_cache_volume(self, context, img_meta,
img_service, specs, cachevol_props):
"""Create a cache volume from an image.
Returns names of the cache volume and its snapshot.
"""
lcfg = self.configuration
cachevol_size = int(cachevol_props['size'])
lunsize = "%sg" % six.text_type(cachevol_size)
lun_props = {
'custom:image_id': img_meta['id'],
'custom:updated_at': (
six.text_type(img_meta['updated_at'].isoformat())),
}
lun_props.update(specs)
cache_vol = {
'name': cachevol_props['cache_name'],
'id': img_meta['id'],
'size': cachevol_size,
}
LOG.debug('Creating cache volume %s.', cache_vol['name'])
try:
self.zfssa.create_lun(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache_vol['name'],
lunsize,
lcfg.zfssa_target_group,
lun_props)
super(ZFSSAISCSIDriver, self).copy_image_to_volume(context,
cache_vol,
img_service,
img_meta['id'])
self.zfssa.create_snapshot(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache_vol['name'],
cachevol_props['snap_name'])
except Exception as exc:
            exc_msg = (_('Failed to create cache volume %(volume)s. '
                         'Error: %(err)s')
                       % {'volume': cache_vol['name'],
                          'err': six.text_type(exc)})
            LOG.error(exc_msg)
self.zfssa.delete_lun(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache_vol['name'])
raise exception.VolumeBackendAPIException(data=exc_msg)
return cachevol_props['cache_name'], cachevol_props['snap_name']
def local_path(self, volume):
"""Not implemented."""
pass
def backup_volume(self, context, backup, backup_service):
"""Not implemented."""
pass
def restore_backup(self, context, backup, volume, backup_service):
"""Not implemented."""
pass
def _verify_clone_size(self, snapshot, size):
"""Check whether the clone size is the same as the parent volume."""
lcfg = self.configuration
lun = self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
snapshot['volume_name'])
return lun['size'] == size
def initialize_connection(self, volume, connector):
lcfg = self.configuration
init_groups = self.zfssa.get_initiator_initiatorgroup(
connector['initiator'])
if ((lcfg.zfssa_enable_local_cache is True) and
(volume['name'].startswith('os-cache-vol-'))):
project = lcfg.zfssa_cache_project
else:
project = lcfg.zfssa_project
for initiator_group in init_groups:
self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
project,
volume['name'],
initiator_group)
iscsi_properties = {}
provider = self._get_provider_info(volume)
(target_portal, iqn, lun) = provider['provider_location'].split()
iscsi_properties['target_discovered'] = False
iscsi_properties['target_portal'] = target_portal
iscsi_properties['target_iqn'] = iqn
iscsi_properties['target_lun'] = lun
iscsi_properties['volume_id'] = volume['id']
if 'provider_auth' in provider:
(auth_method, auth_username, auth_password) = provider[
'provider_auth'].split()
iscsi_properties['auth_method'] = auth_method
iscsi_properties['auth_username'] = auth_username
iscsi_properties['auth_password'] = auth_password
return {
'driver_volume_type': 'iscsi',
'data': iscsi_properties
}
def terminate_connection(self, volume, connector, **kwargs):
"""Driver entry point to terminate a connection for a volume."""
LOG.debug('terminate_connection: volume name: %s.', volume['name'])
lcfg = self.configuration
project = lcfg.zfssa_project
if ((lcfg.zfssa_enable_local_cache is True) and
(volume['name'].startswith('os-cache-vol-'))):
project = lcfg.zfssa_cache_project
self.zfssa.set_lun_initiatorgroup(lcfg.zfssa_pool,
project,
volume['name'],
'')
def _get_voltype_specs(self, volume):
"""Get specs suitable for volume creation."""
vtype = volume.get('volume_type_id', None)
extra_specs = None
if vtype:
extra_specs = volume_types.get_volume_type_extra_specs(vtype)
return self._get_specs(extra_specs)
def _get_specs(self, xspecs):
"""Return a dict with extra specs and/or config values."""
result = {}
for spc in ZFSSA_LUN_SPECS:
val = None
prop = spc.split(':')[1]
cfg = 'zfssa_lun_' + prop
if xspecs:
val = xspecs.pop(spc, None)
if val is None:
val = self.configuration.safe_get(cfg)
if val is not None and val != '':
result.update({prop: val})
return result
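    # Example of the resolution above (hypothetical input): with a volume
    # type carrying {'zfssa:compression': 'gzip'} and the config defaults,
    # _get_specs() returns
    #
    #   {'volblocksize': '8k', 'sparse': False,
    #    'compression': 'gzip', 'logbias': 'latency'}
    #
    # i.e. extra specs win over the corresponding zfssa_lun_* options.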
def migrate_volume(self, ctxt, volume, host):
LOG.debug('Attempting ZFSSA enabled volume migration. volume: %(id)s, '
'host: %(host)s, status=%(status)s.',
{'id': volume['id'],
'host': host,
'status': volume['status']})
lcfg = self.configuration
default_ret = (False, None)
if volume['status'] != "available":
LOG.debug('Only available volumes can be migrated using backend '
'assisted migration. Defaulting to generic migration.')
return default_ret
if (host['capabilities']['vendor_name'] != 'Oracle' or
host['capabilities']['storage_protocol'] != self.protocol):
LOG.debug('Source and destination drivers need to be Oracle iSCSI '
'to use backend assisted migration. Defaulting to '
'generic migration.')
return default_ret
if 'location_info' not in host['capabilities']:
LOG.debug('Could not find location_info in capabilities reported '
'by the destination driver. Defaulting to generic '
'migration.')
return default_ret
loc_info = host['capabilities']['location_info']
try:
(tgt_host, auth_str, tgt_pool, tgt_project, tgt_tgtgroup,
tgt_repl_ip) = loc_info.split(':')
except ValueError:
LOG.error(_LE("Location info needed for backend enabled volume "
"migration not in correct format: %s. Continuing "
"with generic volume migration."), loc_info)
return default_ret
if tgt_repl_ip == '':
msg = _LE("zfssa_replication_ip not set in cinder.conf. "
"zfssa_replication_ip is needed for backend enabled "
"volume migration. Continuing with generic volume "
"migration.")
LOG.error(msg)
return default_ret
src_pool = lcfg.zfssa_pool
src_project = lcfg.zfssa_project
try:
LOG.info(_LI('Connecting to target host: %s for backend enabled '
'migration.'), tgt_host)
self.tgt_zfssa.set_host(tgt_host)
self.tgt_zfssa.login(auth_str)
# Verify that the replication service is online
try:
self.zfssa.verify_service('replication')
self.tgt_zfssa.verify_service('replication')
except exception.VolumeBackendAPIException:
return default_ret
# ensure that a target group by the same name exists on the target
# system also, if not, use default migration.
lun = self.zfssa.get_lun(src_pool, src_project, volume['name'])
if lun['targetgroup'] != tgt_tgtgroup:
return default_ret
tgt_asn = self.tgt_zfssa.get_asn()
src_asn = self.zfssa.get_asn()
# verify on the source system that the destination has been
# registered as a replication target
tgts = self.zfssa.get_replication_targets()
targets = []
for target in tgts['targets']:
if target['asn'] == tgt_asn:
targets.append(target)
if targets == []:
LOG.debug('Target host: %(host)s for volume migration '
'not configured as a replication target '
'for volume: %(vol)s.',
{'host': tgt_repl_ip,
'vol': volume['name']})
return default_ret
# Multiple ips from the same appliance may be configured
# as different targets
for target in targets:
if target['address'] == tgt_repl_ip + ':216':
break
if target['address'] != tgt_repl_ip + ':216':
LOG.debug('Target with replication ip: %s not configured on '
'the source appliance for backend enabled volume '
'migration. Proceeding with default migration.',
tgt_repl_ip)
return default_ret
flow = lf.Flow('zfssa_volume_migration').add(
MigrateVolumeInit(),
MigrateVolumeCreateAction(provides='action_id'),
MigrateVolumeSendReplUpdate(),
MigrateVolumeSeverRepl(),
MigrateVolumeMoveVol(),
MigrateVolumeCleanUp()
)
taskflow.engines.run(flow,
store={'driver': self,
'tgt_zfssa': self.tgt_zfssa,
'tgt_pool': tgt_pool,
'tgt_project': tgt_project,
'volume': volume, 'tgt_asn': tgt_asn,
'src_zfssa': self.zfssa,
'src_asn': src_asn,
'src_pool': src_pool,
'src_project': src_project,
'target': target})
            return (True, None)
except Exception:
LOG.error(_LE("Error migrating volume: %s"), volume['name'])
raise
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:return model_update to update DB with any needed changes
"""
lcfg = self.configuration
original_name = CONF.volume_name_template % volume['id']
current_name = CONF.volume_name_template % new_volume['id']
LOG.debug('Renaming migrated volume: %(cur)s to %(org)s',
{'cur': current_name,
'org': original_name})
self.zfssa.set_lun_props(lcfg.zfssa_pool, lcfg.zfssa_project,
current_name, name=original_name)
return {'_name_id': None}
@utils.synchronized('zfssaiscsi', external=True)
def _check_origin(self, lun, volname):
"""Verify the cache volume of a bootable volume.
If the cache no longer has clone, it will be deleted.
There is a small lag between the time a clone is deleted and the number
of clones being updated accordingly. There is also a race condition
when multiple volumes (clones of a cache volume) are deleted at once,
leading to the number of clones reported incorrectly. The file lock is
here to avoid such issues.
"""
lcfg = self.configuration
cache = lun['origin']
numclones = -1
if (cache['snapshot'].startswith('image-') and
cache['share'].startswith('os-cache-vol')):
try:
numclones = self.zfssa.num_clones(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache['share'],
cache['snapshot'])
except Exception:
LOG.debug('Cache volume is already deleted.')
return
LOG.debug('Checking cache volume %(name)s, numclones = %(clones)d',
{'name': cache['share'], 'clones': numclones})
        # Sometimes numclones still holds an old value even when all clones
        # have been deleted, so we handle this situation separately here:
if numclones == 1:
try:
self.zfssa.get_lun(lcfg.zfssa_pool,
lcfg.zfssa_project,
volname)
# The volume does exist, so return
return
except exception.VolumeNotFound:
# The volume is already deleted
numclones = 0
if numclones == 0:
self.zfssa.delete_lun(lcfg.zfssa_pool,
lcfg.zfssa_cache_project,
cache['share'])
class MigrateVolumeInit(task.Task):
def execute(self, src_zfssa, volume, src_pool, src_project):
LOG.debug('Setting inherit flag on source backend to False.')
src_zfssa.edit_inherit_replication_flag(src_pool, src_project,
volume['name'], set=False)
def revert(self, src_zfssa, volume, src_pool, src_project, **kwargs):
LOG.debug('Rollback: Setting inherit flag on source appliance to '
'True.')
src_zfssa.edit_inherit_replication_flag(src_pool, src_project,
volume['name'], set=True)
class MigrateVolumeCreateAction(task.Task):
def execute(self, src_zfssa, volume, src_pool, src_project, target,
tgt_pool):
LOG.debug('Creating replication action on source appliance.')
action_id = src_zfssa.create_replication_action(src_pool,
src_project,
target['label'],
tgt_pool,
volume['name'])
self._action_id = action_id
return action_id
def revert(self, src_zfssa, **kwargs):
if hasattr(self, '_action_id'):
LOG.debug('Rollback: deleting replication action on source '
'appliance.')
src_zfssa.delete_replication_action(self._action_id)
class MigrateVolumeSendReplUpdate(task.Task):
def execute(self, src_zfssa, action_id):
LOG.debug('Sending replication update from source appliance.')
src_zfssa.send_repl_update(action_id)
LOG.debug('Deleting replication action on source appliance.')
src_zfssa.delete_replication_action(action_id)
self._action_deleted = True
class MigrateVolumeSeverRepl(task.Task):
def execute(self, tgt_zfssa, src_asn, action_id, driver):
source = tgt_zfssa.get_replication_source(src_asn)
        if not source:
            err = (_('Source with host ip/name: %s not found on the '
                     'target appliance for backend enabled volume '
                     'migration, proceeding with default migration.')
                   % driver.configuration.san_ip)
            LOG.error(err)
            raise exception.VolumeBackendAPIException(data=err)
LOG.debug('Severing replication package on destination appliance.')
tgt_zfssa.sever_replication(action_id, source['name'],
project=action_id)
class MigrateVolumeMoveVol(task.Task):
def execute(self, tgt_zfssa, tgt_pool, tgt_project, action_id, volume):
LOG.debug('Moving LUN to destination project on destination '
'appliance.')
tgt_zfssa.move_volume(tgt_pool, action_id, volume['name'], tgt_project)
LOG.debug('Deleting temporary project on destination appliance.')
tgt_zfssa.delete_project(tgt_pool, action_id)
self._project_deleted = True
def revert(self, tgt_zfssa, tgt_pool, tgt_project, action_id, volume,
**kwargs):
if not hasattr(self, '_project_deleted'):
LOG.debug('Rollback: deleting temporary project on destination '
'appliance.')
tgt_zfssa.delete_project(tgt_pool, action_id)
class MigrateVolumeCleanUp(task.Task):
def execute(self, driver, volume, tgt_zfssa):
LOG.debug('Finally, delete source volume on source appliance.')
driver.delete_volume(volume)
tgt_zfssa.logout()
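# A minimal standalone sketch of the taskflow pattern used by
# migrate_volume() above; the task and store names here are illustrative,
# not part of the driver:
#
#   import taskflow.engines
#   from taskflow import task
#   from taskflow.patterns import linear_flow as lf
#
#   class Step(task.Task):
#       def execute(self, volume):           # arguments come from 'store'
#           print('migrating %s' % volume)
#       def revert(self, volume, **kwargs):  # runs if a later task fails
#           print('rolling back %s' % volume)
#
#   flow = lf.Flow('demo').add(Step())
#   taskflow.engines.run(flow, store={'volume': 'vol-1'})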
|
|
"""The core job_stream adapter in Python. This file is responsible for
providing adaptations for all of the needed C++ code.
.. warning::
New users should look at using the :mod:`job_stream.inline` module instead
of the classes in this file.
Example usage:
.. code-block:: python
from job_stream import common
class AddOne(common.Job):
def handleWork(self, work):
self.emit(work + 1)
common.work.extend([ 8, 9 ])
common.run({
'jobs': [
{ 'type': AddOne }
]
})
# 9 and 10 will be printed
Or:
r = job_stream.map([8, 9], lambda w: w+1)
print(r)
# [ 9, 10 ] will be printed
"""
import _job_stream as _j
import multiprocessing
import os
import pickle
import six
import sys
import threading
import traceback
# Allow self-referencing
moduleSelf = globals()
import job_stream
# Classes waiting for _patchForMultiprocessing. We wait until _pool is initiated
# so that A) classes inheriting from one another are rewritten backwards so that they
# execute the original method, not the override, and B) so that their methods may be
# updated between class definition and job_stream.run()
_classesToPatch = []
_pool = [ None ]
_poolLock = threading.Lock()
def _initMultiprocessingPool():
"""The multiprocessing pool is initialized lazily by default, to avoid
overhead if no jobs are using multiprocessing"""
if _pool[0] is None:
with _poolLock:
if _pool[0] is None:
def initProcess():
if 'numpy.random' in sys.modules:
sys.modules['numpy.random'].seed()
_pool[0] = multiprocessing.Pool(processes=_j.getHostCpuCount(),
initializer=initProcess)
def _decode(s):
    """Decodes an object with pickle"""
    return pickle.loads(s)
def _encode(o):
    """Encodes an object with pickle"""
    return pickle.dumps(o, pickle.HIGHEST_PROTOCOL)
class Object(object):
"""A generic object with no attributes of its own."""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __repr__(self):
r = [ 'job_stream.Object(' ]
for k, v in self.__dict__.items():
r.append('{}={}, '.format(k, repr(v)))
r.append(')')
return ''.join(r)
class _StackAlreadyPrintedError(Exception):
"""An exception to be used if the stack trace has already been printed,
and an exception needs to be raised just to communicate to job_stream to
abort."""
# Initialize the encode and decode values first so that they can be used in
# debug code (if left uninitialized, any attempt to pickle something from within
# C++ code will crash with NoneType cannot be called)
# NOTE: With Python3, leaving these handles alive causes a SIGSEGV. Therefore,
# this happens in run().
# _j.registerEncoding(Object, _StackAlreadyPrintedError, _encode, _decode)
class _Work(list):
"""List of initial work sent into job_stream.
If left empty, work comes from stdin."""
work = _Work()
_localJobs = {}
_localJobId = [ 0 ]
def _localJobInit(obj):
_localJobs[obj.id] = obj
if hasattr(obj, 'emit'):
obj.emit = lambda *args: obj.emitters.append(args)
if hasattr(obj, 'recur'):
obj.recur = lambda *args: obj.recurs.append(args)
obj._forceCheckpoint = lambda *args: obj.forceCheckpoints.append(args)
def obj_reset():
obj.emitters = []
obj.recurs = []
obj.forceCheckpoints = []
obj._resetLocalJob = obj_reset
# Now call postSetup. Note that since we haven't called reset() yet, none of the
# arrays exist and so emit(), recur(), and _forceCheckpoint() will all crash
try:
obj.mPostSetup()
except:
traceback.print_exc()
raise _StackAlreadyPrintedError()
def _localCallNoStore(obj, method, *args):
if obj not in _localJobs:
return (0,)
o = _localJobs[obj]
o._resetLocalJob()
try:
_j._timerStart()
try:
getattr(o, method)(*args)
finally:
times = _j._timerPop()
except:
traceback.print_exc()
raise _StackAlreadyPrintedError()
return (1, o.emitters, o.recurs, o.forceCheckpoints, times)
def _localCallStoreFirst(obj, method, first, *args):
if obj not in _localJobs:
return (0,)
o = _localJobs[obj]
o._resetLocalJob()
try:
_j._timerStart()
try:
getattr(o, method)(first, *args)
finally:
times = _j._timerPop()
except:
traceback.print_exc()
raise _StackAlreadyPrintedError()
return (1, first, o.emitters, o.recurs, o.forceCheckpoints, times)
def _callNoStore(obj, method, *args):
_initMultiprocessingPool()
while True:
r = _pool[0].apply(_localCallNoStore, args = (obj.id, method) + args)
if r[0] == 0:
_pool[0].map(_localJobInit, [ obj ] * _pool[0]._processes)
else:
break
for eArgs in r[1]:
obj.emit(*eArgs)
for rArgs in r[2]:
obj.recur(*rArgs)
for fArgs in r[3]:
obj._forceCheckpoint(*fArgs)
    # Remainder of the result is the (wallTime, cpuTime) timing tuple
return r[4]
def _callStoreFirst(obj, method, first, *args):
_initMultiprocessingPool()
while True:
r = _pool[0].apply(_localCallStoreFirst,
args = (obj.id, method, first) + args)
if r[0] == 0:
_pool[0].map(_localJobInit, [ obj ] * _pool[0]._processes)
else:
break
first.__dict__ = r[1].__dict__
for eArgs in r[2]:
obj.emit(*eArgs)
for rArgs in r[3]:
obj.recur(*rArgs)
for fArgs in r[4]:
obj._forceCheckpoint(*fArgs)
    # Remainder of the result is the (wallTime, cpuTime) timing tuple
return r[5]
def _hierarchicalName(cls, name = None):
fullname = cls.__module__
n = name or cls.__name__
if fullname == '__main__':
fullname = n
else:
fullname += '.' + n
return fullname
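# For example, a class Foo defined in module pkg.mod is registered under
# 'pkg.mod.Foo', while the same class defined in a __main__ script is
# registered simply as 'Foo'.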
class _Job__metaclass__(type(_j.Job)):
def __init__(cls, name, bases, attrs):
type(_j.Job).__init__(cls, name, bases, attrs)
        # Derive the hierarchical name and use that in the config
        fullname = _hierarchicalName(cls, name)
        # The metaclass also fires for the base class definition itself, so...
if fullname == 'job_stream.common.Job':
return
_classesToPatch.append(cls)
_j.registerJob(fullname, cls)
class Job(six.with_metaclass(_Job__metaclass__, _j.Job)):
"""Base class for a standard job (starts with some work, and emits zero or
more times). Handles registration of job class with the job_stream
system.
Example:
import job_stream
class MyJob(job_stream.Job):
'''Adds 8 to an integer or floating point number'''
def handleWork(self, work):
self.emit(work + 8)
job_stream.common.work = [ 1, 2, 3.0 ]
# This will print 9, 10, and 11.0
job_stream.run({ 'jobs': [ MyJob ] })
"""
USE_MULTIPROCESSING = True
USE_MULTIPROCESSING_doc = """If True [default {}], job_stream automatically handles
overloading the class' methods and serializing everything so that the GIL is
circumvented. While this defaults to True as it is low overhead, lots of jobs
do not need multiprocessing if they are using other python libraries or operations
that release the GIL.""".format(USE_MULTIPROCESSING)
@classmethod
def _patchForMultiprocessing(cls):
if hasattr(cls, '_MULTIPROCESSING_PATCHED'):
return
cls._MULTIPROCESSING_PATCHED = True
def newInit(self):
super(cls, self).__init__()
self.id = _localJobId[0]
_localJobId[0] += 1
cls.__init__ = newInit
cls.mHandleWork = cls.handleWork
cls.handleWork = lambda self, *args: _callNoStore(self, "mHandleWork",
*args)
# We do not call postSetup when job_stream requests it. This is because
# our jobs must be set up in each thread, so we defer until it is called
# in a thread.
cls.mPostSetup = cls.postSetup
cls.postSetup = lambda self: True
def postSetup(self):
"""Called when self.config is set and the Job is fully ready for work,
but before any work is accepted."""
pass
def handleWork(self, work):
"""Handle incoming work, maybe call self.emit() to generate more work
for jobs further down the pipe."""
raise NotImplementedError()
class _Reducer__metaclass__(type(_j.Reducer)):
def __init__(cls, name, bases, attrs):
type(_j.Reducer).__init__(cls, name, bases, attrs)
        # Derive the hierarchical name and use that in the config
        fullname = _hierarchicalName(cls, name)
        # The metaclass also fires for the base class definition itself, so...
if fullname == 'job_stream.common.Reducer':
return
_classesToPatch.append(cls)
_j.registerReducer(fullname, cls)
class Reducer(six.with_metaclass(_Reducer__metaclass__, _j.Reducer)):
"""Base class for a Reducer. A Reducer combines work emitted from the last
stage of a reduction, eventually emitting its own result to the next link
in the processing chain. A reduction starts when a piece of work enters
a module guarded by a Reducer.
Example:
import job_stream
class AddLetterA(job_stream.Job):
def handleWork(self, w):
self.emit(w + 'A')
class CountLetters(job_stream.Reducer):
'''Counts the number of letters passed to it'''
def handleInit(self, store):
store.count = 0
def handleAdd(self, store, work):
store.count += len(work)
def handleJoin(self, store, other):
store.count += other.count
def handleDone(self, store):
self.emit(store.count)
job_stream.common.work = [ 'Hello', 'World' ]
# Here the reduction starts at the global scope, so it will print out 12,
# which is the original 10 letters plus the two new letter A's.
print("First:")
job_stream.run({
'reducer': CountLetters,
'jobs': [ AddLetterA ]
})
# This config has the reduction starting as part of the first job rather
# than the global scope, so this will print 6 twice (once for each work that
# we initially passed in).
print("Second:")
job_stream.run({
'jobs': [
{
'reducer': CountLetters,
'jobs': [ AddLetterA ]
}
]
})
"""
USE_MULTIPROCESSING = Job.USE_MULTIPROCESSING
USE_MULTIPROCESSING_doc = Job.USE_MULTIPROCESSING_doc
@classmethod
def _patchForMultiprocessing(cls):
if hasattr(cls, '_MULTIPROCESSING_PATCHED'):
return
cls._MULTIPROCESSING_PATCHED = True
def newInit(self):
super(cls, self).__init__()
self.id = _localJobId[0]
_localJobId[0] += 1
cls.__init__ = newInit
for oldName in [ 'handleInit', 'handleAdd', 'handleJoin', 'handleDone' ]:
newName = 'm' + oldName[0].upper() + oldName[1:]
setattr(cls, newName, getattr(cls, oldName))
closure = lambda newName: lambda self, *args: _callStoreFirst(self,
newName, *args)
setattr(cls, oldName, closure(newName))
# We do not call postSetup when job_stream requests it. This is because
# our jobs must be set up in each thread, so we defer until it is called
# in a thread.
cls.mPostSetup = cls.postSetup
cls.postSetup = lambda self: True
def postSetup(self):
"""Called when self.config is set and the Job is fully ready for work,
but before any work is accepted."""
pass
def handleInit(self, store):
"""Called when a reduction is started. Store is a python object that
should be modified to remember information between invocations."""
def handleAdd(self, store, work):
"""Called when new work arrives at the Reducer."""
raise NotImplementedError()
def handleJoin(self, store, other):
"""Called to merge two stores from the same Reducer."""
raise NotImplementedError()
def handleDone(self, store):
"""Called when the reduction is finished. The reduction will be marked
as unfinished if a recur() happens."""
raise NotImplementedError()
class _Frame__metaclass__(type(_j.Frame)):
    def __init__(cls, name, bases, attrs):
        type(_j.Frame).__init__(cls, name, bases, attrs)
        # Derive the hierarchical name and use that in the config
        fullname = _hierarchicalName(cls, name)
        # The metaclass also fires for the base class definition itself, so...
if fullname == 'job_stream.common.Frame':
return
_classesToPatch.append(cls)
_j.registerFrame(fullname, cls)
class Frame(six.with_metaclass(_Frame__metaclass__, _j.Frame)):
"""Base class for a Frame. A Frame is a special type of reducer that
performs some special behavior based on the work that begins the reduction.
Typically this is used for checking termination conditions in a recurring
algorithm:
import job_stream
class AddAb(job_stream.Job):
def handleWork(self, w):
self.emit(w + 'Ab')
class MakeAtLeastTenLetters(job_stream.Frame):
def handleFirst(self, store, w):
store.word = w
def handleNext(self, store, w):
store.word = w
def handleDone(self, store):
if len(store.word) < 10:
self.recur(store.word)
else:
self.emit(store.word)
job_stream.common.work = [ 'abracadabra', 'Hey', 'Apples' ]
# This'll print out the unmodified abracadabra, add two Ab's to Apples, and
# four Ab's to Hey
job_stream.common.run({
'jobs': [ {
'frame': MakeAtLeastTenLetters,
'jobs': [ AddAb ]
} ]
})
"""
USE_MULTIPROCESSING = Job.USE_MULTIPROCESSING
USE_MULTIPROCESSING_doc = Job.USE_MULTIPROCESSING_doc
@classmethod
def _patchForMultiprocessing(cls):
if hasattr(cls, '_MULTIPROCESSING_PATCHED'):
return
cls._MULTIPROCESSING_PATCHED = True
def newInit(self):
super(cls, self).__init__()
self.id = _localJobId[0]
_localJobId[0] += 1
cls.__init__ = newInit
for oldName in [ 'handleFirst', 'handleNext', 'handleDone' ]:
newName = 'm' + oldName[0].upper() + oldName[1:]
setattr(cls, newName, getattr(cls, oldName))
closure = lambda newName: lambda self, *args: _callStoreFirst(self,
newName, *args)
setattr(cls, oldName, closure(newName))
# We do not call postSetup when job_stream requests it. This is because
# our jobs must be set up in each thread, so we defer until it is called
# in a thread.
cls.mPostSetup = cls.postSetup
cls.postSetup = lambda self: True
def handleFirst(self, store, work):
"""Called for the first work, which starts a reduction. Store is an
empty Object() to which this method may assign attributes."""
raise NotImplementedError()
def handleNext(self, store, work):
"""Called when finished work arrives at the Frame."""
raise NotImplementedError()
def handleDone(self, store):
"""Called when the reduction is finished. The reduction will be marked
as unfinished if a recur() happens."""
raise NotImplementedError()
def postSetup(self):
"""Called when self.config is set and the Frame is fully ready for work,
but before any work is accepted."""
def _convertDictToYaml(c):
levels = [ { 'type': dict, 'vals': iter(c.items()) } ]
result = []
def cueLine():
result.append(" " * (len(levels) - 1))
while levels:
try:
val = next(levels[-1]['vals'])
except StopIteration:
levels.pop()
continue
cueLine()
if levels[-1]['type'] == dict:
key, dval = val
result.append(key)
result.append(": ")
val = dval
elif levels[-1]['type'] == list:
result.append("- ")
# Now, the value part
if isinstance(val, dict):
# MUST be sorted, otherwise checkpoint files can break since the
# config files will not match.
levels.append({ 'type': dict, 'vals': iter(sorted(val.items())) })
elif isinstance(val, list):
if len(val) == 0:
result.append("[]")
else:
levels.append({ 'type': list, 'vals': iter(val) })
elif isinstance(val, (int, float, str)):
result.append(str(val))
elif isinstance(val, type) and issubclass(val, (Frame, Job, Reducer)):
result.append(_hierarchicalName(val))
else:
raise ValueError("Unrecognized YAML object: {}: {}".format(key,
val))
result.append("\n")
result = ''.join(result)
return result
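# Example of the conversion above, assuming AddOne is a Job subclass
# defined in __main__: the config { 'jobs': [ { 'type': AddOne } ] }
# serializes (with the exact whitespace emitted by cueLine) to
#
#   jobs: 
#    - 
#     type: AddOne
#
# Nested dicts are emitted with sorted keys so the YAML embedded in a
# checkpoint always matches the config on restore.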
checkpointInfo = _j.checkpointInfo
def _cpuThreadTime():
"""Returns the current thread's CPU time in seconds. Used for tests of
profiling, mostly.
"""
return _j._cpuThreadTimeMs() * 0.001
def getCpuCount():
"""Retunrs the number of CPUs in the cluster. Must be called within a
job_stream, or will raise an error.
"""
return _j.getCpuCount()
def getHostCpuCount():
"""Returns the number of CPUs on this host. Must be called within a
job_stream, or will raise an error.
"""
return _j.getHostCpuCount()
def getRank():
"""Returns the rank (integer index) of this processor. Typically, this
value is checked against 0 for work that should only happen once, e.g.
init code."""
return _j.getRank()
def invoke(progAndArgs, transientErrors = [], maxRetries = 20):
"""Since it can be difficult to launch some programs from an MPI distributed
application, job_stream provides invoke functionality to safely launch an
external program (launching an application such as Xyce, for instance, can
cause problems if the environment variables are not doctored appropriately).
progAndArgs: list, [ 'path/to/file', *args ]
transientErrors: list of strings, if any of these strings are found in the
stderr of the program, then any non-zero return code is considered
a transient error and the application will be re-launched up to
maxRetries times.
Note that "No child processes" is automatically handled as
transient.
maxRetries: The maximum number of times to run the application if there are
transient errors. Only the final (successful) results are returned.
Returns: (contents of stdout, contents of stderr)
"""
return _j.invoke(progAndArgs, transientErrors, maxRetries)
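# Usage sketch (illustrative command):
#
#   out, err = common.invoke(['/bin/echo', 'hello'])
#   # out == 'hello\n', err == ''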
def map(func, *sequence, **kwargs):
"""Returns [ func(*a) for a in sequence ] in a parallel manner. Identical
to the builtin map(), except parallelized.
kwargs - Passed to job_stream.run()."""
job_stream.common.work = _Work()
job_stream.common.work.extend([ (i, a)
for i, a in enumerate(zip(*sequence)) ])
result = [ None for _ in range(len(job_stream.common.work)) ]
def handleWork(s, work):
s.emit((work[0], func(*work[1])))
mapCls = type("_map__MapJob__", (Job,), dict(handleWork=handleWork))
moduleSelf[mapCls.__name__] = mapCls
def _mapResult(w):
result[w[0]] = w[1]
run(
{ 'jobs': [
{ 'type': mapCls }
]},
handleResult = _mapResult,
**kwargs
)
return result
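# Usage sketch: the sequences are zipped, so func receives one argument per
# sequence:
#
#   common.map(lambda a, b: a + b, [1, 2], [10, 20])   # -> [11, 22]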
_hasRun = [ False, False ]
def run(configDictOrPath, **kwargs):
"""Runs the given YAML file or config dictionary.
Acceptable kwargs:
checkpointFile - (string) The file to use for checkpoint / restore
checkpointInterval - (float) The time between the completion of one
checkpoint and the starting of the next, in seconds.
checkpointSyncInterval - (float) The time between all processors
thinking they're ready to checkpoint and the actual checkpoint.
handleResult - (callable) The default is to print out repr(result). If
specified, this function will be called instead with the output
work as an argument. Note that this goes outside of checkpointing!
If you are saving work into an array, for example, and want to be
checkpoint-safe, this method MUST save what it needs to file.
"""
if isinstance(configDictOrPath, str):
# Path to file
config = open(configDictOrPath).read()
elif isinstance(configDictOrPath, dict):
config = _convertDictToYaml(configDictOrPath)
else:
raise ValueError("configDictOrPath was not dict or filename!")
for cls in reversed(_classesToPatch):
if cls.USE_MULTIPROCESSING:
cls._patchForMultiprocessing()
if 'handleResult' not in kwargs:
def handleResult(r):
"""Process an output work. Note that this function is responsible for
checkpointing!
"""
print(repr(r))
else:
handleResult = kwargs.pop('handleResult')
if 'checkpointFile' in kwargs and not kwargs['checkpointFile']:
kwargs.pop('checkpointFile')
# Flag as having been run before; multiple job_streams plus checkpoints do
# not mix well!
withCheckpoint = False
if 'checkpointFile' in kwargs or 'JOBS_CHECKPOINT' in os.environ:
withCheckpoint = True
_hasRun[1] = _hasRun[1] or withCheckpoint
if _hasRun[0] and _hasRun[1]:
raise ValueError("Cannot run more than one job_stream when using "
"checkpoints; there is no suitable control mechanism for "
"resolving which checkpoint belongs to which job_stream.")
_hasRun[0] = True
try:
_j.registerEncoding(Object, _StackAlreadyPrintedError, _encode,
_decode)
_j.runProcessor(config, list(job_stream.common.work), handleResult,
**kwargs)
finally:
_j.registerEncoding(None, None, None, None)
# Close our multiprocessing pool; especially in the interpreter, the
# pool must be launched AFTER all classes are defined. So if we define
# a class in between invocations of run(), we still want them to work
if _pool[0] is not None:
p = _pool[0]
_pool[0] = None
p.terminate()
p.join()
|
|
"""
Test suite for the selenium test cases
"""
import os
import pytest
import time
from django.urls import reverse
from tests import BaseSeleniumTest
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
@pytest.mark.online
@pytest.mark.selenium
class TestBulkEolCheckFunction(BaseSeleniumTest):
def test_optional_product_migration_entry(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# open the new Product Check page
browser.get(liveserver + reverse("productdb:create-product_check"))
browser.find_element_by_id("navbar_login").click()
homepage_message = "New Product Check"
self.login_user(browser, self.API_USERNAME, self.API_PASSWORD, homepage_message)
# the page contains a text field, where the product IDs must be entered
expected_text = "On this page, you can execute a bulk Product check of multiple Products against the local " \
"database. Please enter a list of Product IDs in the following text field separated by line " \
"breaks, e.g."
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_text)
assert "Migration source" not in browser.find_element_by_tag_name('body').text
# enable optional product migration source selection
browser.find_element_by_id("navbar_loggedin").click()
browser.find_element_by_id("navbar_loggedin_user_profile").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "Edit User Profile")
browser.find_element_by_id("id_choose_migration_source").click()
browser.find_element_by_id("submit").click()
time.sleep(3)
# open the bulk eol check page
browser.get(liveserver + reverse("productdb:create-product_check"))
# the page contains a text field, where the product IDs must be entered
expected_text = "On this page, you can execute a bulk Product check of multiple Products against the local " \
"database. Please enter a list of Product IDs in the following text field separated by line " \
"breaks, e.g."
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_text)
assert "Migration source" in browser.find_element_by_tag_name('body').text
# end session
browser.get(liveserver + reverse("logout"))
def test_with_valid_query(self, browser, liveserver, test_download_dir):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
# open the new Product Check page
browser.get(liveserver + reverse("productdb:create-product_check"))
# the page contains a text field, where the product IDs must be entered
expected_text = "On this page, you can execute a bulk Product check of multiple Products against the local " \
"database. Please enter a list of Product IDs in the following text field separated by line " \
"breaks, e.g."
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_text)
# enter the query and submit (whitespace is stripped)
sample_eol_query = """WS-C2960-24LC-S
WS-C2960-24LC-S
WS-C2960-24LC-S
WS-C2960-24LC-S
WS-C2960-24LT-L
WS-C2960-24PC-S
WS-C2960X-24PD-L
WS-C2960X-24PD-L
WS-C2960X-24PD-L
MOH
WS-C2960-48PST-S
WS-C2960-24TC-L
MOH
WS-C2960-24TC-S
WS-C2960-24TT-L"""
browser.find_element_by_id("id_name").send_keys("Test")
browser.find_element_by_id("id_input_product_ids").send_keys(sample_eol_query)
browser.find_element_by_id("submit").click()
# verify result within the product summary table
expected_product_summary_row = "Cisco Systems WS-C2960-24LC-S 4 End of Support No"
expected_not_found_query = "MOH 2 Not found in Database --- --- ---"
# test that the Vendor Bulletin is not visible by default
assert "Vendor Bulletin" not in browser.find_element_by_tag_name("body").text
table = browser.find_element_by_class_name("table")
rows = table.find_elements_by_tag_name('tr')
assert expected_product_summary_row in [row.text for row in rows]
assert expected_not_found_query in [row.text for row in rows]
# scroll down
text_element = browser.find_element_by_class_name("alert-warning")
browser.execute_script("return arguments[0].scrollIntoView();", text_element)
# view the Vendor Bulletin
browser.find_element_by_xpath("//button[span='show additional columns ']").click()
browser.find_element_by_link_text("Vendor Bulletin").click()
browser.find_element_by_xpath("//button[span='show additional columns ']").send_keys(Keys.SPACE)
time.sleep(3)
WebDriverWait(browser, 10).until(EC.invisibility_of_element_located((
By.XPATH,
"//div[@class='dt-button-background']")
))
WebDriverWait(browser, 10).until(EC.element_to_be_clickable((
By.XPATH,
"//button[span='CSV']")
))
# test CSV download of the result table
browser.find_element_by_xpath("//button[span='CSV']").click()
time.sleep(5)
# The file should download automatically (firefox is configured this way)
        # verify that the file is a CSV formatted file (with ";" as delimiter)
# verify that the second line contains a link (not the Bulletin number)
file = os.path.join(test_download_dir, "product check - Test.csv")
header_line = "Vendor;Product ID;Amount;Lifecycle State;Replacement Product ID;Replacement suggested by;" \
"Vendor Bulletin;LC auto-sync"
with open(file, "r", encoding="utf-8") as f:
content = f.read().splitlines()
assert header_line == content[0]
for line in content:
if "http://www.cisco.com/en/" in line:
break
else:
# no line matches, test failed
pytest.fail("expected content not found in file")
# test that the table view is stored
browser.execute_script("window.scrollTo(0, 0)")
time.sleep(1)
browser.find_element_by_id("_back").click()
self.wait_for_text_to_be_displayed_in_body_tag(browser, "All Product Checks")
time.sleep(2)
# go back to the product check view
browser.find_element_by_partial_link_text("Test").click()
time.sleep(5)
# test that the Vendor Bulletin is still visible (table state should persist)
assert "Vendor Bulletin" in browser.find_element_by_tag_name("body").text
# create new product check
browser.get(liveserver + reverse("productdb:create-product_check"))
browser.find_element_by_id("id_name").send_keys("Test")
browser.find_element_by_id("id_input_product_ids").send_keys(sample_eol_query)
browser.find_element_by_id("submit").click()
time.sleep(5)
# the new product check table should be displayed with the default options (without e.g. the Vendor Bulletin)
assert "Vendor Bulletin" not in browser.find_element_by_tag_name("body").text
    def test_visibility_of_product_checks(self, browser, liveserver):
self.api_helper.drop_all_data(liveserver)
self.api_helper.load_base_test_data(liveserver)
anonymous_product_check_name = "Public created Product Check"
private_product_check = "Private API User Product Check"
public_product_check = "Public API User Product Check"
sample_eol_query = """WS-C2960-24LC-S
WS-C2960-24LC-S
WS-C2960-24LC-S
WS-C2960-24LC-S
WS-C2960-24LT-L
WS-C2960-24PC-S
WS-C2960X-24PD-L
WS-C2960X-24PD-L
WS-C2960X-24PD-L
MOH
WS-C2960-48PST-S
WS-C2960-24TC-L
MOH
WS-C2960-24TC-S
WS-C2960-24TT-L"""
# open the new Product Check page
browser.get(liveserver + reverse("productdb:create-product_check"))
browser.find_element_by_id("id_name").send_keys(anonymous_product_check_name)
browser.find_element_by_id("id_input_product_ids").send_keys(sample_eol_query)
browser.find_element_by_id("submit").click()
time.sleep(2)
self.wait_for_text_to_be_displayed_in_body_tag(browser, "All Product Checks are")
# verify result
assert "All Product Checks are deleted every week on Sunday." in browser.find_element_by_tag_name("body").text
# verify list entries
browser.get(liveserver + reverse("productdb:list-product_checks"))
assert anonymous_product_check_name in browser.find_element_by_id("product_check_table").text
# login as API user
browser.get(liveserver + reverse("productdb:create-product_check"))
time.sleep(2)
browser.find_element_by_id("navbar_login").click()
time.sleep(2)
homepage_message = "New Product Check"
self.login_user(browser, self.API_USERNAME, self.API_PASSWORD, homepage_message)
# the page contains a text field, where the product IDs must be entered
expected_text = "On this page, you can execute a bulk Product check of multiple Products against the local " \
"database. Please enter a list of Product IDs in the following text field separated by line " \
"breaks, e.g."
self.wait_for_text_to_be_displayed_in_body_tag(browser, expected_text)
browser.find_element_by_id("id_name").send_keys(private_product_check)
browser.find_element_by_id("id_input_product_ids").send_keys(sample_eol_query)
browser.find_element_by_id("submit").click()
time.sleep(2)
self.wait_for_text_to_be_displayed_in_body_tag(browser, "All Product Checks are")
# verify result
assert "All Product Checks are deleted every week on Sunday." in browser.find_element_by_tag_name("body").text
# verify list entries
browser.get(liveserver + reverse("productdb:list-product_checks"))
assert private_product_check in browser.find_element_by_id("product_check_table").text
browser.get(liveserver + reverse("productdb:create-product_check"))
browser.find_element_by_id("id_name").send_keys(public_product_check)
browser.find_element_by_id("id_input_product_ids").send_keys(sample_eol_query)
browser.find_element_by_id("id_public_product_check").click()
browser.find_element_by_id("submit").click()
time.sleep(2)
self.wait_for_text_to_be_displayed_in_body_tag(browser, "All Product Checks are")
# verify result
assert "All Product Checks are deleted every week on Sunday." in browser.find_element_by_tag_name("body").text
browser.get(liveserver + reverse("productdb:list-product_checks"))
assert anonymous_product_check_name in browser.find_element_by_id("product_check_table").text
assert private_product_check in browser.find_element_by_id("product_check_table").text
assert public_product_check in browser.find_element_by_id("product_check_table").text
# logout
browser.find_element_by_id("navbar_loggedin").click()
browser.find_element_by_id("navbar_loggedin_logout").click()
time.sleep(3)
browser.get(liveserver + reverse("productdb:list-product_checks"))
# verify table entries
assert private_product_check not in browser.find_element_by_id("product_check_table").text
assert anonymous_product_check_name in browser.find_element_by_id("product_check_table").text
assert public_product_check in browser.find_element_by_id("product_check_table").text
        # log in as root (even root should see only the public and anonymous Product Checks)
browser.get(liveserver + reverse("productdb:list-product_checks"))
browser.find_element_by_id("navbar_login").click()
time.sleep(3)
homepage_message = "All Product Checks"
self.login_user(browser, self.ADMIN_USERNAME, self.ADMIN_PASSWORD, homepage_message)
# verify table entries
assert private_product_check not in browser.find_element_by_id("product_check_table").text
assert anonymous_product_check_name in browser.find_element_by_id("product_check_table").text
assert public_product_check in browser.find_element_by_id("product_check_table").text
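        # The fixed time.sleep() calls above keep this test simple but make it
        # slow and timing-sensitive. A minimal sketch of an explicit wait that
        # could replace them (standard selenium API; the locator used here is
        # illustrative):
        #
        #   from selenium.webdriver.common.by import By
        #   from selenium.webdriver.support.ui import WebDriverWait
        #   from selenium.webdriver.support import expected_conditions as EC
        #   WebDriverWait(browser, 10).until(
        #       EC.presence_of_element_located((By.ID, "product_check_table")))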
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jobs for statistics views."""
import ast
import collections
import datetime
from core import jobs
from core.domain import exp_services
from core.platform import models
(base_models, stats_models, exp_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.statistics, models.NAMES.exploration
])
transaction_services = models.Registry.import_transaction_services()
import feconf
import utils
from google.appengine.ext import ndb
# Counts contributions from all versions.
_VERSION_ALL = 'all'
# Indicates that no version has been specified.
_VERSION_NONE = 'none'
# This date represents the date we stopped using StateCounterModel.
# This is here because StateCounterModel did not explicitly record
# a start event. It only used the hit count for the start state.
# This means that we need to figure out what the start state was
# during the StateCounterModel time period so that we can select the
# correct state hits to count as starts.
_STATE_COUNTER_CUTOFF_DATE = datetime.datetime(2014, 10, 11, 0, 0, 0)
# States with this name used to be treated as a pseudo-end state, but are not
# anymore. This is kept here until the stats job is updated to work with
# proper terminal states, rather than a hardcoded END pseudostate.
# TODO(bhenning): fix this
OLD_END_DEST = 'END'
class StatisticsRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
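    """Realtime model that tracks the number of starts and completions of an
    exploration within the active realtime layer.
    """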
num_starts = ndb.IntegerProperty(default=0)
num_completions = ndb.IntegerProperty(default=0)
class StatisticsAggregator(jobs.BaseContinuousComputationManager):
"""A continuous-computation job that counts 'start exploration' and
'complete exploration' events.
"""
@classmethod
def get_event_types_listened_to(cls):
return [
feconf.EVENT_TYPE_START_EXPLORATION,
feconf.EVENT_TYPE_COMPLETE_EXPLORATION]
@classmethod
def _get_realtime_datastore_class(cls):
return StatisticsRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return StatisticsMRJobManager
@classmethod
def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
exp_id = args[0]
def _increment_visit_counter():
realtime_class = cls._get_realtime_datastore_class()
realtime_model_id = realtime_class.get_realtime_id(
active_realtime_layer, exp_id)
model = realtime_class.get(realtime_model_id, strict=False)
if model is None:
realtime_class(
id=realtime_model_id, num_starts=1,
realtime_layer=active_realtime_layer).put()
else:
model.num_starts += 1
model.put()
def _increment_completion_counter():
realtime_class = cls._get_realtime_datastore_class()
realtime_model_id = realtime_class.get_realtime_id(
active_realtime_layer, exp_id)
model = realtime_class.get(realtime_model_id, strict=False)
if model is None:
realtime_class(
id=realtime_model_id, num_completions=1,
realtime_layer=active_realtime_layer).put()
else:
model.num_completions += 1
model.put()
if event_type == feconf.EVENT_TYPE_START_EXPLORATION:
transaction_services.run_in_transaction(
_increment_visit_counter)
else:
transaction_services.run_in_transaction(
_increment_completion_counter)
# Public query method.
@classmethod
def get_statistics(cls, exploration_id, exploration_version):
"""
Args:
- exploration_id: id of the exploration to get statistics for
- exploration_version: str. Which version of the exploration to get
statistics for; this can be a version number, the string 'all',
or the string 'none'.
        Returns a dict with the following keys:
        - 'start_exploration_count': # of times exploration was started
        - 'complete_exploration_count': # of times exploration was completed
        - 'state_hit_counts': a dict containing the hit counts for the states
           in the exploration. It is formatted as follows:
            {
                state_name: {
                    'first_entry_count': # of sessions which hit this state
                    'total_entry_count': # of total hits for this state
                    'no_answer_count': # of hits with no answer for this state
                }
            }
        - 'last_updated': time of the batch model's last update, in
           milliseconds since the Epoch, or None if no batch model exists yet
        """
num_starts = 0
num_completions = 0
state_hit_counts = {}
last_updated = None
entity_id = stats_models.ExplorationAnnotationsModel.get_entity_id(
exploration_id, exploration_version)
mr_model = stats_models.ExplorationAnnotationsModel.get(
entity_id, strict=False)
if mr_model is not None:
num_starts += mr_model.num_starts
num_completions += mr_model.num_completions
state_hit_counts = mr_model.state_hit_counts
last_updated = utils.get_time_in_millisecs(mr_model.last_updated)
realtime_model = cls._get_realtime_datastore_class().get(
cls.get_active_realtime_layer_id(exploration_id), strict=False)
if realtime_model is not None:
num_starts += realtime_model.num_starts
num_completions += realtime_model.num_completions
return {
'start_exploration_count': num_starts,
'complete_exploration_count': num_completions,
'state_hit_counts': state_hit_counts,
'last_updated': last_updated,
}
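    # Illustrative query (exploration id 'eid0' is hypothetical); passing
    # _VERSION_ALL aggregates the counts over every version of the
    # exploration:
    #
    #   stats = StatisticsAggregator.get_statistics('eid0', _VERSION_ALL)
    #   print stats['start_exploration_count']
    #   print stats['complete_exploration_count']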
class StatisticsMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
"""Job that calculates and creates stats models for exploration view.
Includes: * number of visits to the exploration
* number of completions of the exploration
"""
_TYPE_STATE_COUNTER_STRING = 'counter'
_TYPE_EVENT_STRING = 'event'
@classmethod
def _get_continuous_computation_class(cls):
return StatisticsAggregator
@classmethod
def entity_classes_to_map_over(cls):
return [stats_models.StartExplorationEventLogEntryModel,
stats_models.MaybeLeaveExplorationEventLogEntryModel,
stats_models.CompleteExplorationEventLogEntryModel,
stats_models.StateHitEventLogEntryModel,
stats_models.StateCounterModel]
@staticmethod
def map(item):
if StatisticsMRJobManager._entity_created_before_job_queued(item):
if isinstance(item, stats_models.StateCounterModel):
first_dot_index = item.id.find('.')
exploration_id = item.id[:first_dot_index]
state_name = item.id[first_dot_index + 1:]
value = {
'type': StatisticsMRJobManager._TYPE_STATE_COUNTER_STRING,
'exploration_id': exploration_id,
'version': _VERSION_NONE,
'state_name': state_name,
'first_entry_count': item.first_entry_count,
'subsequent_entries_count': item.subsequent_entries_count,
'resolved_answer_count': item.resolved_answer_count,
'active_answer_count': item.active_answer_count}
yield (
'%s:%s' % (exploration_id, _VERSION_NONE),
value)
yield ('%s:%s' % (exploration_id, _VERSION_ALL), value)
else:
version = _VERSION_NONE
if item.exploration_version is not None:
version = str(item.exploration_version)
value = {
'type': StatisticsMRJobManager._TYPE_EVENT_STRING,
'event_type': item.event_type,
'session_id': item.session_id,
'state_name': item.state_name,
'created_on': utils.get_time_in_millisecs(item.created_on),
'exploration_id': item.exploration_id,
'version': version}
yield ('%s:%s' % (item.exploration_id, version), value)
yield ('%s:%s' % (item.exploration_id, _VERSION_ALL), value)
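    # Both branches above key their output as '<exploration_id>:<version>':
    # e.g. an event for version 3 of exploration 'eid0' is emitted under
    # 'eid0:3' and again under 'eid0:all', so reduce() sees per-version and
    # all-version groups.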
@staticmethod
def reduce(key, stringified_values):
exploration = None
exp_id, version = key.split(':')
try:
if version == _VERSION_NONE:
exploration = exp_services.get_exploration_by_id(exp_id)
# Rewind to the last commit before the transition from
# StateCounterModel.
current_version = exploration.version
while (exploration.last_updated > _STATE_COUNTER_CUTOFF_DATE
and current_version > 1):
current_version -= 1
exploration = exp_models.ExplorationModel.get_version(
exp_id, current_version)
elif version == _VERSION_ALL:
exploration = exp_services.get_exploration_by_id(exp_id)
else:
exploration = exp_services.get_exploration_by_id(
exp_id, version=version)
except base_models.BaseModel.EntityNotFoundError:
return
# Number of times exploration was started
new_models_start_count = 0
# Number of times exploration was completed
new_models_complete_count = 0
# Session ids that have completed this state
new_models_end_sessions = set()
# {session_id: (created-on timestamp of last known maybe leave event,
# state_name)}
session_id_to_latest_leave_event = collections.defaultdict(
lambda: (0, ''))
old_models_start_count = 0
old_models_complete_count = 0
# {state_name: {'total_entry_count': ...,
# 'first_entry_count': ...,
# 'no_answer_count': ...}}
state_hit_counts = collections.defaultdict(
lambda: collections.defaultdict(int))
for state_name in exploration.states:
state_hit_counts[state_name] = {
'total_entry_count': 0,
'first_entry_count': 0,
'no_answer_count': 0,
}
# {state_name: set(session ids that have reached this state)}
state_session_ids = collections.defaultdict(set)
for state_name in exploration.states:
state_session_ids[state_name] = set([])
# Iterate over and process each event for this exploration.
for value_str in stringified_values:
value = ast.literal_eval(value_str)
state_name = value['state_name']
# Convert the state name to unicode, if necessary.
# Note: sometimes, item.state_name is None for
# StateHitEventLogEntryModel.
# TODO(sll): Track down the reason for this, and fix it.
if (state_name is not None and
not isinstance(state_name, unicode)):
state_name = state_name.decode('utf-8')
if (value['type'] ==
StatisticsMRJobManager._TYPE_STATE_COUNTER_STRING):
if state_name == exploration.init_state_name:
old_models_start_count = value['first_entry_count']
if state_name == OLD_END_DEST:
old_models_complete_count = value['first_entry_count']
else:
state_hit_counts[state_name]['no_answer_count'] += (
value['first_entry_count']
+ value['subsequent_entries_count']
- value['resolved_answer_count']
- value['active_answer_count'])
state_hit_counts[state_name]['first_entry_count'] += (
value['first_entry_count'])
state_hit_counts[state_name]['total_entry_count'] += (
value['first_entry_count']
+ value['subsequent_entries_count'])
continue
event_type = value['event_type']
created_on = value['created_on']
session_id = value['session_id']
# If this is a start event, increment start count.
if event_type == feconf.EVENT_TYPE_START_EXPLORATION:
new_models_start_count += 1
elif event_type == feconf.EVENT_TYPE_COMPLETE_EXPLORATION:
new_models_complete_count += 1
# Track that we have seen a 'real' end for this session id
new_models_end_sessions.add(session_id)
elif event_type == feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION:
# Identify the last learner event for this session.
latest_timestamp_so_far, _ = (
session_id_to_latest_leave_event[session_id])
if latest_timestamp_so_far < created_on:
latest_timestamp_so_far = created_on
session_id_to_latest_leave_event[session_id] = (
created_on, state_name)
# If this is a state hit, increment the total count and record that
# we have seen this session id.
elif event_type == feconf.EVENT_TYPE_STATE_HIT:
state_hit_counts[state_name]['total_entry_count'] += 1
state_session_ids[state_name].add(session_id)
# After iterating through all events, take the size of the set of
# session ids as the first entry count.
for state_name in state_session_ids:
state_hit_counts[state_name]['first_entry_count'] += len(
state_session_ids[state_name])
        # Get the set of session ids that left without completing. This is
        # determined as the set of session ids with maybe-leave events,
        # minus the session ids that also recorded a completion event.
        leave_session_ids = set(
            session_id_to_latest_leave_event.keys()).difference(
                new_models_end_sessions)
        for session_id in leave_session_ids:
# Grab the state name of the state they left on and count that as a
# 'no answer' for that state.
(_, state_name) = session_id_to_latest_leave_event[session_id]
state_hit_counts[state_name]['no_answer_count'] += 1
num_starts = (
old_models_start_count + new_models_start_count)
num_completions = (
old_models_complete_count + new_models_complete_count)
stats_models.ExplorationAnnotationsModel.create(
exp_id, str(version), num_starts, num_completions,
state_hit_counts)
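# Illustrative sketch (assuming the standard continuous-computation API in
# core.jobs): the aggregator is switched on once, after which incoming
# start/complete events flow through _handle_incoming_event() and the
# MapReduce batch job periodically recomputes the annotations models.
#
#   StatisticsAggregator.start_computation()
#   ...
#   StatisticsAggregator.stop_computation(user_id)  # user_id: committer id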
|
|
import errno
import gym
import logging
import numpy as np
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import ray
import ray.experimental.tf_utils
from ray.util.debug import log_once
from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY
from ray.rllib.policy.rnn_sequencing import pad_batch_to_sequences_of_same_size
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.debug import summarize
from ray.rllib.utils.framework import try_import_tf, get_variable
from ray.rllib.utils.schedules import ConstantSchedule, PiecewiseSchedule
from ray.rllib.utils.tf_run_builder import TFRunBuilder
from ray.rllib.utils.typing import ModelGradients, TensorType, \
TrainerConfigDict
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
@DeveloperAPI
class TFPolicy(Policy):
"""An agent policy and loss implemented in TensorFlow.
Do not sub-class this class directly (neither should you sub-class
DynamicTFPolicy), but rather use
rllib.policy.tf_policy_template.build_tf_policy
to generate your custom tf (graph-mode or eager) Policy classes.
Extending this class enables RLlib to perform TensorFlow specific
optimizations on the policy, e.g., parallelization across gpus or
fusing multiple graphs together in the multi-agent setting.
Input tensors are typically shaped like [BATCH_SIZE, ...].
Attributes:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
model (rllib.models.Model): RLlib model used for the policy.
Examples:
>>> policy = TFPolicySubclass(
sess, obs_input, sampled_action, loss, loss_inputs)
>>> print(policy.compute_actions([1, 0, 2]))
(array([0, 1, 1]), [], {})
>>> print(policy.postprocess_trajectory(SampleBatch({...})))
SampleBatch({"action": ..., "advantages": ..., ...})
"""
@DeveloperAPI
def __init__(self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
sess: "tf1.Session",
obs_input: TensorType,
sampled_action: TensorType,
loss: TensorType,
loss_inputs: List[Tuple[str, TensorType]],
model: ModelV2 = None,
sampled_action_logp: Optional[TensorType] = None,
action_input: Optional[TensorType] = None,
log_likelihood: Optional[TensorType] = None,
dist_inputs: Optional[TensorType] = None,
dist_class: Optional[type] = None,
state_inputs: Optional[List[TensorType]] = None,
state_outputs: Optional[List[TensorType]] = None,
prev_action_input: Optional[TensorType] = None,
prev_reward_input: Optional[TensorType] = None,
seq_lens: Optional[TensorType] = None,
max_seq_len: int = 20,
batch_divisibility_req: int = 1,
update_ops: List[TensorType] = None,
explore: Optional[TensorType] = None,
timestep: Optional[TensorType] = None):
"""Initializes a Policy object.
Args:
observation_space (gym.spaces.Space): Observation space of the env.
action_space (gym.spaces.Space): Action space of the env.
config (TrainerConfigDict): The Policy config dict.
sess (tf1.Session): The TensorFlow session to use.
obs_input (TensorType): Input placeholder for observations, of
shape [BATCH_SIZE, obs...].
sampled_action (TensorType): Tensor for sampling an action, of
shape [BATCH_SIZE, action...]
loss (TensorType): Scalar policy loss output tensor.
loss_inputs (List[Tuple[str, TensorType]]): A (name, placeholder)
tuple for each loss input argument. Each placeholder name must
correspond to a SampleBatch column key returned by
postprocess_trajectory(), and has shape [BATCH_SIZE, data...].
These keys will be read from postprocessed sample batches and
fed into the specified placeholders during loss computation.
model (ModelV2): used to integrate custom losses and
stats from user-defined RLlib models.
sampled_action_logp (Optional[TensorType]): log probability of the
sampled action.
action_input (Optional[TensorType]): Input placeholder for actions
for logp/log-likelihood calculations.
log_likelihood (Optional[TensorType]): Tensor to calculate the
log_likelihood (given action_input and obs_input).
dist_class (Optional[type]): An optional ActionDistribution class
to use for generating a dist object from distribution inputs.
dist_inputs (Optional[TensorType]): Tensor to calculate the
distribution inputs/parameters.
state_inputs (Optional[List[TensorType]]): List of RNN state input
Tensors.
state_outputs (Optional[List[TensorType]]): List of RNN state
output Tensors.
prev_action_input (Optional[TensorType]): placeholder for previous
actions.
prev_reward_input (Optional[TensorType]): placeholder for previous
rewards.
seq_lens (Optional[TensorType]): Placeholder for RNN sequence
lengths, of shape [NUM_SEQUENCES].
Note that NUM_SEQUENCES << BATCH_SIZE. See
policy/rnn_sequencing.py for more information.
max_seq_len (int): Max sequence length for LSTM training.
batch_divisibility_req (int): pad all agent experiences batches to
multiples of this value. This only has an effect if not using
                an LSTM model.
update_ops (List[TensorType]): override the batchnorm update ops to
run when applying gradients. Otherwise we run all update ops
found in the current variable scope.
explore (Optional[TensorType]): Placeholder for `explore` parameter
into call to Exploration.get_exploration_action.
timestep (Optional[TensorType]): Placeholder for the global
sampling timestep.
"""
self.framework = "tf"
super().__init__(observation_space, action_space, config)
# Disable env-info placeholder.
if SampleBatch.INFOS in self.view_requirements:
self.view_requirements[SampleBatch.INFOS].used_for_training = False
assert model is None or isinstance(model, ModelV2), \
"Model classes for TFPolicy other than `ModelV2` not allowed! " \
"You passed in {}.".format(model)
self.model = model
# Auto-update model's inference view requirements, if recurrent.
if self.model is not None:
self._update_model_inference_view_requirements_from_init_state()
self.exploration = self._create_exploration()
self._sess = sess
self._obs_input = obs_input
self._prev_action_input = prev_action_input
self._prev_reward_input = prev_reward_input
self._sampled_action = sampled_action
self._is_training = self._get_is_training_placeholder()
self._is_exploring = explore if explore is not None else \
tf1.placeholder_with_default(True, (), name="is_exploring")
self._sampled_action_logp = sampled_action_logp
self._sampled_action_prob = (tf.math.exp(self._sampled_action_logp)
if self._sampled_action_logp is not None
else None)
self._action_input = action_input # For logp calculations.
self._dist_inputs = dist_inputs
self.dist_class = dist_class
self._state_inputs = state_inputs or []
self._state_outputs = state_outputs or []
self._seq_lens = seq_lens
self._max_seq_len = max_seq_len
if len(self._state_inputs) != len(self._state_outputs):
raise ValueError(
"Number of state input and output tensors must match, got: "
"{} vs {}".format(self._state_inputs, self._state_outputs))
if len(self.get_initial_state()) != len(self._state_inputs):
raise ValueError(
"Length of initial state must match number of state inputs, "
"got: {} vs {}".format(self.get_initial_state(),
self._state_inputs))
if self._state_inputs and self._seq_lens is None:
raise ValueError(
"seq_lens tensor must be given if state inputs are defined")
self._batch_divisibility_req = batch_divisibility_req
self._update_ops = update_ops
self._apply_op = None
self._stats_fetches = {}
self._timestep = timestep if timestep is not None else \
tf1.placeholder(tf.int64, (), name="timestep")
self._optimizer = None
self._grads_and_vars = None
self._grads = None
# Policy tf-variables (weights), whose values to get/set via
# get_weights/set_weights.
self._variables = None
# Local optimizer's tf-variables (e.g. state vars for Adam).
# Will be stored alongside `self._variables` when checkpointing.
self._optimizer_variables = None
# The loss tf-op.
self._loss = None
# A batch dict passed into loss function as input.
self._loss_input_dict = {}
if loss is not None:
self._initialize_loss(loss, loss_inputs)
# The log-likelihood calculator op.
self._log_likelihood = log_likelihood
if self._log_likelihood is None and self._dist_inputs is not None and \
self.dist_class is not None:
self._log_likelihood = self.dist_class(
self._dist_inputs, self.model).logp(self._action_input)
def variables(self):
"""Return the list of all savable variables for this policy."""
return self.model.variables()
def get_placeholder(self, name) -> "tf1.placeholder":
"""Returns the given action or loss input placeholder by name.
If the loss has not been initialized and a loss input placeholder is
requested, an error is raised.
Args:
name (str): The name of the placeholder to return. One of
                SampleBatch.CUR_OBS, SampleBatch.PREV_ACTIONS,
                SampleBatch.PREV_REWARDS, or a valid key from
                `self._loss_input_dict`.
Returns:
tf1.placeholder: The placeholder under the given str key.
"""
if name == SampleBatch.CUR_OBS:
return self._obs_input
elif name == SampleBatch.PREV_ACTIONS:
return self._prev_action_input
elif name == SampleBatch.PREV_REWARDS:
return self._prev_reward_input
assert self._loss_input_dict, \
"You need to populate `self._loss_input_dict` before " \
"`get_placeholder()` can be called"
return self._loss_input_dict[name]
def get_session(self) -> "tf1.Session":
"""Returns a reference to the TF session for this policy."""
return self._sess
def loss_initialized(self) -> bool:
"""Returns whether the loss function has been initialized."""
return self._loss is not None
def _initialize_loss(self, loss: TensorType,
loss_inputs: List[Tuple[str, TensorType]]) -> None:
"""Initializes the loss op from given loss tensor and placeholders.
Args:
loss (TensorType): The loss op generated by some loss function.
loss_inputs (List[Tuple[str, TensorType]]): The list of Tuples:
(name, tf1.placeholders) needed for calculating the loss.
"""
self._loss_input_dict = dict(loss_inputs)
for i, ph in enumerate(self._state_inputs):
self._loss_input_dict["state_in_{}".format(i)] = ph
if self.model:
self._loss = self.model.custom_loss(loss, self._loss_input_dict)
self._stats_fetches.update({
"model": self.model.metrics() if isinstance(
self.model, ModelV2) else self.model.custom_stats()
})
else:
self._loss = loss
self._optimizer = self.optimizer()
self._grads_and_vars = [
(g, v) for (g, v) in self.gradients(self._optimizer, self._loss)
if g is not None
]
self._grads = [g for (g, v) in self._grads_and_vars]
if self.model:
self._variables = ray.experimental.tf_utils.TensorFlowVariables(
[], self._sess, self.variables())
# gather update ops for any batch norm layers
if not self._update_ops:
self._update_ops = tf1.get_collection(
tf1.GraphKeys.UPDATE_OPS, scope=tf1.get_variable_scope().name)
if self._update_ops:
logger.info("Update ops to run on apply gradient: {}".format(
self._update_ops))
with tf1.control_dependencies(self._update_ops):
self._apply_op = self.build_apply_op(self._optimizer,
self._grads_and_vars)
if log_once("loss_used"):
logger.debug(
"These tensors were used in the loss_fn:\n\n{}\n".format(
summarize(self._loss_input_dict)))
self._sess.run(tf1.global_variables_initializer())
self._optimizer_variables = None
if self._optimizer:
self._optimizer_variables = \
ray.experimental.tf_utils.TensorFlowVariables(
self._optimizer.variables(), self._sess)
@override(Policy)
def compute_actions(
self,
obs_batch: Union[List[TensorType], TensorType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Union[List[TensorType], TensorType] = None,
prev_reward_batch: Union[List[TensorType], TensorType] = None,
info_batch: Optional[Dict[str, list]] = None,
episodes: Optional[List["MultiAgentEpisode"]] = None,
explore: Optional[bool] = None,
timestep: Optional[int] = None,
**kwargs):
explore = explore if explore is not None else self.config["explore"]
timestep = timestep if timestep is not None else self.global_timestep
builder = TFRunBuilder(self._sess, "compute_actions")
to_fetch = self._build_compute_actions(
builder,
obs_batch,
state_batches=state_batches,
prev_action_batch=prev_action_batch,
prev_reward_batch=prev_reward_batch,
explore=explore,
timestep=timestep)
# Execute session run to get action (and other fetches).
fetched = builder.get(to_fetch)
# Update our global timestep by the batch size.
self.global_timestep += len(obs_batch) if isinstance(obs_batch, list) \
else obs_batch.shape[0]
return fetched
@override(Policy)
def compute_log_likelihoods(
self,
actions: Union[List[TensorType], TensorType],
obs_batch: Union[List[TensorType], TensorType],
state_batches: Optional[List[TensorType]] = None,
prev_action_batch: Optional[Union[List[TensorType],
TensorType]] = None,
prev_reward_batch: Optional[Union[List[
TensorType], TensorType]] = None) -> TensorType:
if self._log_likelihood is None:
raise ValueError("Cannot compute log-prob/likelihood w/o a "
"self._log_likelihood op!")
# Exploration hook before each forward pass.
self.exploration.before_compute_actions(
explore=False, tf_sess=self.get_session())
builder = TFRunBuilder(self._sess, "compute_log_likelihoods")
# Feed actions (for which we want logp values) into graph.
builder.add_feed_dict({self._action_input: actions})
# Feed observations.
builder.add_feed_dict({self._obs_input: obs_batch})
# Internal states.
state_batches = state_batches or []
if len(self._state_inputs) != len(state_batches):
raise ValueError(
"Must pass in RNN state batches for placeholders {}, got {}".
format(self._state_inputs, state_batches))
builder.add_feed_dict(
{k: v
for k, v in zip(self._state_inputs, state_batches)})
if state_batches:
builder.add_feed_dict({self._seq_lens: np.ones(len(obs_batch))})
# Prev-a and r.
if self._prev_action_input is not None and \
prev_action_batch is not None:
builder.add_feed_dict({self._prev_action_input: prev_action_batch})
if self._prev_reward_input is not None and \
prev_reward_batch is not None:
builder.add_feed_dict({self._prev_reward_input: prev_reward_batch})
# Fetch the log_likelihoods output and return.
fetches = builder.add_fetches([self._log_likelihood])
return builder.get(fetches)[0]
@override(Policy)
@DeveloperAPI
def learn_on_batch(
self, postprocessed_batch: SampleBatch) -> Dict[str, TensorType]:
assert self.loss_initialized()
builder = TFRunBuilder(self._sess, "learn_on_batch")
fetches = self._build_learn_on_batch(builder, postprocessed_batch)
return builder.get(fetches)
@override(Policy)
@DeveloperAPI
def compute_gradients(
self,
postprocessed_batch: SampleBatch) -> \
Tuple[ModelGradients, Dict[str, TensorType]]:
assert self.loss_initialized()
builder = TFRunBuilder(self._sess, "compute_gradients")
fetches = self._build_compute_gradients(builder, postprocessed_batch)
return builder.get(fetches)
@override(Policy)
@DeveloperAPI
def apply_gradients(self, gradients: ModelGradients) -> None:
assert self.loss_initialized()
builder = TFRunBuilder(self._sess, "apply_gradients")
fetches = self._build_apply_gradients(builder, gradients)
builder.get(fetches)
@override(Policy)
@DeveloperAPI
def get_exploration_info(self) -> Dict[str, TensorType]:
return self.exploration.get_info(sess=self.get_session())
@override(Policy)
@DeveloperAPI
def get_weights(self) -> Union[Dict[str, TensorType], List[TensorType]]:
return self._variables.get_weights()
@override(Policy)
@DeveloperAPI
def set_weights(self, weights) -> None:
return self._variables.set_weights(weights)
@override(Policy)
@DeveloperAPI
def get_state(self) -> Union[Dict[str, TensorType], List[TensorType]]:
# For tf Policies, return Policy weights and optimizer var values.
state = super().get_state()
if self._optimizer_variables and \
len(self._optimizer_variables.variables) > 0:
state["_optimizer_variables"] = \
self._sess.run(self._optimizer_variables.variables)
return state
@override(Policy)
@DeveloperAPI
def set_state(self, state) -> None:
state = state.copy() # shallow copy
# Set optimizer vars first.
optimizer_vars = state.pop("_optimizer_variables", None)
if optimizer_vars:
self._optimizer_variables.set_weights(optimizer_vars)
# Then the Policy's (NN) weights.
super().set_state(state)
@override(Policy)
@DeveloperAPI
def export_model(self, export_dir: str) -> None:
"""Export tensorflow graph to export_dir for serving."""
with self._sess.graph.as_default():
builder = tf1.saved_model.builder.SavedModelBuilder(export_dir)
signature_def_map = self._build_signature_def()
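            # Note: FileWriter.add_graph() returns None, so this effectively
            # passes saver=None and SavedModelBuilder falls back to creating
            # its own Saver; the FileWriter call's side effect is writing the
            # graph summary to export_dir.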
builder.add_meta_graph_and_variables(
self._sess, [tf1.saved_model.tag_constants.SERVING],
signature_def_map=signature_def_map,
saver=tf1.summary.FileWriter(export_dir).add_graph(
graph=self._sess.graph))
builder.save()
@override(Policy)
@DeveloperAPI
def export_checkpoint(self,
export_dir: str,
filename_prefix: str = "model") -> None:
"""Export tensorflow checkpoint to export_dir."""
try:
os.makedirs(export_dir)
except OSError as e:
# ignore error if export dir already exists
if e.errno != errno.EEXIST:
raise
save_path = os.path.join(export_dir, filename_prefix)
with self._sess.graph.as_default():
saver = tf1.train.Saver()
saver.save(self._sess, save_path)
@override(Policy)
@DeveloperAPI
def import_model_from_h5(self, import_file: str) -> None:
"""Imports weights into tf model."""
# Make sure the session is the right one (see issue #7046).
with self._sess.graph.as_default():
with self._sess.as_default():
return self.model.import_from_h5(import_file)
@DeveloperAPI
def copy(self,
existing_inputs: List[Tuple[str, "tf1.placeholder"]]) -> \
"TFPolicy":
"""Creates a copy of self using existing input placeholders.
Optional: Only required to work with the multi-GPU optimizer.
Args:
existing_inputs (List[Tuple[str, tf1.placeholder]]): Dict mapping
names (str) to tf1.placeholders to re-use (share) with the
returned copy of self.
Returns:
TFPolicy: A copy of self.
"""
raise NotImplementedError
@override(Policy)
@DeveloperAPI
def is_recurrent(self) -> bool:
return len(self._state_inputs) > 0
@override(Policy)
@DeveloperAPI
def num_state_tensors(self) -> int:
return len(self._state_inputs)
@DeveloperAPI
def extra_compute_action_feed_dict(self) -> Dict[TensorType, TensorType]:
"""Extra dict to pass to the compute actions session run.
Returns:
Dict[TensorType, TensorType]: A feed dict to be added to the
feed_dict passed to the compute_actions session.run() call.
"""
return {}
@DeveloperAPI
def extra_compute_action_fetches(self) -> Dict[str, TensorType]:
"""Extra values to fetch and return from compute_actions().
By default we return action probability/log-likelihood info
and action distribution inputs (if present).
Returns:
Dict[str, TensorType]: An extra fetch-dict to be passed to and
returned from the compute_actions() call.
"""
extra_fetches = {}
# Action-logp and action-prob.
if self._sampled_action_logp is not None:
extra_fetches[SampleBatch.ACTION_PROB] = self._sampled_action_prob
extra_fetches[SampleBatch.ACTION_LOGP] = self._sampled_action_logp
# Action-dist inputs.
if self._dist_inputs is not None:
extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = self._dist_inputs
return extra_fetches
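    # A hedged sketch of extending the default fetches in a subclass
    # (MyTFPolicy and the "behaviour_logits" key are illustrative only):
    #
    #   @override(TFPolicy)
    #   def extra_compute_action_fetches(self):
    #       fetches = super(MyTFPolicy, self).extra_compute_action_fetches()
    #       fetches["behaviour_logits"] = self._dist_inputs
    #       return fetches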
@DeveloperAPI
def extra_compute_grad_feed_dict(self) -> Dict[TensorType, TensorType]:
"""Extra dict to pass to the compute gradients session run.
Returns:
Dict[TensorType, TensorType]: Extra feed_dict to be passed to the
compute_gradients Session.run() call.
"""
        return {}  # e.g., kl_coeff
@DeveloperAPI
    def extra_compute_grad_fetches(self) -> Dict[str, Any]:
        """Extra values to fetch and return from compute_gradients().
        Returns:
            Dict[str, Any]: Extra fetch dict to be added to the fetch dict
                of the compute_gradients Session.run() call.
        """
        return {LEARNER_STATS_KEY: {}}  # e.g., stats, td error, etc.
@DeveloperAPI
def optimizer(self) -> "tf.keras.optimizers.Optimizer":
"""TF optimizer to use for policy optimization.
Returns:
tf.keras.optimizers.Optimizer: The local optimizer to use for this
Policy's Model.
"""
if hasattr(self, "config"):
return tf1.train.AdamOptimizer(learning_rate=self.config["lr"])
else:
return tf1.train.AdamOptimizer()
@DeveloperAPI
def gradients(self, optimizer: "tf.keras.optimizers.Optimizer",
loss: TensorType) -> List[Tuple[TensorType, TensorType]]:
"""Override this for a custom gradient computation behavior.
Returns:
List[Tuple[TensorType, TensorType]]: List of tuples with grad
values and the grad-value's corresponding tf.variable in it.
"""
return optimizer.compute_gradients(loss)
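    # A minimal sketch of a custom gradients() override that clips by global
    # norm (assumes a "grad_clip" entry in the policy config;
    # tf.clip_by_global_norm is a standard TF op):
    #
    #   @override(TFPolicy)
    #   def gradients(self, optimizer, loss):
    #       grads_and_vars = optimizer.compute_gradients(loss)
    #       grads = [g for g, _ in grads_and_vars if g is not None]
    #       variables = [v for g, v in grads_and_vars if g is not None]
    #       clipped, _ = tf.clip_by_global_norm(
    #           grads, self.config["grad_clip"])
    #       return list(zip(clipped, variables))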
@DeveloperAPI
def build_apply_op(
self,
optimizer: "tf.keras.optimizers.Optimizer",
grads_and_vars: List[Tuple[TensorType, TensorType]]) -> \
"tf.Operation":
"""Override this for a custom gradient apply computation behavior.
Args:
optimizer (tf.keras.optimizers.Optimizer): The local tf optimizer
to use for applying the grads and vars.
grads_and_vars (List[Tuple[TensorType, TensorType]]): List of
tuples with grad values and the grad-value's corresponding
tf.variable in it.
"""
# Specify global_step for TD3 which needs to count the num updates.
return optimizer.apply_gradients(
self._grads_and_vars,
global_step=tf1.train.get_or_create_global_step())
def _get_is_training_placeholder(self):
"""Get the placeholder for _is_training, i.e., for batch norm layers.
This can be called safely before __init__ has run.
"""
if not hasattr(self, "_is_training"):
self._is_training = tf1.placeholder_with_default(
False, (), name="is_training")
return self._is_training
def _debug_vars(self):
if log_once("grad_vars"):
for _, v in self._grads_and_vars:
logger.info("Optimizing variable {}".format(v))
def _extra_input_signature_def(self):
"""Extra input signatures to add when exporting tf model.
Inferred from extra_compute_action_feed_dict()
"""
feed_dict = self.extra_compute_action_feed_dict()
return {
k.name: tf1.saved_model.utils.build_tensor_info(k)
for k in feed_dict.keys()
}
def _extra_output_signature_def(self):
"""Extra output signatures to add when exporting tf model.
Inferred from extra_compute_action_fetches()
"""
fetches = self.extra_compute_action_fetches()
return {
k: tf1.saved_model.utils.build_tensor_info(fetches[k])
for k in fetches.keys()
}
def _build_signature_def(self):
"""Build signature def map for tensorflow SavedModelBuilder.
"""
# build input signatures
input_signature = self._extra_input_signature_def()
input_signature["observations"] = \
tf1.saved_model.utils.build_tensor_info(self._obs_input)
if self._seq_lens is not None:
input_signature["seq_lens"] = \
tf1.saved_model.utils.build_tensor_info(self._seq_lens)
if self._prev_action_input is not None:
input_signature["prev_action"] = \
tf1.saved_model.utils.build_tensor_info(
self._prev_action_input)
if self._prev_reward_input is not None:
input_signature["prev_reward"] = \
tf1.saved_model.utils.build_tensor_info(
self._prev_reward_input)
input_signature["is_training"] = \
tf1.saved_model.utils.build_tensor_info(self._is_training)
for state_input in self._state_inputs:
input_signature[state_input.name] = \
tf1.saved_model.utils.build_tensor_info(state_input)
# build output signatures
output_signature = self._extra_output_signature_def()
for i, a in enumerate(tf.nest.flatten(self._sampled_action)):
output_signature["actions_{}".format(i)] = \
tf1.saved_model.utils.build_tensor_info(a)
for state_output in self._state_outputs:
output_signature[state_output.name] = \
tf1.saved_model.utils.build_tensor_info(state_output)
signature_def = (
tf1.saved_model.signature_def_utils.build_signature_def(
input_signature, output_signature,
tf1.saved_model.signature_constants.PREDICT_METHOD_NAME))
signature_def_key = (tf1.saved_model.signature_constants.
DEFAULT_SERVING_SIGNATURE_DEF_KEY)
signature_def_map = {signature_def_key: signature_def}
return signature_def_map
def _build_compute_actions(self,
builder,
obs_batch,
*,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
explore=None,
timestep=None):
explore = explore if explore is not None else self.config["explore"]
timestep = timestep if timestep is not None else self.global_timestep
# Call the exploration before_compute_actions hook.
self.exploration.before_compute_actions(
timestep=timestep, explore=explore, tf_sess=self.get_session())
state_batches = state_batches or []
if len(self._state_inputs) != len(state_batches):
raise ValueError(
"Must pass in RNN state batches for placeholders {}, got {}".
format(self._state_inputs, state_batches))
builder.add_feed_dict(self.extra_compute_action_feed_dict())
builder.add_feed_dict({self._obs_input: obs_batch})
if state_batches:
builder.add_feed_dict({self._seq_lens: np.ones(len(obs_batch))})
if self._prev_action_input is not None and \
prev_action_batch is not None:
builder.add_feed_dict({self._prev_action_input: prev_action_batch})
if self._prev_reward_input is not None and \
prev_reward_batch is not None:
builder.add_feed_dict({self._prev_reward_input: prev_reward_batch})
builder.add_feed_dict({self._is_training: False})
builder.add_feed_dict({self._is_exploring: explore})
if timestep is not None:
builder.add_feed_dict({self._timestep: timestep})
builder.add_feed_dict(dict(zip(self._state_inputs, state_batches)))
        # Determine what exactly to fetch from the graph.
to_fetch = [self._sampled_action] + self._state_outputs + \
[self.extra_compute_action_fetches()]
# Perform the session call.
fetches = builder.add_fetches(to_fetch)
return fetches[0], fetches[1:-1], fetches[-1]
def _build_compute_gradients(self, builder, postprocessed_batch):
self._debug_vars()
builder.add_feed_dict(self.extra_compute_grad_feed_dict())
builder.add_feed_dict(
self._get_loss_inputs_dict(postprocessed_batch, shuffle=False))
fetches = builder.add_fetches(
[self._grads, self._get_grad_and_stats_fetches()])
return fetches[0], fetches[1]
def _build_apply_gradients(self, builder, gradients):
if len(gradients) != len(self._grads):
raise ValueError(
"Unexpected number of gradients to apply, got {} for {}".
format(gradients, self._grads))
builder.add_feed_dict({self._is_training: True})
builder.add_feed_dict(dict(zip(self._grads, gradients)))
fetches = builder.add_fetches([self._apply_op])
return fetches[0]
def _build_learn_on_batch(self, builder, postprocessed_batch):
self._debug_vars()
# Callback handling.
self.callbacks.on_learn_on_batch(
policy=self, train_batch=postprocessed_batch)
builder.add_feed_dict(self.extra_compute_grad_feed_dict())
builder.add_feed_dict(
self._get_loss_inputs_dict(postprocessed_batch, shuffle=False))
fetches = builder.add_fetches([
self._apply_op,
self._get_grad_and_stats_fetches(),
])
return fetches[1]
def _get_grad_and_stats_fetches(self):
fetches = self.extra_compute_grad_fetches()
if LEARNER_STATS_KEY not in fetches:
raise ValueError(
"Grad fetches should contain 'stats': {...} entry")
if self._stats_fetches:
fetches[LEARNER_STATS_KEY] = dict(self._stats_fetches,
**fetches[LEARNER_STATS_KEY])
return fetches
def _get_loss_inputs_dict(self, batch, shuffle):
"""Return a feed dict from a batch.
Args:
batch (SampleBatch): batch of data to derive inputs from
shuffle (bool): whether to shuffle batch sequences. Shuffle may
be done in-place. This only makes sense if you're further
applying minibatch SGD after getting the outputs.
Returns:
feed dict of data
"""
# Get batch ready for RNNs, if applicable.
pad_batch_to_sequences_of_same_size(
batch,
shuffle=shuffle,
max_seq_len=self._max_seq_len,
batch_divisibility_req=self._batch_divisibility_req,
feature_keys=[
k for k in self._loss_input_dict.keys() if k != "seq_lens"
],
)
batch["is_training"] = True
# Build the feed dict from the batch.
feed_dict = {}
for key, placeholder in self._loss_input_dict.items():
feed_dict[placeholder] = batch[key]
state_keys = [
"state_in_{}".format(i) for i in range(len(self._state_inputs))
]
for key in state_keys:
feed_dict[self._loss_input_dict[key]] = batch[key]
if state_keys:
feed_dict[self._seq_lens] = batch["seq_lens"]
return feed_dict
@DeveloperAPI
class LearningRateSchedule:
"""Mixin for TFPolicy that adds a learning rate schedule."""
@DeveloperAPI
def __init__(self, lr, lr_schedule):
self.cur_lr = tf1.get_variable("lr", initializer=lr, trainable=False)
if lr_schedule is None:
self.lr_schedule = ConstantSchedule(lr, framework=None)
else:
self.lr_schedule = PiecewiseSchedule(
lr_schedule, outside_value=lr_schedule[-1][-1], framework=None)
@override(Policy)
def on_global_var_update(self, global_vars):
super(LearningRateSchedule, self).on_global_var_update(global_vars)
self.cur_lr.load(
self.lr_schedule.value(global_vars["timestep"]),
session=self._sess)
@override(TFPolicy)
def optimizer(self):
return tf1.train.AdamOptimizer(learning_rate=self.cur_lr)
@DeveloperAPI
class EntropyCoeffSchedule:
"""Mixin for TFPolicy that adds entropy coeff decay."""
@DeveloperAPI
def __init__(self, entropy_coeff, entropy_coeff_schedule):
self.entropy_coeff = get_variable(
entropy_coeff,
framework="tf",
tf_name="entropy_coeff",
trainable=False)
if entropy_coeff_schedule is None:
self.entropy_coeff_schedule = ConstantSchedule(
entropy_coeff, framework=None)
else:
# Allows for custom schedule similar to lr_schedule format
if isinstance(entropy_coeff_schedule, list):
self.entropy_coeff_schedule = PiecewiseSchedule(
entropy_coeff_schedule,
outside_value=entropy_coeff_schedule[-1][-1],
framework=None)
else:
# Implements previous version but enforces outside_value
self.entropy_coeff_schedule = PiecewiseSchedule(
[[0, entropy_coeff], [entropy_coeff_schedule, 0.0]],
outside_value=0.0,
framework=None)
@override(Policy)
def on_global_var_update(self, global_vars):
super(EntropyCoeffSchedule, self).on_global_var_update(global_vars)
op_or_none = self.entropy_coeff.assign(
self.entropy_coeff_schedule.value(global_vars["timestep"]),
read_value=False, # return tf op (None in eager mode).
)
if self._sess is not None:
self._sess.run(op_or_none)
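# Usage sketch for the mixins above (MyPolicy is illustrative): a concrete
# policy lists the mixin before TFPolicy so its overrides win in the MRO,
# and calls the mixin initializer explicitly, e.g.:
#
#   class MyPolicy(LearningRateSchedule, TFPolicy):
#       def __init__(self, observation_space, action_space, config, **kw):
#           LearningRateSchedule.__init__(
#               self, config["lr"], config["lr_schedule"])
#           TFPolicy.__init__(
#               self, observation_space, action_space, config, **kw)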
|
|
from Crypto.PublicKey import RSA as _RSA
from Crypto.Cipher import AES as _AES
from Crypto.Util import Counter as _Counter
from six.moves import xrange as _xrange
from ._aesmix import lib as _lib
from .keyreg import KeyRegRSA as _KeyRegRSA
from .padder import Padder as _Padder
from .wrapper import mix_and_slice as _mix_and_slice
from .wrapper import unslice_and_unmix as _unslice_and_unmix
from base64 import b64encode as _b64encode, b64decode as _b64decode
from io import BytesIO as _BytesIO
import os as _os
import json as _json
import random as _random
import shutil as _shutil
import logging as _logging
# cryptographically secure PRNG
_random = _random.SystemRandom()
class _MixSliceMetadata(object):
def __init__(self, key, iv, rsakey=None, order=None, state=None):
"""Instantiates a MixSliceMetadata.
Args:
key (bytestr): The key used for AES encryption (16 bytes long).
iv (bytestr): initialization vector for the mixing phase.
            rsakey (RSA key object): The RSA key used for key derivation.
                If None, a new RSA keypair is generated.
            order (list[int]): The list of fragment ids to which
                a layer of encryption was applied. The leftmost id is the
                innermost layer of encryption. If None, an empty list.
            state (int): The last state for key derivation. If None,
                a random state is generated.
Returns:
The MixSliceMetadata object.
"""
rsakey = rsakey or _RSA.generate(2048)
order = order or []
state = state or _random.randrange(3, rsakey.n)
assert 3 <= state <= rsakey.n
        self._key = key
        self._iv = iv
        self._order = order
        self._rsakey = rsakey  # kept so is_private() can inspect the key
        self._keyreg = _KeyRegRSA.load(rsakey, state)
@staticmethod
def load_from_file(metadatafile):
"""Loads the metadata from a file.
Args:
metadatafile (path): The path for the metadatafile.
Returns:
The MixSliceMetadata object created from reading the file.
"""
with open(metadatafile, "r") as fp:
metadata = _json.load(fp)
return _MixSliceMetadata(
key=_b64decode(metadata["key"].encode("ascii")),
iv=_b64decode(metadata["iv"].encode("ascii")),
rsakey=_RSA.importKey(metadata["rsakey"].encode("ascii")),
order=metadata["order"],
state=metadata["state"])
def save_to_file(self, metadatafile, private):
"""Saves the metadata to file.
Args:
metadatafile (path): The path for the metadatafile.
private (bool): Save also the private key.
"""
state = self._keyreg.get_state(private)
rsakey = self._keyreg.get_rsakey(private)
metadata = {
"key": _b64encode(self._key).decode("ascii"),
"iv": _b64encode(self._iv).decode("ascii"),
"rsakey": rsakey.exportKey().decode("ascii"),
"order": self._order,
"state": state,
}
with open(metadatafile, "w") as fp:
_json.dump(metadata, fp)
def is_private(self):
return self._rsakey.has_private()
def add_encryption_step(self, fragment_id):
self._order.append(fragment_id)
self._keyreg, stm = self._keyreg.wind()
return stm.keyder()
def decryption_steps(self):
keyreg = self._keyreg
stm = keyreg.unwind() if keyreg.is_publisher() else keyreg
for fragment_id in reversed(self._order):
yield fragment_id, stm.keyder()
stm = stm.unwind()
class MixSlice(object):
def __init__(self, fragments, metadata, changed=None):
self._fragments = fragments
self._metadata = metadata
self._changed = set(changed if changed is not None
else _xrange(len(self._fragments)))
@staticmethod
def encrypt(data, key, iv, threads=None, rsakey=None,
state=None, padder=None):
"""Creates a MixSlice from plaintext data.
Args:
            data (bytestr): The data to encrypt; it is padded to a
                multiple of MACRO_SIZE before mixing.
            key (bytestr): The key used for AES encryption (16 bytes long).
            iv (bytestr): The iv used for AES encryption (16 bytes long).
            threads (int): The number of threads used (default: cpu count).
            rsakey (RSA key object): The RSA key used for key derivation.
                If None, a new RSA keypair is generated.
            state (int): The last state for key derivation. If None,
                a random state is generated.
            padder (Padder): The padder applied to the data. If None, a
                default Padder with MACRO_SIZE blocksize is used.
Returns:
A new MixSlice that holds the encrypted fragments.
"""
padder = padder or _Padder(blocksize=_lib.MACRO_SIZE)
padded_data = padder.pad(data)
fragments = _mix_and_slice(data=padded_data, key=key,
iv=iv, threads=threads)
fragments = [_BytesIO(f) for f in fragments]
metadata = _MixSliceMetadata(key=key, iv=iv, order=None,
rsakey=rsakey, state=state)
return MixSlice(fragments, metadata)
@staticmethod
def load_from_file(fragsdir, metadatafile):
"""Load a MixSlice from data and metadata files.
Args:
fragsdir (path): The path to the encrypted fragments directory.
metadatafile (path): The path to the metadatafile.
Returns:
A new MixSlice that holds the encrypted fragments.
"""
fragfiles = sorted(_os.listdir(fragsdir))
assert len(fragfiles) == _lib.MINI_PER_MACRO, \
"exactly MINI_PER_MACRO files required in fragsdir."
fragments = [_os.path.join(fragsdir, f) for f in fragfiles]
metadata = _MixSliceMetadata.load_from_file(metadatafile)
return MixSlice(fragments, metadata, changed=[])
    def save_to_files(self, fragsdir, public_metafile, private_metafile):
        """Writes the changed fragments (all fragments, if none are marked
        changed) and both metadata files to disk.
        Args:
            fragsdir (path): Destination directory for the fragment files.
            public_metafile (path): The path for the public metadata file.
            private_metafile (path): The path for the private metadata file.
        """
if not _os.path.exists(fragsdir):
_os.makedirs(fragsdir)
fragids = self._changed or _xrange(len(self._fragments))
name = "frag_%%0%dd.dat" % len(str(len(self._fragments)))
for fragid in fragids:
fragment = self._fragments[fragid]
assert isinstance(fragment, _BytesIO)
fragment.seek(0)
destination = _os.path.join(fragsdir, name % fragid)
with open(destination, "wb") as fp:
_shutil.copyfileobj(fragment, fp)
fragment.close()
self._fragments[fragid] = destination
self._metadata.save_to_file(public_metafile, private=False)
self._metadata.save_to_file(private_metafile, private=True)
@staticmethod
def _read_fragment(fragment):
if isinstance(fragment, _BytesIO):
fragment.seek(0)
data = fragment.read()
fragment.seek(0)
else:
with open(fragment, "rb") as fp:
data = fp.read()
return data
    def step_encrypt(self, fragment_id=None):
        """Adds one layer of AES-CTR encryption to a single fragment.
        Args:
            fragment_id (int): The fragment to re-encrypt. If None, a
                random fragment is chosen.
        Returns:
            The id of the fragment that was re-encrypted.
        """
fragment_id = (fragment_id if fragment_id is not None
else _random.randrange(len(self._fragments)))
key = self._metadata.add_encryption_step(fragment_id)
ctr = _Counter.new(128)
cipher = _AES.new(key[:16], mode=_AES.MODE_CTR, counter=ctr)
_logging.info("Encrypting fragment #%d" % fragment_id)
self._fragments[fragment_id] = _BytesIO(
cipher.encrypt(self._read_fragment(self._fragments[fragment_id])))
self._changed.add(fragment_id)
return fragment_id
    def decrypt(self, threads=None, padder=None):
        """Removes every encryption layer and reverses the mixing phase.
        Args:
            threads (int): The number of threads used (default: cpu count).
            padder (Padder): The padder used to unpad the plaintext. If
                None, a default Padder with MACRO_SIZE blocksize is used.
        Returns:
            The decrypted plaintext.
        """
fragments = [self._read_fragment(f) for f in self._fragments]
for fragment_id, key in self._metadata.decryption_steps():
_logging.info("Decrypting fragment #%d" % fragment_id)
ctr = _Counter.new(128)
cipher = _AES.new(key[:16], mode=_AES.MODE_CTR, counter=ctr)
fragments[fragment_id] = cipher.decrypt(fragments[fragment_id])
padded_data = _unslice_and_unmix(
fragments=fragments,
key=self._metadata._key,
iv=self._metadata._iv,
threads=threads)
padder = padder or _Padder(blocksize=_lib.MACRO_SIZE)
return padder.unpad(padded_data)
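# End-to-end usage sketch for MixSlice (the paths and random key/iv below
# are illustrative):
#
#   key = _os.urandom(16)
#   iv = _os.urandom(16)
#   enc = MixSlice.encrypt(plaintext, key, iv)
#   enc.step_encrypt()  # add one layer of re-encryption
#   enc.save_to_files("frags", "meta.public", "meta.private")
#
#   dec = MixSlice.load_from_file("frags", "meta.private")
#   plaintext = dec.decrypt()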
|
|
"""
The Connection class negotiates and manages the connection state.
"""
import logging
# pylint: disable=import-error
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
try:
import ssl
except ImportError:
ssl = None
import threading
import time
from pamqp import specification as spec
from rabbitpy import base
from rabbitpy import heartbeat
from rabbitpy import io
from rabbitpy import channel
from rabbitpy import channel0
from rabbitpy import events
from rabbitpy import exceptions
from rabbitpy import message
from rabbitpy.utils import queue
from rabbitpy import utils
LOGGER = logging.getLogger(__name__)
AMQP = 'amqp'
AMQPS = 'amqps'
if ssl:
SSL_CERT_MAP = {'ignore': ssl.CERT_NONE,
'optional': ssl.CERT_OPTIONAL,
'required': ssl.CERT_REQUIRED}
SSL_VERSION_MAP = dict()
if hasattr(ssl, 'PROTOCOL_SSLv2'):
SSL_VERSION_MAP['SSLv2'] = getattr(ssl, 'PROTOCOL_SSLv2')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
SSL_VERSION_MAP['SSLv3'] = getattr(ssl, 'PROTOCOL_SSLv3')
if hasattr(ssl, 'PROTOCOL_SSLv23'):
SSL_VERSION_MAP['SSLv23'] = getattr(ssl, 'PROTOCOL_SSLv23')
if hasattr(ssl, 'PROTOCOL_TLSv1'):
SSL_VERSION_MAP['TLSv1'] = getattr(ssl, 'PROTOCOL_TLSv1')
else:
SSL_CERT_MAP, SSL_VERSION_MAP = dict(), dict()
# pylint: disable=too-many-instance-attributes
class Connection(base.StatefulObject):
"""The Connection object is responsible for negotiating a connection and
managing its state. When creating a new instance of the Connection object,
if no URL is passed in, it uses the default connection parameters of
localhost port 5672, virtual host / with the guest/guest username/password
    combination. Represented as an AMQP URL the connection information is:
    :code:`amqp://guest:guest@localhost:5672/%2F`
    To use a different connection, pass in an AMQP URL that follows the
    standard format:
    :code:`[scheme]://[username]:[password]@[host]:[port]/[virtual_host]`
    The following example connects to the test virtual host on a RabbitMQ
    server running at 192.168.1.200 port 5672 as the user "www" with the
    password "rabbitmq":
    :code:`amqp://www:rabbitmq@192.168.1.200:5672/test`
    .. note::
        Connection-level exceptions may be raised while using almost any
        functionality in the library.
:param str url: The AMQP connection URL
:raises: rabbitpy.exceptions.AMQPException
:raises: rabbitpy.exceptions.ConnectionException
:raises: rabbitpy.exceptions.ConnectionResetException
:raises: rabbitpy.exceptions.RemoteClosedException
"""
CANCEL_METHOD = ['Basic.Cancel']
DEFAULT_CHANNEL_MAX = 65535
DEFAULT_TIMEOUT = 3
DEFAULT_HEARTBEAT_INTERVAL = 60.0
DEFAULT_LOCALE = 'en_US'
DEFAULT_URL = 'amqp://guest:guest@localhost:5672/%2F'
DEFAULT_VHOST = '%2F'
GUEST = 'guest'
PORTS = {'amqp': 5672, 'amqps': 5671, 'api': 15672}
QUEUE_WAIT = 0.01
def __init__(self, url=None):
"""Create a new instance of the Connection object"""
super(Connection, self).__init__()
# Create a name for the connection
self._name = '0x%x' % id(self)
# Extract parts of connection URL for use later
self._args = self._process_url(url or self.DEFAULT_URL)
# General events and queues shared across threads
self._events = events.Events()
# A queue for the child threads to put exceptions in
self._exceptions = queue.Queue()
# One queue for writing frames, regardless of the channel sending them
self._write_queue = queue.Queue()
# Lock used when managing the channel stack
self._channel_lock = threading.Lock()
# Attributes for core object threads
self._channel0 = None
self._channels = dict()
self._heartbeat = None
self._io = None
# Used by Message for breaking up body frames
self._max_frame_size = None
# Connect to RabbitMQ
self._connect()
def __enter__(self):
"""For use as a context manager, return a handle to this object
instance.
:rtype: Connection
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""When leaving the context, examine why the context is leaving, if
it's an exception or what.
"""
if exc_type and exc_val:
self._set_state(self.CLOSED)
raise exc_val
self.close()
@property
def args(self):
"""Return the connection arguments.
:rtype: dict
"""
return dict(self._args)
@property
def blocked(self):
"""Indicates if the connection is blocked from publishing by RabbitMQ.
This flag indicates communication from RabbitMQ that the connection is
blocked using the Connection.Blocked RPC notification from RabbitMQ
that was added in RabbitMQ 3.2.
:rtype: bool
"""
return self._events.is_set(events.CONNECTION_BLOCKED)
def channel(self, blocking_read=False):
"""Create a new channel
        If blocking_read is True, the cross-thread Queue.get calls will use
        blocking operations that lower resource utilization and increase
throughput. However, due to how Python's blocking Queue.get is
implemented, KeyboardInterrupt is not raised when CTRL-C is
pressed.
:param bool blocking_read: Enable for higher throughput
:raises: rabbitpy.exceptions.AMQPException
:raises: rabbitpy.exceptions.RemoteClosedChannelException
"""
with self._channel_lock:
channel_id = self._get_next_channel_id()
channel_frames = queue.Queue()
self._channels[channel_id] = \
channel.Channel(channel_id,
self.capabilities,
self._events,
self._exceptions,
channel_frames,
self._write_queue,
self._max_frame_size,
self._io.write_trigger,
self,
blocking_read)
self._add_channel_to_io(self._channels[channel_id], channel_frames)
self._channels[channel_id].open()
return self._channels[channel_id]
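    # Usage sketch (the URL and the consuming code are illustrative); both
    # the connection and the channels it creates support the context-manager
    # protocol:
    #
    #   with Connection('amqp://guest:guest@localhost:5672/%2F') as conn:
    #       with conn.channel() as chan:
    #           pass  # declare queues, publish or consume messages here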
def close(self):
"""Close the connection, including all open channels.
:raises: rabbitpy.exceptions.ConnectionClosed
"""
if not self.open and not self.opening:
raise exceptions.ConnectionClosed()
if not self._events.is_set(events.SOCKET_CLOSED):
self._set_state(self.CLOSING)
self._shutdown_connection()
LOGGER.debug('Setting to closed')
self._set_state(self.CLOSED)
@property
def capabilities(self):
"""Return the RabbitMQ Server capabilities from the connection
negotiation process.
:rtype: dict
"""
return self._channel0.properties.get('capabilities', dict())
@property
def server_properties(self):
"""Return the RabbitMQ Server properties from the connection
negotiation process.
:rtype: dict
"""
return self._channel0.properties
    def _add_channel_to_io(self, channel_id, channel_queue):
        """Add a channel and queue to the IO object.
        :param rabbitpy.base.AMQPChannel channel_id: The channel to add
        :param Queue.Queue channel_queue: Channel inbound msg queue
        """
LOGGER.debug('Adding channel %s to io', int(channel_id))
self._io.add_channel(channel_id, channel_queue)
@property
def _api_credentials(self):
"""Return the auth credentials as a tuple
@rtype: tuple
"""
return self._args['username'], self._args['password']
@property
def _channel0_closed(self):
"""Returns a boolean indicating if the base connection channel (0)
is closed.
:rtype: bool
"""
return self._channel0.open and not \
self._events.is_set(events.CHANNEL0_CLOSED)
def _close_all_channels(self):
"""Close all open channels"""
for chan_id in [chan_id for chan_id in self._channels
if not self._channels[chan_id].closed]:
self._channels[chan_id].close()
self._channel0.close()
def _close_channels(self):
"""Close all the channels that are currently open."""
for channel_id in self._channels:
if (self._channels[channel_id].open and
not self._channels[channel_id].closing):
self._channels[channel_id].close()
def _connect(self):
"""Connect to the RabbitMQ Server"""
self._set_state(self.OPENING)
# Create and start the IO object that reads, writes & dispatches frames
self._io = self._create_io_thread()
self._io.daemon = True
self._io.start()
# Wait for IO to connect to the socket or raise an exception
while self.opening and not self._events.is_set(events.SOCKET_OPENED):
if not self._exceptions.empty():
exception = self._exceptions.get()
raise exception
            # threading.Event.wait() always returns None on Python 2.6, so
            # it is impossible to simply check the wait() result
self._events.wait(events.SOCKET_OPENED, self._args['timeout'])
if not self._events.is_set(events.SOCKET_OPENED):
raise RuntimeError("Timeout waiting for opening the socket")
# If the socket could not be opened, return instead of waiting
if self.closed:
return self.close()
# Create the Channel0 queue and add it to the IO thread
self._channel0 = self._create_channel0()
self._add_channel_to_io(self._channel0, None)
self._channel0.start()
# Wait for Channel0 to raise an exception or negotiate the connection
while not self._channel0.open:
if not self._exceptions.empty():
exception = self._exceptions.get()
self._io.stop()
raise exception
time.sleep(0.01)
# Set the maximum frame size for channel use
self._max_frame_size = self._channel0.maximum_frame_size
# Create the heartbeat checker
self._heartbeat = heartbeat.Heartbeat(self._io, self._channel0,
self._args['heartbeat'])
self._heartbeat.start()
self._set_state(self.OPEN)
def _create_channel0(self):
"""Each connection should have a distinct channel0
:rtype: rabbitpy.channel0.Channel0
"""
return channel0.Channel0(connection_args=self._args,
events_obj=self._events,
exception_queue=self._exceptions,
write_queue=self._write_queue,
write_trigger=self._io.write_trigger,
connection=self)
def _create_io_thread(self):
"""Create the IO thread and the objects it uses for communication.
:rtype: rabbitpy.io.IO
"""
return io.IO(name='%s-io' % self._name,
kwargs={'events': self._events,
'exceptions': self._exceptions,
'connection_args': self._args,
'write_queue': self._write_queue})
def _create_message(self, channel_id, method_frame, header_frame, body):
"""Create a message instance with the channel it was received on and
the dictionary of message parts.
:param int channel_id: The channel id the message was sent on
:param method_frame: The method frame value
:type method_frame: pamqp.specification.Frame
:param header_frame: The header frame value
:type header_frame: pamqp.header.ContentHeader
:param str body: The message body
:rtype: rabbitpy.message.Message
"""
msg = message.Message(self._channels[channel_id],
body,
header_frame.properties.to_dict())
msg.method = method_frame
msg.name = method_frame.name
return msg
def _get_next_channel_id(self):
"""Return the next channel id
:rtype: int
"""
if not self._channels:
return 1
if self._max_channel_id == self._channel0.maximum_channels:
raise exceptions.TooManyChannelsError
return self._max_channel_id + 1
@property
def _max_channel_id(self):
"""Return the maximum channel ID that is currently being used.
:rtype: int
"""
return max(list(self._channels.keys()))
@staticmethod
def _normalize_expectations(channel_id, expectations):
"""Turn a class or list of classes into a list of class names.
:param int channel_id: The channel to normalize for
:param expectations: List of classes or class name or class obj
:type expectations: list or str or pamqp.specification.Frame
:rtype: list
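Example (illustrative)::
    _normalize_expectations(1, 'Basic.ConsumeOk')  # -> ['1:Basic.ConsumeOk']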
"""
if isinstance(expectations, list):
output = list()
for value in expectations:
if isinstance(value, str):
output.append('%i:%s' % (channel_id, value))
else:
output.append('%i:%s' % (channel_id, value.name))
return output
elif utils.is_string(expectations):
return ['%i:%s' % (channel_id, expectations)]
return ['%i:%s' % (channel_id, expectations.name)]
def _process_url(self, url):
"""Parse the AMQP URL passed in and return the configuration
information in a dictionary of values.
The URL format is as follows:
amqp[s]://username:password@host:port/virtual_host[?query string]
Values in the URL such as the virtual_host should be URL encoded or
quoted just as a URL would be in a web browser. The default virtual
host / in RabbitMQ should be passed as %2F.
Default values:
- If port is omitted, port 5672 is used for AMQP and port 5671 is
used for AMQPS
- If username or password is omitted, the default value is guest
- If the virtual host is omitted, the default value of %2F is used
Query string options:
- heartbeat
- channel_max
- frame_max
- locale
- cacertfile - Path to CA certificate file
- certfile - Path to client certificate file
- keyfile - Path to client certificate key
- verify - Server certificate validation requirements (1)
- ssl_version - SSL version to use (2)
(1) Should be one of three values:
- ignore - Ignore the cert if provided (default)
- optional - Cert is validated if provided
- required - Cert is required and validated
(2) Should be one of four values:
- SSLv2
- SSLv3
- SSLv23
- TLSv1
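Example URL (hypothetical values):
amqps://user:secret@broker.example.com:5671/%2F?heartbeat=30&verify=required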
:param str url: The AMQP url passed in
:rtype: dict
:raises: ValueError
"""
parsed = utils.urlparse(url)
self._validate_uri_scheme(parsed.scheme)
# Toggle the SSL flag based upon the URL scheme and if SSL is enabled
use_ssl = bool(parsed.scheme == 'amqps' and ssl)
# Ensure that SSL is available if SSL is requested
if parsed.scheme == 'amqps' and not ssl:
LOGGER.warning('SSL requested but not available, disabling')
# Figure out the port as specified by the scheme
scheme_port = self.PORTS[AMQPS] if parsed.scheme == AMQPS \
else self.PORTS[AMQP]
# Set the vhost to be after the base slash if it was specified
vhost = self.DEFAULT_VHOST
if parsed.path:
vhost = parsed.path[1:] or self.DEFAULT_VHOST
# Parse the query string
qargs = utils.parse_qs(parsed.query)
# Return the configuration dictionary to use when connecting
return {
'host': parsed.hostname,
'port': parsed.port or scheme_port,
'virtual_host': utils.unquote(vhost),
'username': utils.unquote(parsed.username or self.GUEST),
'password': utils.unquote(parsed.password or self.GUEST),
'timeout': self._qargs_int('timeout', qargs, self.DEFAULT_TIMEOUT),
'heartbeat': self._qargs_int('heartbeat', qargs,
self.DEFAULT_HEARTBEAT_INTERVAL),
'frame_max': self._qargs_int('frame_max', qargs,
spec.FRAME_MAX_SIZE),
'channel_max': self._qargs_int('channel_max', qargs,
self.DEFAULT_CHANNEL_MAX),
'locale': self._qargs_value('locale', qargs),
'ssl': use_ssl,
'cacertfile': self._qargs_mk_value(['cacertfile', 'ssl_cacert'],
qargs),
'certfile': self._qargs_mk_value(['certfile', 'ssl_cert'], qargs),
'keyfile': self._qargs_mk_value(['keyfile', 'ssl_key'], qargs),
'verify': self._qargs_ssl_validation(qargs),
'ssl_version': self._qargs_ssl_version(qargs)}
@staticmethod
def _qargs_int(key, values, default):
"""Return the query arg value as an integer for the specified key or
return the specified default value.
:param str key: The key to return the value for
:param dict values: The query value dict returned by urlparse
:param int default: The default return value
:rtype: int
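Example: _qargs_int('heartbeat', {'heartbeat': ['60']}, 10) -> 60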
"""
return int(values.get(key, [default])[0])
@staticmethod
def _qargs_float(key, values, default):
"""Return the query arg value as a float for the specified key or
return the specified default value.
:param str key: The key to return the value for
:param dict values: The query value dict returned by urlparse
:param float default: The default return value
:rtype: float
"""
return float(values.get(key, [default])[0])
def _qargs_ssl_validation(self, values):
"""Return the value mapped from the string value in the query string
for the AMQP URL specifying which level of server certificate
validation is required, if any.
:param dict values: The dict of query values from the AMQP URI
:rtype: int
"""
validation = self._qargs_mk_value(['verify', 'ssl_validation'], values)
if not validation:
return
elif validation not in SSL_CERT_MAP:
raise ValueError(
    'Unsupported server cert validation option: %s' % validation)
return SSL_CERT_MAP[validation]
def _qargs_ssl_version(self, values):
"""Return the value mapped from the string value in the query string
for the AMQP URL for SSL version.
:param dict values: The dict of query values from the AMQP URI
:rtype: int
"""
version = self._qargs_value('ssl_version', values)
if not version:
return
elif version not in SSL_VERSION_MAP:
raise ValueError('Unsupported SSL version: %s' % version)
return SSL_VERSION_MAP[version]
@staticmethod
def _qargs_value(key, values, default=None):
"""Return the value from the query arguments for the specified key
or the default value.
:param str key: The key to get the value for
:param dict values: The query value dict returned by urlparse
:return: mixed
"""
return values.get(key, [default])[0]
def _qargs_mk_value(self, keys, values):
"""Try and find the query string value where the value can be specified
with different keys.
:param lists keys: The keys to check
:param dict values: The query value dict returned by urlparse
:return: mixed
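Example: _qargs_mk_value(['certfile', 'ssl_cert'], qargs) returns the
value for 'certfile' if present, else the value for 'ssl_cert', else None.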
"""
for key in keys:
value = self._qargs_value(key, values)
if value is not None:
return value
return None
def _shutdown_connection(self):
"""Tell Channel0 and IO to stop if they are not stopped.
"""
# Make sure the heartbeat is not running
if self._heartbeat is not None:
self._heartbeat.stop()
if not self._events.is_set(events.SOCKET_CLOSED):
self._close_all_channels()
# Let the IOLoop know to close
self._events.set(events.SOCKET_CLOSE)
# Break out of select waiting
self._trigger_write()
if (self._events.is_set(events.SOCKET_OPENED) and
not self._events.is_set(events.SOCKET_CLOSED)):
LOGGER.debug('Waiting on socket to close')
self._events.wait(events.SOCKET_CLOSED, 0.1)
self._io.stop()
else:
return self._io.stop()
while self._io.is_alive():
time.sleep(0.1)
def _trigger_write(self):
"""Notifies the IO loop we need to write a frame by writing a byte
to a local socket.
"""
utils.trigger_write(self._io.write_trigger)
def _validate_uri_scheme(self, scheme):
"""Insure that the specified URI scheme is supported by rabbitpy
:param str scheme: The value to validate
:raises: ValueError
"""
if scheme not in list(self.PORTS.keys()):
raise ValueError('Unsupported URI scheme: %s' % scheme)
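# Usage sketch (illustrative only; assumes a broker on localhost with the
# default guest credentials):
#
#     with Connection('amqp://guest:guest@localhost:5672/%2F') as conn:
#         with conn.channel() as chan:
#             pass  # declare queues, publish or consume here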
|
|
import copy
import json
import os
import shutil
import time
from django.conf import settings
try:
from django.urls import reverse
except ImportError: # Django < 1.10
from django.core.urlresolvers import reverse
from .base import TestCase
import whisper
from graphite.util import unpickle
class MetricsTester(TestCase):
db = os.path.join(settings.WHISPER_DIR, 'test.wsp')
hostcpu = os.path.join(settings.WHISPER_DIR, 'hosts/hostname/cpu.wsp')
settings.CLUSTER_SERVERS = ['127.1.1.1', '127.1.1.2']
def wipe_whisper(self):
try:
os.remove(self.db)
except OSError:
pass
def create_whisper_hosts(self, ts=None):
worker1 = self.hostcpu.replace('hostname', 'worker1')
worker2 = self.hostcpu.replace('hostname', 'worker2')
try:
os.makedirs(worker1.replace('cpu.wsp', ''))
os.makedirs(worker2.replace('cpu.wsp', ''))
except OSError:
pass
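# retention [(1, 60)]: a single archive at 1-second resolution holding
# 60 points, i.e. one minute of data per host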
whisper.create(worker1, [(1, 60)])
whisper.create(worker2, [(1, 60)])
ts = ts or int(time.time())
whisper.update(worker1, 1, ts)
whisper.update(worker2, 2, ts)
def wipe_whisper_hosts(self):
try:
os.remove(self.hostcpu.replace('hostname', 'worker1'))
os.remove(self.hostcpu.replace('hostname', 'worker2'))
shutil.rmtree(self.hostcpu.replace('hostname/cpu.wsp', ''))
except OSError:
pass
def test_index_json(self):
self.create_whisper_hosts()
self.addCleanup(self.wipe_whisper_hosts)
url = reverse('metrics_index')
# default
request = {}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(data[0], 'hosts.worker1.cpu')
self.assertEqual(data[1], 'hosts.worker2.cpu')
# XXX Disabling this test for now since a local running
# Graphite webapp will always return a 200, breaking our test
## cluster failure
#request = {'cluster': 1}
#response = self.client.post(url, request)
#self.assertEqual(response.status_code, 500)
#data = json.loads(response.content)
#self.assertEqual(data, [])
# jsonp
request = {'jsonp': 'callback'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content.split("(")[1].strip(")"))
self.assertEqual(data[0], 'hosts.worker1.cpu')
self.assertEqual(data[1], 'hosts.worker2.cpu')
def test_find_view(self):
ts = int(time.time())
# capture ts - 60 up front; recomputing it later could make the
# interval assertions fail as the test run takes longer
ts_minus_sixty_seconds = ts - 60
self.create_whisper_hosts(ts)
self.addCleanup(self.wipe_whisper_hosts)
url = reverse('metrics_find')
#
# Missing query param
#
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, "Missing required parameter 'query'")
#
# format=invalid_format
#
response = self.client.post(url, {'format': 'invalid_format', 'query': '*'})
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, "Invalid value for 'format' parameter")
def test_find_view_basics(data):
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header('Pragma'))
self.assertTrue(response.has_header('Cache-Control'))
return response.content
#
# Default values
#
request_default = {'query': '',
'local': 0,
'wildcards': 0,
'from': -1,
'until': -1,
'jsonp': '',
'automatic_variants': 0}
#
# format=treejson
#
request=copy.deepcopy(request_default)
request['format']='treejson'
request['query']='*'
content = test_find_view_basics(request)
[data] = json.loads(content)
self.assertEqual(data['text'], 'hosts')
# No match
request=copy.deepcopy(request_default)
request['format']='treejson'
request['query']='other'
content = test_find_view_basics(request)
self.assertEqual(content, '[]')
request['query']='*'
request['wildcards']=1
content = test_find_view_basics(request)
[data] = json.loads(content)
self.assertEqual(data['text'], 'hosts')
# Formats other than treejson shouldn't require DB calls
with self.assertNumQueries(0):
#
# format=pickle
#
request=copy.deepcopy(request_default)
request['format']='pickle'
request['query']='*'
content = test_find_view_basics(request)
data = unpickle.loads(content)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]['path'], 'hosts')
self.assertEqual(data[0]['is_leaf'], False)
request['query']='hosts.*.cpu'
content = test_find_view_basics(request)
data = unpickle.loads(content)
self.assertEqual(len(data), 2)
data = sorted(data, key=lambda item: item['path'])
self.assertEqual(data[0]['path'], 'hosts.worker1.cpu')
self.assertEqual(data[0]['is_leaf'], True)
self.assertEqual(len(data[0]['intervals']), 1)
#self.assertEqual(int(data[0]['intervals'][0].start), ts_minus_sixty_seconds)
self.assertEqual(int(data[0]['intervals'][0].end), ts)
self.assertEqual(data[1]['path'], 'hosts.worker2.cpu')
self.assertEqual(data[1]['is_leaf'], True)
self.assertEqual(len(data[1]['intervals']), 1)
#self.assertEqual(int(data[1]['intervals'][0].start), ts_minus_sixty_seconds)
self.assertEqual(int(data[1]['intervals'][0].end), ts)
#
# format=completer
#
request=copy.deepcopy(request_default)
request['format']='completer'
request['query']='*'
content = test_find_view_basics(request)
data = json.loads(content)
data['metrics'] = sorted(data['metrics'])
self.assertEqual(data, {u'metrics': [{u'name': u'hosts', u'is_leaf': u'0', u'path': u'hosts.'}]})
request['query']='hosts'
content = test_find_view_basics(request)
data = json.loads(content)
data['metrics'] = sorted(data['metrics'])
self.assertEqual(data, {u'metrics': [{u'name': u'hosts', u'is_leaf': u'0', u'path': u'hosts.'}]})
request['query']='hosts.*.*'
content = test_find_view_basics(request)
data = json.loads(content)
data['metrics'] = sorted(data['metrics'])
self.assertEqual(data, {u'metrics': [{u'path': u'hosts.worker1.cpu', u'is_leaf': u'1', u'name': u'cpu'}, {u'path': u'hosts.worker2.cpu', u'is_leaf': u'1', u'name': u'cpu'}]})
request['query']='hosts.'
content = test_find_view_basics(request)
data = json.loads(content)
data['metrics'] = sorted(data['metrics'])
self.assertEqual(data, {u'metrics': [{u'is_leaf': u'0', u'name': u'worker1', u'path': u'hosts.worker1.'}, {u'is_leaf': u'0', u'name': u'worker2', u'path': u'hosts.worker2.'}]})
# No match
request['query']='other'
content = test_find_view_basics(request)
data = json.loads(content)
self.assertEqual(data['metrics'], [])
# Test wildcards param
request['wildcards']=1
request['query']='hosts.*.'
content = test_find_view_basics(request)
data = json.loads(content)
data['metrics'] = sorted(data['metrics'])
self.assertEqual(data, {u'metrics': [{u'name': u'*'}, {u'is_leaf': u'1', u'path': u'hosts.worker1.cpu', u'name': u'cpu'}, {u'is_leaf': u'1', u'path': u'hosts.worker2.cpu', u'name': u'cpu'}]})
# Test from/until params
request=copy.deepcopy(request_default)
request['format']='completer'
request['query']='hosts'
request['from']=int(time.time())-60
request['until']=int(time.time())
content = test_find_view_basics(request)
data = json.loads(content)
data['metrics'] = sorted(data['metrics'])
self.assertEqual(data, {u'metrics': [{u'name': u'hosts', u'is_leaf': u'0', u'path': u'hosts.'}]})
# Test from/until params
request=copy.deepcopy(request_default)
request['format']='completer'
request['query']='hosts'
request['from']='now-1min'
request['until']='now'
content = test_find_view_basics(request)
data = json.loads(content)
data['metrics'] = sorted(data['metrics'])
self.assertEqual(data, {u'metrics': [{u'name': u'hosts', u'is_leaf': u'0', u'path': u'hosts.'}]})
# automatic_variants
request=copy.deepcopy(request_default)
request['format']='completer'
request['automatic_variants']=1
request['query']='hosts.'
content = test_find_view_basics(request)
data = json.loads(content)
data['metrics'] = sorted(data['metrics'])
self.assertEqual(data, {u'metrics': [{u'is_leaf': u'0', u'name': u'worker1', u'path': u'hosts.worker1.'}, {u'is_leaf': u'0', u'name': u'worker2', u'path': u'hosts.worker2.'}]})
request['automatic_variants']=1
request['query']='{hosts,blah}.'
content = test_find_view_basics(request)
data = json.loads(content)
data['metrics'] = sorted(data['metrics'])
self.assertEqual(data, {u'metrics': [{u'path': u'hosts.worker1.', u'is_leaf': u'0', u'name': u'worker1'}, {u'path': u'hosts.worker2.', u'is_leaf': u'0', u'name': u'worker2'}]})
request['automatic_variants']=1
request['query']='hosts,blah.'
content = test_find_view_basics(request)
data = json.loads(content)
data['metrics'] = sorted(data['metrics'])
self.assertEqual(data, {u'metrics': [{u'name': u'worker1', u'path': u'hosts.worker1.', u'is_leaf': u'0'}, {u'name': u'worker2', u'path': u'hosts.worker2.', u'is_leaf': u'0'}]})
# format=completer+jsonp
request=copy.deepcopy(request_default)
request['format']='completer'
request['jsonp']='asdf'
request['query']='*'
content = test_find_view_basics(request)
data = json.loads(content.split("(")[1].strip(")"))
self.assertEqual(data, {u'metrics': [{u'name': u'hosts', u'path': u'hosts.', u'is_leaf': u'0'}]})
# No match
request['query']='other'
content = test_find_view_basics(request)
data = json.loads(content.split("(")[1].strip(")"))
self.assertEqual(data['metrics'], [])
#
# format=nodelist
#
request=copy.deepcopy(request_default)
request['format']='nodelist'
request['query']='*'
content = test_find_view_basics(request)
data = json.loads(content)
self.assertEqual(data, {u'nodes': [u'hosts']})
request=copy.deepcopy(request_default)
request['format']='nodelist'
request['query']='*.*'
content = test_find_view_basics(request)
data = json.loads(content)
self.assertEqual(data, {u'nodes': [u'worker1', u'worker2']})
request=copy.deepcopy(request_default)
request['format']='nodelist'
request['query']='*.*.*'
content = test_find_view_basics(request)
data = json.loads(content)
self.assertEqual(data, {u'nodes': [u'cpu']})
# override node position
request=copy.deepcopy(request_default)
request['format']='nodelist'
request['query']='*.*.*'
request['position']='0'
content = test_find_view_basics(request)
data = json.loads(content)
self.assertEqual(data, {u'nodes': [u'hosts']})
# format=json
request=copy.deepcopy(request_default)
request['format']='json'
# branch
request['query']='*'
content = test_find_view_basics(request)
data = json.loads(content)
self.assertEqual(data, [{u'path': u'hosts', u'is_leaf': False}])
# leaf
request['query']='hosts.*.cpu'
content = test_find_view_basics(request)
data = json.loads(content)
self.assertEqual(len(data), 2)
self.assertEqual(data[0]['path'], 'hosts.worker1.cpu')
self.assertEqual(data[0]['is_leaf'], True)
self.assertEqual(len(data[0]['intervals']), 1)
#self.assertEqual(int(data[0]['intervals'][0]['start']), ts_minus_sixty_seconds)
self.assertEqual(int(data[0]['intervals'][0]['end']), ts)
self.assertEqual(data[1]['path'], 'hosts.worker2.cpu')
self.assertEqual(data[1]['is_leaf'], True)
self.assertEqual(len(data[1]['intervals']), 1)
#self.assertEqual(int(data[1]['intervals'][0]['start']), ts_minus_sixty_seconds)
self.assertEqual(int(data[1]['intervals'][0]['end']), ts)
# No match
request['query']='other'
content = test_find_view_basics(request)
data = json.loads(content)
self.assertEqual(data, [])
# format=json+jsonp
request=copy.deepcopy(request_default)
request['format']='json'
request['jsonp']='asdf'
# branch
request['query']='*'
content = test_find_view_basics(request)
data = json.loads(content.split("(")[1].strip(")"))
self.assertEqual(data, [{u'path': u'hosts', u'is_leaf': False}])
# leaf
request['query']='hosts.*.cpu'
content = test_find_view_basics(request)
data = json.loads(content.split("(")[1].strip(")"))
self.assertEqual(len(data), 2)
self.assertEqual(data[0]['path'], 'hosts.worker1.cpu')
self.assertEqual(data[0]['is_leaf'], True)
self.assertEqual(len(data[0]['intervals']), 1)
#self.assertEqual(int(data[0]['intervals'][0]['start']), ts_minus_sixty_seconds)
self.assertEqual(int(data[0]['intervals'][0]['end']), ts)
self.assertEqual(data[1]['path'], 'hosts.worker2.cpu')
self.assertEqual(data[1]['is_leaf'], True)
self.assertEqual(len(data[1]['intervals']), 1)
#self.assertEqual(int(data[1]['intervals'][0]['start']), ts_minus_sixty_seconds)
self.assertEqual(int(data[1]['intervals'][0]['end']), ts)
# No match
request['query']='other'
content = test_find_view_basics(request)
data = json.loads(content.split("(")[1].strip(")"))
self.assertEqual(data, [])
def test_expand_view(self):
self.create_whisper_hosts()
self.addCleanup(self.wipe_whisper_hosts)
url = reverse('metrics_expand')
# default
request = {'query': '*'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(data['results'], [u'hosts'])
# empty query
request = {'query': ''}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(data['results'], [u''])
def test_get_metadata_view(self):
"""Stub to test get_metadata_view. This currently doesn't test a valid key """
self.create_whisper_hosts()
self.addCleanup(self.wipe_whisper_hosts)
url = reverse('metrics_get_metadata')
# bad key
request = {'metric': 'hosts.worker1.cpu', 'key': 'a'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(data['hosts.worker1.cpu']['error'], "Unexpected error occurred in CarbonLink.get_metadata(hosts.worker1.cpu, a)")
def test_set_metadata_view(self):
"""Stub to test set_metadata_view. This currently doesn't test a valid key """
self.create_whisper_hosts()
self.addCleanup(self.wipe_whisper_hosts)
url = reverse('metrics_set_metadata')
# GET
# bad key
request = {'metric': 'hosts.worker1.cpu', 'key': 'a', 'value': 'b'}
response = self.client.get(url, request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(data['hosts.worker1.cpu']['error'], "Unexpected error occurred in CarbonLink.set_metadata(hosts.worker1.cpu, a)")
# POST
# bad key
request = {'operations': '[{ "metric": "hosts.worker1.cpu", "key": "a", "value": "b" }]'}
response = self.client.post(url, request)
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEqual(data['hosts.worker1.cpu']['error'], "Unexpected error occurred in bulk CarbonLink.set_metadata(hosts.worker1.cpu)")
|
|
# Copyright (c) 2016 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import uuid
import eventlet
from oslo_config import cfg
from heat.common import exception
from heat.engine import check_resource
from heat.engine import dependencies
from heat.engine import resource
from heat.engine import scheduler
from heat.engine import stack
from heat.engine import sync_point
from heat.engine import worker
from heat.rpc import api as rpc_api
from heat.rpc import worker_client
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import utils
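# Note: the mock.patch class decorators below apply bottom-up, so each test
# method receives the mocks as (mock_cru, mock_crc, mock_pcr, mock_csc).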
@mock.patch.object(check_resource, 'check_stack_complete')
@mock.patch.object(check_resource, 'propagate_check_resource')
@mock.patch.object(check_resource, 'check_resource_cleanup')
@mock.patch.object(check_resource, 'check_resource_update')
class CheckWorkflowUpdateTest(common.HeatTestCase):
@mock.patch.object(worker_client.WorkerClient, 'check_resource',
lambda *_: None)
def setUp(self):
super(CheckWorkflowUpdateTest, self).setUp()
thread_group_mgr = mock.Mock()
cfg.CONF.set_default('convergence_engine', True)
self.worker = worker.WorkerService('host-1',
'topic-1',
'engine_id',
thread_group_mgr)
self.cr = check_resource.CheckResource(self.worker.engine_id,
self.worker._rpc_client,
self.worker.thread_group_mgr,
mock.Mock(), {})
self.worker._rpc_client = worker_client.WorkerClient()
self.ctx = utils.dummy_context()
self.stack = tools.get_stack(
'check_workflow_create_stack', self.ctx,
template=tools.string_template_five, convergence=True)
self.stack.converge_stack(self.stack.t)
self.resource = self.stack['A']
self.is_update = True
self.graph_key = (self.resource.id, self.is_update)
self.orig_load_method = stack.Stack.load
stack.Stack.load = mock.Mock(return_value=self.stack)
def tearDown(self):
super(CheckWorkflowUpdateTest, self).tearDown()
stack.Stack.load = self.orig_load_method
def test_resource_not_available(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
self.worker.check_resource(
self.ctx, 'non-existent-id', self.stack.current_traversal, {},
True, None)
for mocked in [mock_cru, mock_crc, mock_pcr, mock_csc]:
self.assertFalse(mocked.called)
@mock.patch.object(worker.WorkerService, '_retrigger_replaced')
def test_stale_traversal(
self, mock_rnt, mock_cru, mock_crc, mock_pcr, mock_csc):
self.worker.check_resource(self.ctx, self.resource.id,
'stale-traversal', {}, True, None)
self.assertTrue(mock_rnt.called)
def test_is_update_traversal(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
self.worker.check_resource(
self.ctx, self.resource.id, self.stack.current_traversal, {},
self.is_update, None)
mock_cru.assert_called_once_with(self.resource,
self.resource.stack.t.id,
set(), self.worker.engine_id,
mock.ANY, mock.ANY)
self.assertFalse(mock_crc.called)
expected_calls = []
for req, fwd in self.stack.convergence_dependencies.leaves():
expected_calls.append(
(mock.call.worker.propagate_check_resource.
assert_called_once_with(
self.ctx, mock.ANY, mock.ANY,
self.stack.current_traversal, mock.ANY,
self.graph_key, {}, self.is_update)))
mock_csc.assert_called_once_with(
self.ctx, mock.ANY, self.stack.current_traversal,
self.resource.id,
mock.ANY, True)
@mock.patch.object(resource.Resource, 'load')
@mock.patch.object(resource.Resource, 'make_replacement')
@mock.patch.object(stack.Stack, 'time_remaining')
def test_is_update_traversal_raise_update_replace(
self, tr, mock_mr, mock_load, mock_cru, mock_crc, mock_pcr,
mock_csc):
mock_load.return_value = self.resource, self.stack, self.stack
mock_cru.side_effect = resource.UpdateReplace
tr.return_value = 317
self.worker.check_resource(
self.ctx, self.resource.id, self.stack.current_traversal, {},
self.is_update, None)
mock_cru.assert_called_once_with(self.resource,
self.resource.stack.t.id,
set(), self.worker.engine_id,
mock.ANY, mock.ANY)
self.assertTrue(mock_mr.called)
self.assertFalse(mock_crc.called)
self.assertFalse(mock_pcr.called)
self.assertFalse(mock_csc.called)
@mock.patch.object(check_resource.CheckResource,
'_stale_resource_needs_retry')
@mock.patch.object(stack.Stack, 'time_remaining')
def test_is_update_traversal_raise_update_inprogress(
self, tr, mock_tsl, mock_cru, mock_crc, mock_pcr,
mock_csc):
mock_cru.side_effect = exception.UpdateInProgress
self.worker.engine_id = 'some-thing-else'
mock_tsl.return_value = True
tr.return_value = 317
self.worker.check_resource(
self.ctx, self.resource.id, self.stack.current_traversal, {},
self.is_update, None)
mock_cru.assert_called_once_with(self.resource,
self.resource.stack.t.id,
set(), self.worker.engine_id,
mock.ANY, mock.ANY)
self.assertFalse(mock_crc.called)
self.assertFalse(mock_pcr.called)
self.assertFalse(mock_csc.called)
@mock.patch.object(resource.Resource, 'state_set')
def test_stale_resource_retry(
self, mock_ss, mock_cru, mock_crc, mock_pcr, mock_csc):
current_template_id = self.resource.current_template_id
res = self.cr._stale_resource_needs_retry(self.ctx,
self.resource,
current_template_id)
self.assertTrue(res)
mock_ss.assert_not_called()
@mock.patch.object(resource.Resource, 'state_set')
def test_try_steal_lock_alive(
self, mock_ss, mock_cru, mock_crc, mock_pcr, mock_csc):
res = self.cr._stale_resource_needs_retry(self.ctx,
self.resource,
str(uuid.uuid4()))
self.assertFalse(res)
mock_ss.assert_not_called()
@mock.patch.object(check_resource.listener_client, 'EngineListenerClient')
@mock.patch.object(check_resource.resource_objects.Resource, 'get_obj')
@mock.patch.object(resource.Resource, 'state_set')
def test_try_steal_lock_dead(
self, mock_ss, mock_get, mock_elc, mock_cru, mock_crc, mock_pcr,
mock_csc):
fake_res = mock.Mock()
fake_res.engine_id = 'some-thing-else'
mock_get.return_value = fake_res
mock_elc.return_value.is_alive.return_value = False
current_template_id = self.resource.current_template_id
res = self.cr._stale_resource_needs_retry(self.ctx,
self.resource,
current_template_id)
self.assertTrue(res)
mock_ss.assert_called_once_with(self.resource.action,
resource.Resource.FAILED,
mock.ANY)
@mock.patch.object(check_resource.listener_client, 'EngineListenerClient')
@mock.patch.object(check_resource.resource_objects.Resource, 'get_obj')
@mock.patch.object(resource.Resource, 'state_set')
def test_try_steal_lock_not_dead(
self, mock_ss, mock_get, mock_elc, mock_cru, mock_crc, mock_pcr,
mock_csc):
fake_res = mock.Mock()
fake_res.engine_id = self.worker.engine_id
mock_get.return_value = fake_res
mock_elc.return_value.is_alive.return_value = True
current_template_id = self.resource.current_template_id
res = self.cr._stale_resource_needs_retry(self.ctx,
self.resource,
current_template_id)
self.assertFalse(res)
mock_ss.assert_not_called()
@mock.patch.object(stack.Stack, 'rollback')
def test_resource_update_failure_sets_stack_state_as_failed(
self, mock_tr, mock_cru, mock_crc, mock_pcr, mock_csc):
self.stack.state_set(self.stack.UPDATE, self.stack.IN_PROGRESS, '')
self.resource.state_set(self.resource.UPDATE,
self.resource.IN_PROGRESS)
dummy_ex = exception.ResourceNotAvailable(
resource_name=self.resource.name)
mock_cru.side_effect = exception.ResourceFailure(
dummy_ex, self.resource, action=self.resource.UPDATE)
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal, {},
self.is_update, None)
s = self.stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual((s.UPDATE, s.FAILED), (s.action, s.status))
self.assertEqual('Resource UPDATE failed: '
'ResourceNotAvailable: resources.A: The Resource (A)'
' is not available.', s.status_reason)
@mock.patch.object(stack.Stack, 'rollback')
def test_resource_cleanup_failure_sets_stack_state_as_failed(
self, mock_tr, mock_cru, mock_crc, mock_pcr, mock_csc):
self.is_update = False # invokes check_resource_cleanup
self.stack.state_set(self.stack.UPDATE, self.stack.IN_PROGRESS, '')
self.resource.state_set(self.resource.UPDATE,
self.resource.IN_PROGRESS)
dummy_ex = exception.ResourceNotAvailable(
resource_name=self.resource.name)
mock_crc.side_effect = exception.ResourceFailure(
dummy_ex, self.resource, action=self.resource.UPDATE)
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal, {},
self.is_update, None)
s = self.stack.load(self.ctx, stack_id=self.stack.id)
self.assertEqual((s.UPDATE, s.FAILED), (s.action, s.status))
self.assertEqual('Resource UPDATE failed: '
'ResourceNotAvailable: resources.A: The Resource (A)'
' is not available.', s.status_reason)
def test_resource_update_failure_triggers_rollback_if_enabled(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
mock_tr = self.stack.rollback = mock.Mock(return_value=None)
self.stack.disable_rollback = False
self.stack.store()
dummy_ex = exception.ResourceNotAvailable(
resource_name=self.resource.name)
mock_cru.side_effect = exception.ResourceFailure(
dummy_ex, self.resource, action=self.resource.UPDATE)
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal, {},
self.is_update, None)
self.assertTrue(mock_tr.called)
mock_tr.assert_called_once_with()
def test_resource_cleanup_failure_triggers_rollback_if_enabled(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
mock_tr = self.stack.rollback = mock.Mock(return_value=None)
self.is_update = False # invokes check_resource_cleanup
self.stack.disable_rollback = False
self.stack.store()
dummy_ex = exception.ResourceNotAvailable(
resource_name=self.resource.name)
mock_crc.side_effect = exception.ResourceFailure(
dummy_ex, self.resource, action=self.resource.UPDATE)
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal, {},
self.is_update, None)
mock_tr.assert_called_once_with()
@mock.patch.object(stack.Stack, 'rollback')
def test_rollback_is_not_triggered_on_rollback_disabled_stack(
self, mock_tr, mock_cru, mock_crc, mock_pcr, mock_csc):
self.stack.disable_rollback = True
self.stack.store()
dummy_ex = exception.ResourceNotAvailable(
resource_name=self.resource.name)
mock_cru.side_effect = exception.ResourceFailure(
dummy_ex, self.resource, action=self.stack.CREATE)
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal, {},
self.is_update, None)
self.assertFalse(mock_tr.called)
@mock.patch.object(stack.Stack, 'rollback')
def test_rollback_not_re_triggered_for_a_rolling_back_stack(
self, mock_tr, mock_cru, mock_crc, mock_pcr, mock_csc):
self.stack.disable_rollback = False
self.stack.action = self.stack.ROLLBACK
self.stack.status = self.stack.IN_PROGRESS
self.stack.store()
dummy_ex = exception.ResourceNotAvailable(
resource_name=self.resource.name)
mock_cru.side_effect = exception.ResourceFailure(
dummy_ex, self.resource, action=self.stack.CREATE)
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal, {},
self.is_update, None)
self.assertFalse(mock_tr.called)
def test_resource_update_failure_purges_db_for_stack_failure(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
self.stack.disable_rollback = True
self.stack.store()
self.stack.purge_db = mock.Mock()
dummy_ex = exception.ResourceNotAvailable(
resource_name=self.resource.name)
mock_cru.side_effect = exception.ResourceFailure(
dummy_ex, self.resource, action=self.resource.UPDATE)
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal, {},
self.is_update, None)
self.assertTrue(self.stack.purge_db.called)
def test_resource_cleanup_failure_purges_db_for_stack_failure(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
self.is_update = False
self.stack.disable_rollback = True
self.stack.store()
self.stack.purge_db = mock.Mock()
dummy_ex = exception.ResourceNotAvailable(
resource_name=self.resource.name)
mock_crc.side_effect = exception.ResourceFailure(
dummy_ex, self.resource, action=self.resource.UPDATE)
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal, {},
self.is_update, None)
self.assertTrue(self.stack.purge_db.called)
@mock.patch.object(check_resource.CheckResource,
'retrigger_check_resource')
@mock.patch.object(stack.Stack, 'load')
def test_initiate_propagate_rsrc_retriggers_check_rsrc_on_new_stack_update(
self, mock_stack_load, mock_rcr, mock_cru, mock_crc, mock_pcr,
mock_csc):
key = sync_point.make_key(self.resource.id,
self.stack.current_traversal,
self.is_update)
mock_pcr.side_effect = exception.EntityNotFound(entity='Sync Point',
name=key)
updated_stack = stack.Stack(self.ctx, self.stack.name, self.stack.t,
self.stack.id,
current_traversal='some_newy_trvl_uuid')
mock_stack_load.return_value = updated_stack
self.cr._initiate_propagate_resource(self.ctx, self.resource.id,
self.stack.current_traversal,
self.is_update, self.resource,
self.stack)
mock_rcr.assert_called_once_with(self.ctx,
self.resource.id, updated_stack)
def test_check_stack_complete_is_invoked_for_replaced_resource(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
resC = self.stack['C']
# let's say C is update-replaced
is_update = True
trav_id = self.stack.current_traversal
replacementC_id = resC.make_replacement(self.stack.t.id,
set(resC.requires))
# use a local name to avoid shadowing the imported stack module
replacementC, _stack, _ = resource.Resource.load(self.ctx,
                                                 replacementC_id,
                                                 trav_id,
                                                 is_update, {})
self.cr._initiate_propagate_resource(self.ctx, replacementC_id,
trav_id,
is_update, replacementC,
self.stack)
# check_stack_complete should be called with resC.id not
# replacementC.id
mock_csc.assert_called_once_with(self.ctx, self.stack,
trav_id,
resC.id, mock.ANY,
is_update)
@mock.patch.object(sync_point, 'sync')
def test_retrigger_check_resource(self, mock_sync, mock_cru, mock_crc,
mock_pcr, mock_csc):
resC = self.stack['C']
# A, B are predecessors to C when is_update is True
expected_predecessors = {(self.stack['A'].id, True),
(self.stack['B'].id, True)}
self.cr.retrigger_check_resource(self.ctx, resC.id, self.stack)
mock_pcr.assert_called_once_with(self.ctx, mock.ANY, resC.id,
self.stack.current_traversal,
mock.ANY, (resC.id, True), None,
True, None)
call_args, call_kwargs = mock_pcr.call_args
actual_predecessors = call_args[4]
self.assertCountEqual(expected_predecessors, actual_predecessors)
def test_update_retrigger_check_resource_new_traversal_deletes_rsrc(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
# mock dependencies to indicate a rsrc with id 2 is not present
# in latest traversal
self.stack._convg_deps = dependencies.Dependencies([
[(1, False), (1, True)], [(2, False), None]])
# simulate rsrc 2 completing its update for old traversal
# and calling rcr
self.cr.retrigger_check_resource(self.ctx, 2, self.stack)
# Ensure that pcr was called with proper delete traversal
mock_pcr.assert_called_once_with(self.ctx, mock.ANY, 2,
self.stack.current_traversal,
mock.ANY, (2, False), None,
False, None)
def test_delete_retrigger_check_resource_new_traversal_updates_rsrc(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
# mock dependencies to indicate a rsrc with id 2 has an update
# in latest traversal
self.stack._convg_deps = dependencies.Dependencies([
[(1, False), (1, True)], [(2, False), (2, True)]])
# simulate rsrc 2 completing its delete for old traversal
# and calling rcr
self.cr.retrigger_check_resource(self.ctx, 2, self.stack)
# Ensure that pcr was called with proper delete traversal
mock_pcr.assert_called_once_with(self.ctx, mock.ANY, 2,
self.stack.current_traversal,
mock.ANY, (2, True), None,
True, None)
@mock.patch.object(stack.Stack, 'purge_db')
def test_handle_failure(self, mock_purgedb, mock_cru, mock_crc, mock_pcr,
mock_csc):
self.stack.mark_failed('dummy-reason')
mock_purgedb.assert_called_once_with()
self.assertEqual('dummy-reason', self.stack.status_reason)
def test_handle_failure_rollback(self, mock_cru, mock_crc,
mock_pcr, mock_csc):
mock_tr = self.stack.rollback = mock.Mock(return_value=None)
self.stack.disable_rollback = False
self.stack.state_set(self.stack.UPDATE, self.stack.IN_PROGRESS, '')
self.stack.mark_failed('dummy-reason')
mock_tr.assert_called_once_with()
@mock.patch.object(stack.Stack, 'purge_db')
@mock.patch.object(stack.Stack, 'state_set')
@mock.patch.object(check_resource.CheckResource,
'retrigger_check_resource')
@mock.patch.object(stack.Stack, 'rollback')
def test_handle_rsrc_failure_when_update_fails(
self, mock_tr, mock_rcr, mock_ss, mock_pdb, mock_cru, mock_crc,
mock_pcr, mock_csc):
# Emulate failure
mock_ss.return_value = False
self.cr._handle_resource_failure(self.ctx, self.is_update,
self.resource.id, self.stack,
'dummy-reason')
self.assertTrue(mock_ss.called)
self.assertFalse(mock_rcr.called)
self.assertFalse(mock_pdb.called)
self.assertFalse(mock_tr.called)
@mock.patch.object(stack.Stack, 'purge_db')
@mock.patch.object(stack.Stack, 'state_set')
@mock.patch.object(check_resource.CheckResource,
'retrigger_check_resource')
@mock.patch.object(stack.Stack, 'rollback')
def test_handle_rsrc_failure_when_update_fails_different_traversal(
self, mock_tr, mock_rcr, mock_ss, mock_pdb, mock_cru, mock_crc,
mock_pcr, mock_csc):
# Emulate failure
mock_ss.return_value = False
# Emulate new traversal
new_stack = tools.get_stack('check_workflow_create_stack', self.ctx,
template=tools.string_template_five,
convergence=True)
new_stack.current_traversal = 'new_traversal'
stack.Stack.load = mock.Mock(return_value=new_stack)
self.cr._handle_resource_failure(self.ctx, self.is_update,
self.resource.id,
self.stack, 'dummy-reason')
# Ensure retrigger called
self.assertTrue(mock_rcr.called)
self.assertTrue(mock_ss.called)
self.assertFalse(mock_pdb.called)
self.assertFalse(mock_tr.called)
def test_handle_stack_timeout(self, mock_cru, mock_crc, mock_pcr,
mock_csc):
mock_mf = self.stack.mark_failed = mock.Mock(return_value=True)
self.cr._handle_stack_timeout(self.ctx, self.stack)
mock_mf.assert_called_once_with(u'Timed out')
def test_do_check_resource_marks_stack_as_failed_if_stack_timesout(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
mock_mf = self.stack.mark_failed = mock.Mock(return_value=True)
mock_cru.side_effect = scheduler.Timeout(None, 60)
self.is_update = True
self.cr._do_check_resource(self.ctx, self.stack.current_traversal,
self.stack.t, {}, self.is_update,
self.resource, self.stack, {})
mock_mf.assert_called_once_with(u'Timed out')
@mock.patch.object(check_resource.CheckResource,
'_handle_stack_timeout')
def test_do_check_resource_ignores_timeout_for_new_update(
self, mock_hst, mock_cru, mock_crc, mock_pcr, mock_csc):
# Ensure current_traversal is checked before marking the stack as
# failed due to time-out.
mock_cru.side_effect = scheduler.Timeout(None, 60)
self.is_update = True
old_traversal = self.stack.current_traversal
self.stack.current_traversal = 'new_traversal'
self.cr._do_check_resource(self.ctx, old_traversal,
self.stack.t, {}, self.is_update,
self.resource, self.stack, {})
self.assertFalse(mock_hst.called)
@mock.patch.object(stack.Stack, 'has_timed_out')
@mock.patch.object(check_resource.CheckResource,
'_handle_stack_timeout')
def test_check_resource_handles_timeout(self, mock_hst, mock_to, mock_cru,
mock_crc, mock_pcr, mock_csc):
mock_to.return_value = True
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal,
{}, self.is_update, {})
self.assertTrue(mock_hst.called)
def test_check_resource_does_not_propagate_on_cancel(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
# ensure when check_resource is cancelled, the next set of
# resources are not propagated.
mock_cru.side_effect = check_resource.CancelOperation
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal,
{}, self.is_update, {})
self.assertFalse(mock_pcr.called)
self.assertFalse(mock_csc.called)
@mock.patch.object(resource.Resource, 'load')
def test_requires(self, mock_load, mock_cru, mock_crc, mock_pcr, mock_csc):
mock_load.return_value = self.resource, self.stack, self.stack
res_data = {(1, True): {u'id': 5, u'name': 'A', 'attrs': {}},
(2, True): {u'id': 3, u'name': 'B', 'attrs': {}}}
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal,
sync_point.serialize_input_data(res_data),
self.is_update, {})
mock_cru.assert_called_once_with(
self.resource, self.resource.stack.t.id,
{5, 3}, self.worker.engine_id,
self.stack, mock.ANY)
@mock.patch.object(check_resource, 'check_stack_complete')
@mock.patch.object(check_resource, 'propagate_check_resource')
@mock.patch.object(check_resource, 'check_resource_cleanup')
@mock.patch.object(check_resource, 'check_resource_update')
class CheckWorkflowCleanupTest(common.HeatTestCase):
@mock.patch.object(worker_client.WorkerClient, 'check_resource',
lambda *_: None)
def setUp(self):
super(CheckWorkflowCleanupTest, self).setUp()
thread_group_mgr = mock.Mock()
self.worker = worker.WorkerService('host-1',
'topic-1',
'engine_id',
thread_group_mgr)
self.worker._rpc_client = worker_client.WorkerClient()
self.ctx = utils.dummy_context()
tstack = tools.get_stack(
'check_workflow_create_stack', self.ctx,
template=tools.string_template_five, convergence=True)
tstack.converge_stack(tstack.t, action=tstack.CREATE)
self.stack = stack.Stack.load(self.ctx, stack_id=tstack.id)
self.stack.thread_group_mgr = tools.DummyThreadGroupManager()
self.stack.converge_stack(self.stack.t, action=self.stack.DELETE)
self.resource = self.stack['A']
self.is_update = False
self.graph_key = (self.resource.id, self.is_update)
@mock.patch.object(resource.Resource, 'load')
@mock.patch.object(stack.Stack, 'time_remaining')
def test_is_cleanup_traversal(
self, tr, mock_load, mock_cru, mock_crc, mock_pcr, mock_csc):
tr.return_value = 317
mock_load.return_value = self.resource, self.stack, self.stack
self.worker.check_resource(
self.ctx, self.resource.id, self.stack.current_traversal, {},
self.is_update, None)
self.assertFalse(mock_cru.called)
mock_crc.assert_called_once_with(
self.resource, self.resource.stack.t.id,
self.worker.engine_id,
tr(), mock.ANY)
@mock.patch.object(stack.Stack, 'time_remaining')
def test_is_cleanup_traversal_raise_update_inprogress(
self, tr, mock_cru, mock_crc, mock_pcr, mock_csc):
mock_crc.side_effect = exception.UpdateInProgress
tr.return_value = 317
self.worker.check_resource(
self.ctx, self.resource.id, self.stack.current_traversal, {},
self.is_update, None)
mock_crc.assert_called_once_with(self.resource,
self.resource.stack.t.id,
self.worker.engine_id,
tr(), mock.ANY)
self.assertFalse(mock_cru.called)
self.assertFalse(mock_pcr.called)
self.assertFalse(mock_csc.called)
def test_check_resource_does_not_propagate_on_cancelling_cleanup(
self, mock_cru, mock_crc, mock_pcr, mock_csc):
# ensure when check_resource is cancelled, the next set of
# resources are not propagated.
mock_crc.side_effect = check_resource.CancelOperation
self.worker.check_resource(self.ctx, self.resource.id,
self.stack.current_traversal,
{}, self.is_update, {})
self.assertFalse(mock_pcr.called)
self.assertFalse(mock_csc.called)
class MiscMethodsTest(common.HeatTestCase):
def setUp(self):
super(MiscMethodsTest, self).setUp()
cfg.CONF.set_default('convergence_engine', True)
self.ctx = utils.dummy_context()
self.stack = tools.get_stack(
'check_workflow_create_stack', self.ctx,
template=tools.attr_cache_template, convergence=True)
self.stack.converge_stack(self.stack.t)
self.resource = self.stack['A']
def test_node_data_ok(self):
self.resource.action = self.resource.CREATE
expected_input_data = {'attrs': {(u'flat_dict', u'key2'): 'val2',
(u'flat_dict', u'key3'): 'val3',
(u'nested_dict', u'dict', u'a'): 1,
(u'nested_dict', u'dict', u'b'): 2},
'id': mock.ANY,
'reference_id': 'A',
'name': 'A',
'uuid': mock.ANY,
'action': mock.ANY,
'status': mock.ANY}
actual_input_data = self.resource.node_data()
self.assertEqual(expected_input_data, actual_input_data.as_dict())
def test_node_data_exception(self):
self.resource.action = self.resource.CREATE
expected_input_data = {'attrs': {},
'id': mock.ANY,
'reference_id': 'A',
'name': 'A',
'uuid': mock.ANY,
'action': mock.ANY,
'status': mock.ANY}
self.resource.get_attribute = mock.Mock(
side_effect=exception.InvalidTemplateAttribute(resource='A',
key='value'))
actual_input_data = self.resource.node_data()
self.assertEqual(expected_input_data, actual_input_data.as_dict())
@mock.patch.object(sync_point, 'sync')
def test_check_stack_complete_root(self, mock_sync):
check_resource.check_stack_complete(
self.ctx, self.stack, self.stack.current_traversal,
self.stack['E'].id, self.stack.convergence_dependencies,
True)
mock_sync.assert_called_once_with(
self.ctx, self.stack.id, self.stack.current_traversal, True,
mock.ANY, mock.ANY, {(self.stack['E'].id, True): None})
@mock.patch.object(sync_point, 'sync')
def test_check_stack_complete_child(self, mock_sync):
check_resource.check_stack_complete(
self.ctx, self.stack, self.stack.current_traversal,
self.resource.id, self.stack.convergence_dependencies,
True)
self.assertFalse(mock_sync.called)
@mock.patch.object(dependencies.Dependencies, 'roots')
@mock.patch.object(stack.Stack, '_persist_state')
def test_check_stack_complete_persist_called(self, mock_persist_state,
mock_dep_roots):
mock_dep_roots.return_value = [(1, True)]
check_resource.check_stack_complete(
self.ctx, self.stack, self.stack.current_traversal,
1, self.stack.convergence_dependencies,
True)
self.assertTrue(mock_persist_state.called)
@mock.patch.object(sync_point, 'sync')
def test_propagate_check_resource(self, mock_sync):
check_resource.propagate_check_resource(
self.ctx, mock.ANY, mock.ANY,
self.stack.current_traversal, mock.ANY,
('A', True), {}, True, None)
self.assertTrue(mock_sync.called)
@mock.patch.object(resource.Resource, 'create_convergence')
@mock.patch.object(resource.Resource, 'update_convergence')
def test_check_resource_update_init_action(self, mock_update, mock_create):
self.resource.action = 'INIT'
check_resource.check_resource_update(
self.resource, self.resource.stack.t.id, set(), 'engine-id',
self.stack, None)
self.assertTrue(mock_create.called)
self.assertFalse(mock_update.called)
@mock.patch.object(resource.Resource, 'create_convergence')
@mock.patch.object(resource.Resource, 'update_convergence')
def test_check_resource_update_create_action(
self, mock_update, mock_create):
self.resource.action = 'CREATE'
check_resource.check_resource_update(
self.resource, self.resource.stack.t.id, set(), 'engine-id',
self.stack, None)
self.assertFalse(mock_create.called)
self.assertTrue(mock_update.called)
@mock.patch.object(resource.Resource, 'create_convergence')
@mock.patch.object(resource.Resource, 'update_convergence')
def test_check_resource_update_update_action(
self, mock_update, mock_create):
self.resource.action = 'UPDATE'
check_resource.check_resource_update(
self.resource, self.resource.stack.t.id, set(), 'engine-id',
self.stack, None)
self.assertFalse(mock_create.called)
self.assertTrue(mock_update.called)
@mock.patch.object(resource.Resource, 'delete_convergence')
def test_check_resource_cleanup_delete(self, mock_delete):
self.resource.current_template_id = 'new-template-id'
check_resource.check_resource_cleanup(
self.resource, self.resource.stack.t.id, 'engine-id',
self.stack.timeout_secs(), None)
self.assertTrue(mock_delete.called)
def test_check_message_raises_cancel_exception(self):
# ensure CancelOperation is raised on receiving
# rpc_api.THREAD_CANCEL message
msg_queue = eventlet.queue.LightQueue()
msg_queue.put_nowait(rpc_api.THREAD_CANCEL)
self.assertRaises(check_resource.CancelOperation,
check_resource._check_for_message, msg_queue)
|
|
# -*- coding: utf-8 -*-
import operator
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
def test_mixed_comparison(self):
# GH 13128, GH 22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before #22163, not sure when)
df = pd.DataFrame([['1989-08-01', 1], ['1989-08-01', 2]])
other = pd.DataFrame([['a', 'b'], ['c', 'd']])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False],
[True, False],
[False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
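# A minimal sketch of the flex/operator equivalence exercised above
# (comments only, not executed at import time):
#
#     df = pd.DataFrame({'x': [1, 2, 3]})
#     tm.assert_frame_equal(df.eq(2), df == 2)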
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic(object):
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range('2016-01-01', periods=10)
tdi = pd.timedelta_range('1', periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi,
1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range('2016-01-01', periods=3)
ser = pd.Series(['1 Day', 'NaT', '2 Days'], dtype='timedelta64[ns]')
df = pd.DataFrame({'A': dti, 'B': ser})
other = pd.DataFrame({'A': ser, 'B': ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{'A': pd.Series(['2016-01-02', '2016-01-03', '2016-01-05'],
dtype='datetime64[ns]'),
'B': ser * 2})
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(self, all_arithmetic_operators, float_frame,
mixed_float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# reversed ops aren't in the operator module; strip the 'r' and swap operands
if op.startswith('__r'):
return getattr(operator, op.replace('__r', '__'))(y, x)
return getattr(operator, op)(x, y)
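# e.g. op == '__rsub__': f(x, y) computes y - x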
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize('op', ['__add__', '__sub__', '__mul__'])
def test_arith_flex_frame_mixed(self, op, int_frame, mixed_int_frame,
mixed_float_frame):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ['__sub__']:
dtype = dict(B='uint64', C=None)
elif op in ['__add__', '__mul__']:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators,
float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with tm.assert_raises_regex(ValueError, msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
float_frame.add(float_frame.iloc[0], fill_value=3)
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
float_frame.add(float_frame.iloc[0], axis='index', fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs('a')
col = df['two']
# after arithmetic refactor, add truediv here
ops = ['add', 'sub', 'mul', 'mod']
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='int64')
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype='float64')
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis='index')
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([])
df_len0 = pd.DataFrame([], columns=['A', 'B'])
df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
df.add(ser_len0, fill_value='E')
with tm.assert_raises_regex(NotImplementedError, 'fill_value'):
df_len0.sub(df['A'], axis=None, fill_value=3)
class TestFrameArithmetic(object):
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame([[2, 4],
[4, 6],
[6, 8]],
columns=df.columns, index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame([[1, 2],
[5, 6],
[9, 10]],
columns=df.columns, index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self,
all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [getattr(df.loc['A'], opname)(rowlike.squeeze()),
getattr(df.loc['B'], opname)(rowlike.squeeze()),
getattr(df.loc['C'], opname)(rowlike.squeeze())]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
if opname in ['__rmod__', '__rfloordiv__']:
# exvals will have dtypes [f8, i8, i8] so expected will be
# all-f8, but the DataFrame operation will return mixed dtypes
# use exvals[-1].dtype instead of "i8" for compat with 32-bit
# systems/pythons
expected[False] = expected[False].astype(exvals[-1].dtype)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self,
all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze())}
dtype = None
if opname in ['__rmod__', '__rfloordiv__']:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index,
dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == 'i').all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == 'i').all()
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
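# Illustrative sketch (not part of the test suite): the row/column
# broadcasting behavior asserted above, on a tiny frame; the names below
# are ad hoc.
if __name__ == '__main__':
    _arr = np.arange(6).reshape(3, 2)
    _df = pd.DataFrame(_arr)
    print(_df + _arr[[0], :])  # shape (1, ncols) broadcasts down the rows
    print(_df + _arr[:, [0]])  # shape (nrows, 1) broadcasts across the columns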
|
|
from __future__ import absolute_import
import atexit
import logging
import time
try:
from queue import Empty, Full, Queue # pylint: disable=import-error
except ImportError:
from Queue import Empty, Full, Queue # pylint: disable=import-error
from collections import defaultdict
from threading import Thread, Event
from kafka.vendor import six
from kafka.errors import (
kafka_errors, UnsupportedCodecError, FailedPayloadsError,
RequestTimedOutError, AsyncProducerQueueFull, UnknownError,
RETRY_ERROR_TYPES, RETRY_BACKOFF_ERROR_TYPES, RETRY_REFRESH_ERROR_TYPES)
from kafka.protocol import CODEC_NONE, ALL_CODECS, create_message_set
from kafka.structs import (
ProduceRequestPayload, ProduceResponsePayload, TopicPartition, RetryOptions)
log = logging.getLogger('kafka.producer')
BATCH_SEND_DEFAULT_INTERVAL = 20
BATCH_SEND_MSG_COUNT = 20
# unlimited
ASYNC_QUEUE_MAXSIZE = 0
ASYNC_QUEUE_PUT_TIMEOUT = 0
# unlimited retries by default
ASYNC_RETRY_LIMIT = None
ASYNC_RETRY_BACKOFF_MS = 100
ASYNC_RETRY_ON_TIMEOUTS = True
ASYNC_LOG_MESSAGES_ON_ERROR = True
STOP_ASYNC_PRODUCER = -1
ASYNC_STOP_TIMEOUT_SECS = 30
SYNC_FAIL_ON_ERROR_DEFAULT = True
def _send_upstream(queue, client, codec, batch_time, batch_size,
req_acks, ack_timeout, retry_options, stop_event,
log_messages_on_error=ASYNC_LOG_MESSAGES_ON_ERROR,
stop_timeout=ASYNC_STOP_TIMEOUT_SECS,
codec_compresslevel=None):
"""Private method to manage producing messages asynchronously
Listens on the queue for a specified number of messages or until
a specified timeout and then sends messages to the brokers in grouped
requests (one per broker).
Messages placed on the queue should be tuples that conform to this format:
((topic, partition), message, key)
Currently does not mark messages with task_done. Do not attempt to
:meth:`join`!
Arguments:
queue (threading.Queue): the queue from which to get messages
client (kafka.SimpleClient): instance to use for communicating
with brokers
codec (kafka.protocol.ALL_CODECS): compression codec to use
batch_time (int): interval in seconds to send message batches
batch_size (int): count of messages that will trigger an immediate send
req_acks: required acks to use with ProduceRequests. see server protocol
ack_timeout: timeout to wait for required acks. see server protocol
retry_options (RetryOptions): settings for retry limits, backoff etc
stop_event (threading.Event): event to monitor for shutdown signal.
when this event is 'set', the producer will stop sending messages.
log_messages_on_error (bool, optional): log stringified message-contents
on any produce error, otherwise only log a hash() of the contents,
defaults to True.
stop_timeout (int or float, optional): number of seconds to continue
retrying messages after stop_event is set, defaults to 30.
"""
request_tries = {}
while not stop_event.is_set():
try:
client.reinit()
            except Exception as e:
                log.warning('Async producer failed to connect to brokers (%s); '
                            'backoff for %s(ms) before retrying',
                            e, retry_options.backoff_ms)
time.sleep(float(retry_options.backoff_ms) / 1000)
else:
break
stop_at = None
while not (stop_event.is_set() and queue.empty() and not request_tries):
# Handle stop_timeout
if stop_event.is_set():
if not stop_at:
stop_at = stop_timeout + time.time()
if time.time() > stop_at:
log.debug('Async producer stopping due to stop_timeout')
break
timeout = batch_time
count = batch_size
send_at = time.time() + timeout
msgset = defaultdict(list)
# Merging messages will require a bit more work to manage correctly
# for now, don't look for new batches if we have old ones to retry
if request_tries:
count = 0
log.debug('Skipping new batch collection to handle retries')
else:
log.debug('Batching size: %s, timeout: %s', count, timeout)
# Keep fetching till we gather enough messages or a
# timeout is reached
while count > 0 and timeout >= 0:
try:
topic_partition, msg, key = queue.get(timeout=timeout)
except Empty:
break
# Check if the controller has requested us to stop
if topic_partition == STOP_ASYNC_PRODUCER:
stop_event.set()
break
# Adjust the timeout to match the remaining period
count -= 1
timeout = send_at - time.time()
msgset[topic_partition].append((msg, key))
# Send collected requests upstream
for topic_partition, msg in msgset.items():
messages = create_message_set(msg, codec, key, codec_compresslevel)
req = ProduceRequestPayload(
topic_partition.topic,
topic_partition.partition,
tuple(messages))
request_tries[req] = 0
if not request_tries:
continue
reqs_to_retry, error_cls = [], None
retry_state = {
'do_backoff': False,
'do_refresh': False
}
def _handle_error(error_cls, request):
if issubclass(error_cls, RETRY_ERROR_TYPES) or (retry_options.retry_on_timeouts and issubclass(error_cls, RequestTimedOutError)):
reqs_to_retry.append(request)
if issubclass(error_cls, RETRY_BACKOFF_ERROR_TYPES):
retry_state['do_backoff'] |= True
if issubclass(error_cls, RETRY_REFRESH_ERROR_TYPES):
retry_state['do_refresh'] |= True
requests = list(request_tries.keys())
log.debug('Sending: %s', requests)
responses = client.send_produce_request(requests,
acks=req_acks,
timeout=ack_timeout,
fail_on_error=False)
log.debug('Received: %s', responses)
for i, response in enumerate(responses):
error_cls = None
if isinstance(response, FailedPayloadsError):
error_cls = response.__class__
orig_req = response.payload
elif isinstance(response, ProduceResponsePayload) and response.error:
error_cls = kafka_errors.get(response.error, UnknownError)
orig_req = requests[i]
if error_cls:
_handle_error(error_cls, orig_req)
log.error('%s sending ProduceRequestPayload (#%d of %d) '
'to %s:%d with msgs %s',
error_cls.__name__, (i + 1), len(requests),
orig_req.topic, orig_req.partition,
orig_req.messages if log_messages_on_error
else hash(orig_req.messages))
if not reqs_to_retry:
request_tries = {}
continue
# doing backoff before next retry
if retry_state['do_backoff'] and retry_options.backoff_ms:
log.warning('Async producer backoff for %s(ms) before retrying', retry_options.backoff_ms)
time.sleep(float(retry_options.backoff_ms) / 1000)
# refresh topic metadata before next retry
if retry_state['do_refresh']:
            log.warning('Async producer forcing metadata refresh before retrying')
try:
client.load_metadata_for_topics()
except Exception:
log.exception("Async producer couldn't reload topic metadata.")
# Apply retry limit, dropping messages that are over
request_tries = dict(
(key, count + 1)
for (key, count) in request_tries.items()
if key in reqs_to_retry
and (retry_options.limit is None
or (count < retry_options.limit))
)
# Log messages we are going to retry
for orig_req in request_tries.keys():
log.info('Retrying ProduceRequestPayload to %s:%d with msgs %s',
orig_req.topic, orig_req.partition,
orig_req.messages if log_messages_on_error
else hash(orig_req.messages))
if request_tries or not queue.empty():
log.error('Stopped producer with %d unsent messages', len(request_tries) + queue.qsize())
class Producer(object):
"""
Base class to be used by producers
Arguments:
client (kafka.SimpleClient): instance to use for broker
communications. If async_send=True, the background thread will use
:meth:`client.copy`, which is expected to return a thread-safe
object.
codec (kafka.protocol.ALL_CODECS): compression codec to use.
req_acks (int, optional): A value indicating the acknowledgements that
the server must receive before responding to the request,
defaults to 1 (local ack).
ack_timeout (int, optional): millisecond timeout to wait for the
configured req_acks, defaults to 1000.
sync_fail_on_error (bool, optional): whether sync producer should
raise exceptions (True), or just return errors (False),
defaults to True.
async_send (bool, optional): send message using a background thread,
defaults to False.
batch_send_every_n (int, optional): If async_send is True, messages are
sent in batches of this size, defaults to 20.
batch_send_every_t (int or float, optional): If async_send is True,
messages are sent immediately after this timeout in seconds, even
if there are fewer than batch_send_every_n, defaults to 20.
async_retry_limit (int, optional): number of retries for failed messages
or None for unlimited, defaults to None / unlimited.
async_retry_backoff_ms (int, optional): milliseconds to backoff on
failed messages, defaults to 100.
async_retry_on_timeouts (bool, optional): whether to retry on
RequestTimedOutError, defaults to True.
async_queue_maxsize (int, optional): limit to the size of the
internal message queue in number of messages (not size), defaults
to 0 (no limit).
async_queue_put_timeout (int or float, optional): timeout seconds
for queue.put in send_messages for async producers -- will only
apply if async_queue_maxsize > 0 and the queue is Full,
defaults to 0 (fail immediately on full queue).
async_log_messages_on_error (bool, optional): set to False and the
async producer will only log hash() contents on failed produce
requests, defaults to True (log full messages). Hash logging
will not allow you to identify the specific message that failed,
but it will allow you to match failures with retries.
async_stop_timeout (int or float, optional): seconds to continue
attempting to send queued messages after :meth:`producer.stop`,
defaults to 30.
Deprecated Arguments:
async (bool, optional): send message using a background thread,
defaults to False. Deprecated, use 'async_send'
batch_send (bool, optional): If True, messages are sent by a background
thread in batches, defaults to False. Deprecated, use 'async_send'
"""
ACK_NOT_REQUIRED = 0 # No ack is required
ACK_AFTER_LOCAL_WRITE = 1 # Send response after it is written to log
ACK_AFTER_CLUSTER_COMMIT = -1 # Send response after data is committed
DEFAULT_ACK_TIMEOUT = 1000
def __init__(self, client,
req_acks=ACK_AFTER_LOCAL_WRITE,
ack_timeout=DEFAULT_ACK_TIMEOUT,
codec=None,
codec_compresslevel=None,
sync_fail_on_error=SYNC_FAIL_ON_ERROR_DEFAULT,
async_send=False,
batch_send=False, # deprecated, use async_send
batch_send_every_n=BATCH_SEND_MSG_COUNT,
batch_send_every_t=BATCH_SEND_DEFAULT_INTERVAL,
async_retry_limit=ASYNC_RETRY_LIMIT,
async_retry_backoff_ms=ASYNC_RETRY_BACKOFF_MS,
async_retry_on_timeouts=ASYNC_RETRY_ON_TIMEOUTS,
async_queue_maxsize=ASYNC_QUEUE_MAXSIZE,
async_queue_put_timeout=ASYNC_QUEUE_PUT_TIMEOUT,
async_log_messages_on_error=ASYNC_LOG_MESSAGES_ON_ERROR,
async_stop_timeout=ASYNC_STOP_TIMEOUT_SECS,
**kwargs):
        # 'async' was renamed to 'async_send' for python 3.7 support ('async' is a keyword)
if 'async' in kwargs:
log.warning('Deprecated async option found -- use async_send')
async_send = kwargs['async']
if async_send:
assert batch_send_every_n > 0
assert batch_send_every_t > 0
assert async_queue_maxsize >= 0
self.client = client
self.async_send = async_send
self.req_acks = req_acks
self.ack_timeout = ack_timeout
self.stopped = False
if codec is None:
codec = CODEC_NONE
elif codec not in ALL_CODECS:
raise UnsupportedCodecError("Codec 0x%02x unsupported" % (codec,))
self.codec = codec
self.codec_compresslevel = codec_compresslevel
if self.async_send:
# Messages are sent through this queue
self.queue = Queue(async_queue_maxsize)
self.async_queue_put_timeout = async_queue_put_timeout
async_retry_options = RetryOptions(
limit=async_retry_limit,
backoff_ms=async_retry_backoff_ms,
retry_on_timeouts=async_retry_on_timeouts)
self.thread_stop_event = Event()
self.thread = Thread(
target=_send_upstream,
args=(self.queue, self.client.copy(), self.codec,
batch_send_every_t, batch_send_every_n,
self.req_acks, self.ack_timeout,
async_retry_options, self.thread_stop_event),
kwargs={'log_messages_on_error': async_log_messages_on_error,
'stop_timeout': async_stop_timeout,
'codec_compresslevel': self.codec_compresslevel}
)
# Thread will die if main thread exits
self.thread.daemon = True
self.thread.start()
def cleanup(obj):
if not obj.stopped:
obj.stop()
self._cleanup_func = cleanup
atexit.register(cleanup, self)
else:
self.sync_fail_on_error = sync_fail_on_error
def send_messages(self, topic, partition, *msg):
"""Helper method to send produce requests.
        Note that each msg *must* be encoded to bytes by the caller. Passing
        a unicode message will not work; encode it first, e.g. via
        `unicode_message.encode('utf-8')`, before calling send_messages.
All messages will set the message 'key' to None.
Arguments:
topic (str): name of topic for produce request
partition (int): partition number for produce request
*msg (bytes): one or more message payloads
Returns:
ResponseRequest returned by server
Raises:
FailedPayloadsError: low-level connection error, can be caused by
networking failures, or a malformed request.
KafkaUnavailableError: all known brokers are down when attempting
to refresh metadata.
LeaderNotAvailableError: topic or partition is initializing or
a broker failed and leadership election is in progress.
NotLeaderForPartitionError: metadata is out of sync; the broker
that the request was sent to is not the leader for the topic
or partition.
UnknownTopicOrPartitionError: the topic or partition has not
been created yet and auto-creation is not available.
AsyncProducerQueueFull: in async mode, if too many messages are
unsent and remain in the internal queue.
"""
return self._send_messages(topic, partition, *msg)
def _send_messages(self, topic, partition, *msg, **kwargs):
key = kwargs.pop('key', None)
# Guarantee that msg is actually a list or tuple (should always be true)
if not isinstance(msg, (list, tuple)):
raise TypeError("msg is not a list or tuple!")
for m in msg:
            # The protocol allows both the key and the payload to be null
            # (https://goo.gl/o694yN), but a (null, null) pair doesn't make sense.
            if m is None:
                if key is None:
                    raise TypeError("key and payload can't both be null")
# Raise TypeError if any non-null message is not encoded as bytes
elif not isinstance(m, six.binary_type):
raise TypeError("all produce message payloads must be null or type bytes")
# Raise TypeError if the key is not encoded as bytes
if key is not None and not isinstance(key, six.binary_type):
raise TypeError("the key must be type bytes")
if self.async_send:
for idx, m in enumerate(msg):
try:
item = (TopicPartition(topic, partition), m, key)
if self.async_queue_put_timeout == 0:
self.queue.put_nowait(item)
else:
self.queue.put(item, True, self.async_queue_put_timeout)
except Full:
raise AsyncProducerQueueFull(
msg[idx:],
'Producer async queue overfilled. '
'Current queue size %d.' % (self.queue.qsize(),))
resp = []
else:
messages = create_message_set([(m, key) for m in msg], self.codec, key, self.codec_compresslevel)
req = ProduceRequestPayload(topic, partition, messages)
try:
resp = self.client.send_produce_request(
[req], acks=self.req_acks, timeout=self.ack_timeout,
fail_on_error=self.sync_fail_on_error
)
except Exception:
log.exception("Unable to send messages")
raise
return resp
def stop(self, timeout=None):
"""
Stop the producer (async mode). Blocks until async thread completes.
"""
if timeout is not None:
log.warning('timeout argument to stop() is deprecated - '
'it will be removed in future release')
if not self.async_send:
log.warning('producer.stop() called, but producer is not async')
return
if self.stopped:
log.warning('producer.stop() called, but producer is already stopped')
return
if self.async_send:
self.queue.put((STOP_ASYNC_PRODUCER, None, None))
self.thread_stop_event.set()
self.thread.join()
if hasattr(self, '_cleanup_func'):
# Remove cleanup handler now that we've stopped
# py3 supports unregistering
if hasattr(atexit, 'unregister'):
atexit.unregister(self._cleanup_func) # pylint: disable=no-member
# py2 requires removing from private attribute...
else:
# ValueError on list.remove() if the exithandler no longer exists
# but that is fine here
try:
atexit._exithandlers.remove( # pylint: disable=no-member
(self._cleanup_func, (self,), {}))
except ValueError:
pass
del self._cleanup_func
self.stopped = True
def __del__(self):
if self.async_send and not self.stopped:
self.stop()
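# Minimal usage sketch (illustrative only, not part of this module): assumes
# a reachable broker at the given address; the topic and payloads are
# hypothetical.
#
#     from kafka import SimpleClient
#     client = SimpleClient('localhost:9092')
#     producer = Producer(client, async_send=True)
#     producer.send_messages('my-topic', 0, b'payload-1', b'payload-2')
#     producer.stop()  # flushes the queue and joins the background thread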
|
|
"""
Functions for converting text documents into vector space
and dealing with dirty data.
Features:
- no external dependencies
- feature selection methods:
- CHI
- WCP (Within Class Probability)
- CMFS (Comprehensively Measure Feature Selection)
- improved GINI
- CO matrix calculation
- DF calculation
- IDF calculation
"""
from multiprocessing import Pool
from collections import Counter
from itertools import groupby,chain
from math import log,ceil
import re
from batch import partitions
# TODO resampling
# TODO move feature selection into a separate file
# TODO __init__.py
# TODO adopt the nomenclature: t-term, c-category, tk, ci
# TODO reorder functions top-down vs bottom-up vs subject
# TODO smart lowercase (simple NER based on DF[t] vs DF[t.lower])
# TODO sklearn model.fit/transform interface OR SIMILAR via functools.partial
# TODO compute lift for co
# TODO select ngrams based on lift
# ---[ document frequency ]-----------------------------------------------------
def get_df_part(kwargs):
min_df_part = kwargs['min_df_part']
max_df_part = kwargs['max_df_part']
min_tf_doc = kwargs['min_tf_doc']
df = Counter()
for tokens in iter_tokens_part(kwargs):
if min_tf_doc:
unique_tokens = [t for t,f in Counter(tokens).items() if f>=min_tf_doc]
else:
unique_tokens = set(tokens)
df.update(unique_tokens)
# limit within partition
if min_df_part:
below = [t for t in df if df[t]<min_df_part]
for t in below:
del df[t]
    if max_df_part < 1.0:
        x = max_df_part * len(kwargs['X'])
above = [t for t in df if df[t]>x]
for t in above:
del df[t]
return df
# TODO option to include whole words shorter than char ngram_range 'lo' value
# TODO option to mark word begin/end in char ngrams
# TODO max_df float
# TODO max_df int
# TODO max_df_part float
# TODO min_df float
# TODO reorder ARGS
def get_df(X, workers=4, as_dict=True,
           token_pattern=r'[\w][\w-]*', split_pattern='', encoding=None,
lowercase=True, min_df=0, min_df_part=0, max_df=1.0, max_df_part=1.0,
analyzer='word', tokenizer=None, preprocessor=None,
decode_error='strict', stop_words=None, mp_pool=None,
min_tf_doc=0, ngram_range=None, postprocessor=None, ngram_words=None):
"""Calculate document frequency from a collection of text documents.
Parameters
----------
X : iterable
Collection of text documents.
workers : int, default=4
    token_pattern : string, default='[\w][\w-]*'
        Regular expression denoting what constitutes a "token".
        Will not be used when tokenizer or split_pattern is defined.
    split_pattern : string, default=''
        Regular expression denoting what separates "tokens".
        Will not be used when tokenizer is defined.
ngram_range : tuple (lo, hi)
The lower and upper "n" for n-grams to be extracted
ngram_words: iterable or None (default)
Limit n-gram generation to cases where at least one word occurs
in the provided list.
encoding: string or None (default)
lowercase: boolean, default=True
analyzer: str {'word','char'}
Whether the features should be made of word or character n-grams.
Option 'char' creates character n-grams only from text inside
word boundaries.
tokenizer: callable or None (default)
Function which transforms text into list of tokens (before
postprocessing and n-gram generation).
preprocessor: callable, list of callable or None (default)
Function or list of functions which transforms text before tokenization.
postprocessor: callable, list of callable or None (default)
Function(s) or list of functions which transforms list of tokens (before n-gram
generation)
min_df: int, default=0
min_df_part: int, default=0
min_tf_doc: int, default=0
max_df: float, default=1.0
max_df_part: float, default=1.0
decode_error: str, default='strict'
stop_words: iterable or None (default)
mp_pool: multiprocessing.Pool or None (default)
Multiprocessing pool object that will be used to parallelize
execution. If none is provided a new one will be created.
as_dict: boolean, default=True
Whether the result should be converted into dict or left
as collections.Counter (which doesn't support marshaling)
"""
data = []
for lo,hi in partitions(len(X),workers):
kwargs = dict(
X = X[lo:hi]
,token_pattern = token_pattern
,split_pattern = split_pattern
,encoding = encoding
,lowercase = lowercase
,min_df_part = min_df_part
,max_df_part = max_df_part
,analyzer = analyzer
,ngram_range = ngram_range
,tokenizer = tokenizer
,preprocessor = preprocessor
,decode_error = decode_error
,stop_words = stop_words
,min_tf_doc = min_tf_doc
,postprocessor = postprocessor
,ngram_words = ngram_words
)
data.append(kwargs)
pool = mp_pool or Pool(workers)
df_partitions = pool.map(get_df_part, data)
df=df_partitions[0]
for df_ in df_partitions[1:]:
df.update(df_)
if min_df:
below = [t for t in df if df[t]<min_df]
for t in below:
del df[t]
if max_df < 1.0:
max_df_cnt = max_df * len(X)
above = [t for t in df if df[t]>max_df_cnt]
for t in above:
del df[t]
if as_dict:
df = dict(df)
return df
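# Usage sketch (illustrative; the documents below are hypothetical):
#
#     docs = [u'the cat sat', u'the dog sat']
#     get_df(docs, workers=1)
#     # -> {u'the': 2, u'sat': 2, u'cat': 1, u'dog': 1}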
### 24s get_dfy vs 11s get_df vs 25s get_dfy2(specialized)
def get_dfy(X,Y,workers=4,sort=True,mp_pool=None,**kwargs):
"""Calcualte per topic document frequency.
"""
dfy = {}
if sort:
data = sorted(zip(Y,X))
else:
data = zip(Y,X)
pool = mp_pool or Pool(workers)
for y,g in groupby(data,lambda x:x[0]):
x = [v[1] for v in g]
dfy[y] = get_df(x,mp_pool=pool,**kwargs)
return dfy
def get_df_from_dfy(dfy,as_dict=True):
"""Convert per topic document frequency into total document frequency.
"""
df = Counter()
for y in dfy:
df.update(dfy[y])
if as_dict:
df = dict(df)
return df
# ---[ clean ]------------------------------------------------------------------
# TODO replace based on matched split_pattern
def get_clean_x_part(kwargs):
replace = kwargs['replace']
out = []
for tokens in iter_tokens_part(kwargs):
out.append(replace.join(tokens))
return out
# TODO refactor with get_df
def get_clean_x(X, workers=4,
                token_pattern=r'[\w][\w-]*', split_pattern='', encoding=None,
lowercase=True,
tokenizer=None, preprocessor=None,
decode_error='strict', stop_words=None, mp_pool=None,
postprocessor=None,
replace=u' ; ', stop_hashes=None, hash_fun=None):
"""Replace noise tokens (words/phrases).
Parameters
----------
X : iterable
Collection of text documents.
stop_words : iterable or None (default)
Collection of tokens that should be replaced
stop_hashes : iterable or None (default)
Collection of hashes of tokens that should be replaced
replace : str, default=u' ; '
Replacement text for noise tokens
workers : int, default=4
token_pattern : string, default='[\w][\w-]*'
        Regular expression denoting what constitutes a "token".
Will not be used when tokenizer or split_pattern is defined.
... TODO rest of params
"""
data = []
for lo,hi in partitions(len(X),workers):
kwargs = dict(
X = X[lo:hi]
,token_pattern = token_pattern
,split_pattern = split_pattern
,encoding = encoding
,lowercase = lowercase
,tokenizer = tokenizer
,preprocessor = preprocessor
,decode_error = decode_error
,stop_words = stop_words
,postprocessor = postprocessor
,replace = replace
,stop_hashes = stop_hashes
,hash_fun = hash_fun
)
data.append(kwargs)
pool = mp_pool or Pool(workers)
x_partitions = pool.map(get_clean_x_part, data)
x=x_partitions[0]
for x_ in x_partitions[1:]:
x.extend(x_)
return x
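# Usage sketch (illustrative; the input and stop words are hypothetical):
#
#     get_clean_x([u'buy cheap pills now'], workers=1,
#                 stop_words=[u'cheap', u'pills'])
#     # -> [u'buy ; now']  (surviving tokens joined with replace=u' ; ')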
# ---[ feature selection ]------------------------------------------------------
def get_idf(df, n, a1=1, a2=1, a3=1, min_df=0):
"""Calculate inverse document frequency.
"""
idf = Counter()
for t in df:
if min_df and df[t]<min_df: continue
idf[t] = log( (a1+n) / (a2+df[t]) ) + a3
return idf
# TODO mcd
def get_chiy(df,n,dfy,ny,alpha=0):
chiy = {}
for y in dfy:
chiy[y] = get_chi(df,n,dfy[y],ny[y],alpha)
return chiy
# TODO rename dfy,ny
def get_chi(df,n,dfy,ny,alpha=0):
"""Calculate chi scores for features from one topic
"""
chi = {}
for t,val in iter_chi(df,n,dfy,ny,alpha):
chi[t] = val
return chi
# TODO rename dfy,ny
def get_chi_explain(df,n,dfy,ny,alpha=0):
chi_explain = iter_chi(df,n,dfy,ny,alpha,explain=True)
return dict(chi_explain)
# TODO rename dfy,ny -> dfc (class), dft (topic)
# TODO rename chi variables
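# The generator below computes the classic 2x2 contingency chi-square per
# term t and class c (a sketch of the statistic it implements):
#
#   chi2(t,c) = sum over the four cells (i,j) of (O_ij - E_ij)**2 / E_ij
#
# where O_ij are observed document counts (term present/absent x in class /
# not in class) and E_ij the counts expected under independence; `alpha` is
# an optional smoothing constant added to each denominator.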
def iter_chi(df,n,dfy,ny,alpha=0,explain=False):
all = df
topic = dfy
for t in df:
# observed
o_c1_t1 = topic.get(t,0)
o_c1_t0 = ny - topic.get(t,0)
o_c0_t1 = all[t] - topic.get(t,0)
o_c0_t0 = n - o_c1_t1 - o_c1_t0 - o_c0_t1
# expected
e_c1_t1 = 1.0 * ny * all[t]/n
e_c1_t0 = 1.0 * ny * (n-all[t])/n
e_c0_t1 = 1.0 * (n-ny)/n * all[t]
e_c0_t0 = 1.0 * (n-ny)/n * (n-all[t])
# chi components
c1_t1 = (o_c1_t1 - e_c1_t1)**2 / (e_c1_t1 + alpha)
c1_t0 = (o_c1_t0 - e_c1_t0)**2 / (e_c1_t0 + alpha)
c0_t1 = (o_c0_t1 - e_c0_t1)**2 / (e_c0_t1 + alpha)
c0_t0 = (o_c0_t0 - e_c0_t0)**2 / (e_c0_t0 + alpha)
# chi
chi = c0_t0 + c1_t0 + c0_t1 + c1_t1
# result
if explain:
ex = dict()
ex['o_c1_t1'] = o_c1_t1
ex['o_c1_t0'] = o_c1_t0
ex['o_c0_t1'] = o_c0_t1
ex['o_c0_t0'] = o_c0_t0
ex['e_c1_t1'] = e_c1_t1
ex['e_c1_t0'] = e_c1_t0
ex['e_c0_t1'] = e_c0_t1
ex['e_c0_t0'] = e_c0_t0
ex['c1_t1'] = c1_t1
ex['c1_t0'] = c1_t0
ex['c0_t1'] = c0_t1
ex['c0_t0'] = c0_t0
ex['chi'] = chi
ex = {k:int(v) for k,v in ex.items()}
yield t,ex
else:
yield t,chi
# TODO gini(wcp) ???
def iter_wcpy(df,dfy,explain=False):
topics = dfy.keys()
V = len(df)
for t in df:
wcpy = {}
py = {}
for y in topics:
nom = 1 + dfy[y].get(t,0)
denom = V + df[t]
py[y] = 1.0 * nom / denom
sum_py = sum(py.values())
for y in topics:
wcpy[y] = py[y] / sum_py
if explain:
ex = dict()
# TODO
yield t,ex
else:
yield t,wcpy
def iter_giniy(df,dfy,ny,explain=False):
topics = dfy.keys()
for t in df:
giniy = {}
for y in topics:
p_t_when_y = 1.0 * dfy[y].get(t,0) / ny[y]
p_y_when_t = 1.0 * dfy[y].get(t,0) / df[t]
giniy[y] = p_t_when_y**2 + p_y_when_t**2
if explain:
ex = dict()
# TODO
yield t,ex
else:
yield t,giniy
def iter_cmfsy(df,dfy,explain=False):
topics = dfy.keys()
C = len(topics)
V = len(df)
sum_dfy = {y:sum(dfy[y].values()) for y in topics}
for t in df:
cmfsy = {}
for y in topics:
nom = dfy[y].get(t,0) + 1
denom1 = df[t] + C
denom2 = sum_dfy[y] + V
cmfsy[y] = 1.0 * nom / (denom1 * denom2)
if explain:
ex = dict()
# TODO
yield t,ex
else:
yield t,cmfsy
def get_giniy(df, dfy, ny):
"""Calculate improved GINI for all topics
"""
items = iter_giniy(df,dfy,ny)
topics = dfy.keys()
return transform_items_topics(items, topics)
def get_cmfsy(df, dfy):
"""
http://www.dafl.yuntech.edu.tw/download/2012.IPM.48.A%20new%20feature%20selection%20based%20on%20comprehensive%20measurement%20both%20in%20inter-category%20and%20intra-category%20for%20text%20categorization.pdf
"""
items = iter_cmfsy(df,dfy)
topics = dfy.keys()
return transform_items_topics(items, topics)
def get_wcpy(df, dfy):
"""Calculate WCP for all topics
"""
items = iter_wcpy(df, dfy)
topics = dfy.keys()
return transform_items_topics(items, topics)
# TODO describe this function
def get_mcdy(fsy):
"minimal class difference of feature score"
topics = fsy.keys()
mcdy = {y:{} for y in topics}
vocab = set()
for y in topics:
vocab.update(fsy[y])
for t in vocab:
for y in topics:
val = min([abs(fsy[y].get(t,0)-fsy[y2].get(t,0)) for y2 in topics if y!=y2])
if val:
mcdy[y][t] = val
return mcdy
# ---[ vectorization ]----------------------------------------------------------
# TODO token_id iter not token_cnt
# TODO refactor using iter_tokens_part
def vectorize_part(kwargs):
vocabulary = kwargs['vocabulary']
binary = kwargs['binary']
sparse = kwargs['sparse']
stream = kwargs['stream']
upper_limit = kwargs['upper_limit']
dtype = kwargs['dtype']
if dtype:
import numpy as np
typecode = kwargs['typecode']
if typecode:
from array import array
if hasattr(vocabulary,'items'):
vocab_dict = vocabulary
vocab_len = max(vocab_dict.values()) + 1
else:
vocab_dict = {t:t_id for t_id,t in enumerate(vocabulary)}
vocab_len = len(vocabulary)
out = []
for tokens in iter_tokens_part(kwargs):
# TODO filter tokens - keep only vocabulary -> here or after ngrams ???
# output
if sparse:
if binary:
v = [vocab_dict[t] for t in set(tokens) if t in vocab_dict]
if dtype:
v = np.array(v,dtype=dtype)
if typecode:
v = array(typecode,v)
            elif stream:
                v = []
                empty = True
                for t in tokens:
                    if t not in vocab_dict:
                        # unknown token: emit at most one 0 as a separator
                        if not empty:
                            v.append(0) # XXX
                        empty = True
                        #continue # TODO optional -1 token
                    else:
                        t_id = vocab_dict[t]
                        v.append(t_id)
                        empty = False
if dtype:
v = np.array(v,dtype=dtype)
if typecode:
v = array(typecode,v)
else:
tf = {}
for t in tokens:
if t not in vocab_dict: continue
t_id = vocab_dict[t]
if t_id not in tf: tf[t_id]=1
else:
tf[t_id]+=1
if upper_limit:
for t in tf:
tf[t] = min(upper_limit,tf[t])
v = tf
if dtype:
v = np.array(tf.items(),dtype=dtype)
if typecode:
pass # TODO
else:
v = [0]*vocab_len
if binary:
for t in tokens:
if t not in vocab_dict: continue
t_id = vocab_dict[t]
v[t_id] = 1
else:
for t in tokens:
if t not in vocab_dict: continue
t_id = vocab_dict[t]
v[t_id] += 1
if dtype:
v = np.array(v, dtype=dtype)
            if upper_limit:
                if dtype:
                    v = v.clip(max=upper_limit)
                else:
                    v = [min(upper_limit,f) for f in v]
out.append(v)
return out
# TODO remove dead options
def vectorize(X, vocabulary, workers=4,
              token_pattern=r'[\w][\w-]*', split_pattern='',
encoding=None, lowercase=True,
analyzer='word', tokenizer=None, preprocessor=None,
decode_error='strict', stop_words=None, mp_pool=None,
ngram_range=None, postprocessor=None, ngram_words=None,
binary=False, sparse=True, upper_limit=0,
dtype=None,typecode=None,
partitioned=False,
stream=False):
"""Convert a collection of text documents into a collection of token counts
Parameters
----------
X : iterable
vocabulary : iterable or mapping
TODO
binary : boolean, default=False
TODO
sparse : boolean, default=True
TODO
upper_limit : int, default=0
Upper limit for token counts in the vector
dtype : numpy.dtype or None (default)
numpy data type of the result
typecode : str or None (default)
array.array typecode of the result
partitioned : boolean, default=False
Whether the result should be partitioned or merged
into a single list
"""
data = []
for lo,hi in partitions(len(X),workers):
kwargs = dict(
X = X[lo:hi]
,token_pattern = token_pattern
,split_pattern = split_pattern
,encoding = encoding
,lowercase = lowercase
,analyzer = analyzer
,ngram_range = ngram_range
,tokenizer = tokenizer
,preprocessor = preprocessor
,decode_error = decode_error
,stop_words = stop_words
,postprocessor = postprocessor
,ngram_words = ngram_words
,vocabulary = vocabulary
,binary = binary
,sparse = sparse
,stream = stream
,dtype = dtype
,typecode = typecode
,upper_limit = upper_limit
)
data.append(kwargs)
pool = mp_pool or Pool(workers)
v_partitions = pool.map(vectorize_part, data)
if partitioned:
out = v_partitions
else:
out = list(chain.from_iterable(v_partitions))
return out
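# Usage sketch (illustrative): dense binary vectors over a fixed vocabulary;
# the documents and vocabulary below are hypothetical.
#
#     vocab = [u'cat', u'dog', u'sat']
#     vectorize([u'the cat sat', u'the dog'], vocab,
#               workers=1, sparse=False, binary=True)
#     # -> [[1, 0, 1], [0, 1, 0]]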
# ---[ cooccurrence ]-----------------------------------------------------------
# TODO parallelize
# TODO Y
def get_co(X, diagonal=True, triangular=False, sparse=True, binary=False,
dtype=None,stream=False,ngram_max=None,symetry=True,
output_dtype=None, upper_limit=0, output_len=None):
"""Calculate cooccurence count from a collection of token counts.
"""
import numpy as np
co = Counter()
for x in X:
if sparse:
if binary:
for t1 in x:
for t2 in x:
cnt = 1
# TODO refactor
if symetry:
a = min(t1,t2)
b = max(t1,t2)
else:
a = t1
b = t2
if a==b and not diagonal:
continue
co[a,b] += cnt
if a!=b and not triangular and symetry:
co[b,a] += cnt
elif stream:
for i in range(len(x)):
t1 = x[i]
if ngram_max:
j_range = range(i,i+ngram_max)
else:
j_range = range(i,len(x))
for j in j_range:
if j>=len(x): break
t2=x[j]
if t2==0: break
cnt = 1
# TODO refactor
if symetry:
a = min(t1,t2)
b = max(t1,t2)
else:
a = t1
b = t2
if a==b and not diagonal:
continue
co[a,b] += cnt
if a!=b and not triangular and symetry:
co[b,a] += cnt
else:
if dtype:
pass # TODO
else:
for t1 in x:
for t2 in x:
cnt = min(x[t1],x[t2])
# TODO refactor
if symetry:
a = min(t1,t2)
b = max(t1,t2)
else:
a = t1
b = t2
if a==b and not diagonal:
continue
co[a,b] += cnt
if a!=b and not triangular and symetry:
co[b,a] += cnt
else:
pass # TODO
if output_dtype and output_len:
out = np.zeros((output_len,output_len),dtype=output_dtype)
# for (t1,t2),f in co.items():
# out[t1,t2] = min(upper_limit,f) if upper_limit else f
if upper_limit:
for (t1,t2),f in co.items():
out[t1,t2] = min(upper_limit,f)
else:
for (t1,t2),f in co.items():
out[t1,t2] = f
return out
else:
return co
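# Usage sketch (illustrative): sparse binary cooccurrence over documents
# given as lists of token ids; the input below is hypothetical.
#
#     co = get_co([[0, 1], [1, 2]], binary=True)
#     # co is a Counter over (id, id) pairs, e.g. co[1, 1] == 2 and
#     # co[0, 0] == 1; the diagonal is kept since diagonal=True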
def get_coy(X,Y,sort=True,**kwargs):
"""Calculate per topic cooccurence count from a collection of token counts.
"""
coy = {}
if sort:
data = sorted(zip(Y,X))
else:
data = zip(Y,X)
for y,g in groupby(data,lambda x:x[0]):
x = [v[1] for v in g]
coy[y] = get_co(x,**kwargs)
return coy
# TODO np.array input
def get_co_from_coy(coy,dtype=None):
"""Convert per topic cooccurence count into total cooccurence count.
"""
if dtype:
import numpy as np
co = np.zeros_like(list(coy.values())[0],dtype=dtype)
for y in coy:
co += coy[y]
else:
co = Counter()
for y in coy:
co.update(coy[y])
return co
# ---[ tokens ]-----------------------------------------------------------------
def iter_tokens_part(kwargs):
X = kwargs['X']
token_pattern = kwargs['token_pattern']
split_pattern = kwargs['split_pattern']
stop_words = kwargs['stop_words']
lowercase = kwargs['lowercase']
encoding = kwargs['encoding']
decode_error = kwargs['decode_error']
preprocessor = kwargs['preprocessor']
tokenizer = kwargs['tokenizer']
postprocessor = kwargs['postprocessor']
ngram_range = kwargs.get('ngram_range')
ngram_words = kwargs.get('ngram_words')
analyzer = kwargs.get('analyzer') or 'word'
stop_hashes = kwargs.get('stop_hashes')
hash_fun = kwargs.get('hash_fun') or hash
stop_words_set = set(stop_words or [])
stop_hashes_set = set(stop_hashes or [])
ngram_words_set = set(ngram_words or [])
if token_pattern:
re_tok = re.compile(token_pattern,re.U)
if split_pattern:
re_split = re.compile(split_pattern,re.U)
out = []
for text in X:
if encoding:
text = text.decode(encoding,decode_error)
if preprocessor:
if callable(preprocessor):
text = preprocessor(text)
else:
for p in preprocessor:
text = p(text)
if lowercase:
text = text.lower()
if tokenizer:
tokens = tokenizer(text)
elif split_pattern:
tokens = re_split.split(text)
elif token_pattern:
tokens = re_tok.findall(text)
if postprocessor:
if callable(postprocessor):
tokens = postprocessor(tokens)
else:
for p in postprocessor:
tokens = p(tokens)
if stop_words:
tokens = [t for t in tokens if t not in stop_words_set]
if stop_hashes:
tokens = [t for t in tokens if hash_fun(t) not in stop_hashes_set]
if ngram_range:
lo,hi = ngram_range
ngrams = []
if analyzer=='word':
                for i in range(len(tokens)-lo+1):
                    for n in range(lo,hi+1):
                        if i+n>len(tokens): break # no room left for an n-gram of length n
                        ngram = tuple(tokens[i:i+n])
                        if ngram_words_set and not ngram_words_set&set(ngram): continue
                        ngrams.append(ngram) # TODO tuple vs string
elif analyzer=='char':
for t in tokens:
for n in range(lo,hi+1):
if len(t)<n: pass
elif len(t)==n:
ngrams.append(t)
else:
for i in range(len(t)-n+1):
ngrams.append(t[i:i+n])
tokens = ngrams
yield tokens
# ---[ utils ]------------------------------------------------------------------
def transform_items_topics(items, topics):
"transforms items of dict[token][topic]->val into dict[topic][token]->val dictionary"
out = {y:{} for y in topics}
for t,d in items:
for y in topics:
val = d[y]
if val:
out[y][t] = val
return out
|
|
#-*- coding: utf-8 -*-
'''
This file contains classes to handle getting data from different sources,
providing a consistent interface for the uploader.
'''
'''
Possible modifications:
1) Change way of obtaining data to upload -> create new class extending BasicDataSrc
2) Change way of defining hierarchy -> create new class extending BasicHierarchy
3) Change way of uploading data -> create new class extending BasicUploader
'''
from urllib2 import urlopen
from collections import deque
from exceptions import StopIteration
from csv import reader as csvreader
import simplejson as json
import string
class BasicReader:
'''Read data from a source. Can be used as iterator.'''
def __init__( self, src, std_size=10000, stop_sign='\n', enc='utf-8' ):
self.src = src
self.size = std_size
self.buffer = deque()
self.stop_sign = stop_sign
self.enc = enc
def __iter__( self ):
return self
def next( self ):
        '''Read until a stop_sign or the end of the data is reached.'''
row = []
while self.stop_sign not in self.buffer:
bulk = self.read_bulk()
if bulk == '':
raise StopIteration
self.buffer += bulk
# Get data from the buffer until stop_sign.
left = ''
while left != self.stop_sign:
left = self.buffer.popleft()
row.append( left )
return (''.join( row ))
def read_bulk( self, size=None ):
        '''Read size bytes of data; if size is not specified, the default
        value is used.'''
read_size = size if size is not None else self.size
        # Top up the buffer, then return its joined contents and clear it.
bulk = self.src.read( read_size - len( self.buffer ) )
self.buffer += bulk
buffer_copy = ''.join( self.buffer )
self.buffer.clear()
return buffer_copy
def read_all( self ):
'''Read all data.'''
bulk = self.src.read()
self.buffer += bulk
buffer_copy = ''.join( self.buffer )
self.buffer.clear()
return buffer_copy
def is_all_read( self ):
        '''Checks whether both the source and the buffer are exhausted.'''
data_part = self.src.read(1)
if data_part != '':
self.buffer.append( data_part )
return len( self.buffer ) == 0
class FileReader(BasicReader):
'''Reads data from a file.'''
def __init__( self, filename, std_size=10000, stop_sign='\n', enc='utf-8' ):
self.src = open( filename, 'rb' )
self.size = std_size
self.buffer = deque()
self.stop_sign = stop_sign
self.enc = enc
def __del__( self ):
self.src.close()
class UrlReader(BasicReader):
'''Reads data from a URL.'''
def __init__( self, url, std_size=10000, stop_sign='\n', enc='utf-8' ):
self.src = urlopen( url )
self.size = std_size
self.buffer = deque()
self.stop_sign = stop_sign
self.enc = enc
def __del__( self ):
self.src.close()
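# Usage sketch (illustrative; 'data.txt' and handle_row are hypothetical):
#
#     reader = FileReader( 'data.txt' )
#     for row in reader:        # yields text up to each stop_sign ('\n')
#         handle_row( row )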
class Meta:
'''Reads meta data from a data source. Meta data contains collection name and description,
hierarchy description, columns description. Data reader should be an instance of a subclass
of BasicReader.'''
def __init__( self, reader ):
self.reader = reader
content = reader.read_all()
json_content = json.loads( content )
self.node = {
'name': json_content['name'],
'description': json_content['description'],
'label': json_content['label']
}
self.columns = json_content['columns']
self.hierarchy = json_content['hierarchy']
self.parents = json_content['parents']
self.user = json_content['user']
def get_node( self ):
return self.node
def get_columns( self ):
return self.columns
def get_hierarchy( self ):
return self.hierarchy
def get_parents( self ):
return self.parents
def get_user( self ):
return self.user
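# Illustrative meta document consumed by Meta above (the keys mirror what
# __init__ reads; all values are hypothetical):
#
#     {
#         "name": "budget",
#         "description": "yearly budget data",
#         "label": "Budget",
#         "columns": [ ... ],
#         "hierarchy": [ ... ],
#         "parents": [ ... ],
#         "user": "uploader"
#     }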
class DataReceiver:
'''Receives data from a data source and knows how to interpret it. Data reader
is an instance of a subclass of BasicReader.'''
def __init__( self, reader ):
self.reader = reader
self.rows = deque()
self.buffer = ''
def read_rows( self ):
'''Gets at least one row from the data source if not everything is read.'''
self.buffer += self.reader.read_bulk()
while not self.is_row_in_buffer() and not self.reader.is_all_read():
self.buffer += self.reader.read_bulk()
splitted_for_rows = self.buffer.split('\n')
if self.reader.is_all_read():
self.buffer = ''
else:
self.buffer = splitted_for_rows[-1]
del splitted_for_rows[-1]
return splitted_for_rows
def get_rows( self ):
if len( self.rows ) == 0:
return self.read_rows()
else:
rows_copy = list( self.rows )
self.rows.clear()
return rows_copy
def get_all_rows( self ):
list_rows = []
while not self.reader.is_all_read():
list_rows += self.get_rows()
return list_rows
def is_row_in_buffer( self ):
return '\n' in self.buffer
class CSVDataReceiver(DataReceiver):
'''Receives data from the CSV file.'''
def __init__( self, reader, delim=';', quote='"' ):
self.reader = csvreader( reader, delimiter=delim, quotechar=quote )
self.rows = deque()
self.buffer = ''
def get_rows( self ):
        '''If any row is left, returns the next one, otherwise returns [].'''
        try:
            return self.reader.next()
        except StopIteration:
            return []
def get_all_rows( self ):
'''Gets all rows from the CSV file.'''
rows = []
try:
while True:
rows.append( self.reader.next() )
        except StopIteration:
return rows
# TODO: Not working yet
class APIDataReceiver(DataReceiver):
'''Receives data from API'''
def __init__( self, base_url ):
self.rows = deque()
self.buffer = ''
self.base_url = base_url
top_data_url = base_url + 'a/'
top_reader = UrlReader( top_data_url, stop_sign=None )
top_data = top_reader.read_all()
json_data = json.loads( top_data )
self.top_data = json_data['data']
self.next_ind = 0
    def get_rows( self ):
        try:
            rows = self.get_subtree( self.next_ind )
            self.next_ind += 1
            return rows
        except IndexError:
            return []
    def get_all_rows( self ):
        rows = []
        new_rows = self.get_rows()
        while new_rows:
            rows += new_rows
            new_rows = self.get_rows()
        return rows
def get_children( self, prev_url, par_id, level ):
url = self.next_level_url( prev_url, par_id, level )
ureader = UrlReader( url, stop_sign=None )
children_str = ureader.read_all()
children = json.loads( children_str )['data']
return children
def get_children_rec( self, prev_url, par_id, level ):
data = []
children = self.get_children( prev_url, par_id, level )
for child in children:
data.append( child )
if not child['leaf']:
url = self.next_level_url( prev_url, par_id, level )
data += self.get_children_rec( url, child['idef'], level + 1 )
return data
    def get_subtree( self, ind ):
        root = self.top_data[ ind ]
        data = [ root ]
        root_id = root['idef']
        if not root['leaf']:
            data += self.get_children_rec( self.base_url, root_id, 0 )
        return data
def next_level_url( self, prev_url, par_id, level ):
next_level = level + 1
next_letter = string.lowercase[ next_level ]
return ( prev_url + '%s/%s/' % ( par_id, next_letter ) )
|
|
import re
import logging
from tyggbot import TyggBot
from command import Command
log = logging.getLogger('tyggbot')
class Substitution:
def __init__(self, cb, key=None, argument=None):
self.cb = cb
self.key = key
self.argument = argument
class BaseAction:
type = '??'
class MultiAction(BaseAction):
type = 'multi'
def __init__(self, args, default):
self.commands = {}
self.default = default
for command in args:
cmd = Command.from_json(command)
for alias in command['command'].split('|'):
if alias not in self.commands:
self.commands[alias] = cmd
else:
log.error('Alias {0} for this multiaction is already in use.'.format(alias))
def run(self, tyggbot, source, message, event={}, args={}):
if message:
msg_lower_parts = message.lower().split(' ')
command = msg_lower_parts[0]
extra_msg = ' '.join(message.split(' ')[1:])
else:
command = self.default
extra_msg = None
if command in self.commands:
cmd = self.commands[command]
if source.level >= cmd.level:
return cmd.run(tyggbot, source, extra_msg, event, args)
else:
log.info('User {0} tried running a sub-command he had no access to ({1}).'.format(source.username, command))
class FuncAction(BaseAction):
type = 'func'
def __init__(self, cb):
self.cb = cb
def run(self, tyggbot, source, message, event={}, args={}):
try:
return self.cb(tyggbot, source, message, event, args)
except Exception:
log.exception('Uncaught exception in FuncAction')
class RawFuncAction(BaseAction):
type = 'rawfunc'
def __init__(self, cb):
self.cb = cb
def run(self, tyggbot, source, message, event={}, args={}):
return self.cb()
class MessageAction(BaseAction):
type = 'message'
    regex = re.compile(r'(\$\([a-zA-Z:;_0-9 ]+\))')
    inner_regex = re.compile(r'([a-z]+)(:[a-zA-Z_0-9 ]+|;[0-9]+)')
    argument_regex = re.compile(r'(\$\([0-9]+\))')
    argument_inner_regex = re.compile(r'\$\(([0-9]+)\)')
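    # Substitution syntax handled below (inferred from the regexes and
    # init_parse):
    #   $(1), $(2), ...   -> positional words from the triggering message
    #   $(path:key)       -> value from the TyggBot callback for `path`
    #                        (kvi, tb, lasttweet, etm, ecount, etmrecord,
    #                        source) looked up with the given key
    #   $(path;N)         -> same callbacks, keyed by the N-th message word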
def __init__(self, response):
self.response = response
self.argument_subs = []
self.subs = {}
self.init_parse()
def init_parse(self):
for sub_key in self.argument_regex.findall(self.response):
inner_match = self.argument_inner_regex.search(sub_key)
if inner_match:
argument_num = inner_match.group(1)
                try:
                    argument_num = int(argument_num)
                except ValueError:
                    continue
found = False
for sub in self.argument_subs:
if sub.argument == argument_num:
# We already matched this argument variable
found = True
break
if found:
continue
self.argument_subs.append(Substitution(None, argument=argument_num))
for sub_key in self.regex.findall(self.response):
if sub_key in self.subs:
# We already matched this variable
continue
inner_match = self.inner_regex.search(sub_key)
if inner_match:
path = inner_match.group(1)
key = inner_match.group(2)
key_type = key[:1]
key_value = key[1:]
if path == 'kvi':
cb = TyggBot.instance.get_kvi_value
elif path == 'tb':
cb = TyggBot.instance.get_value
elif path == 'lasttweet':
cb = TyggBot.instance.get_last_tweet
elif path == 'etm':
cb = TyggBot.instance.get_emote_tm
elif path == 'ecount':
cb = TyggBot.instance.get_emote_count
elif path == 'etmrecord':
cb = TyggBot.instance.get_emote_tm_record
elif path == 'source':
cb = TyggBot.instance.get_source_value
else:
log.error('Unimplemented path: {0}'.format(path))
continue
if key_type == ':':
self.subs[sub_key] = Substitution(cb, key=key_value)
elif key_type == ';':
self.subs[sub_key] = Substitution(cb, argument=int(key_value))
    @staticmethod
    def get_argument_value(message, index):
if not message:
return ''
msg_parts = message.split(' ')
        try:
            return msg_parts[index]
        except IndexError:
            return ''
def get_response(self, tyggbot, extra):
resp = self.response
for sub in self.argument_subs:
needle = '$({0})'.format(sub.argument)
value = str(MessageAction.get_argument_value(extra['message'], sub.argument - 1))
resp = resp.replace(needle, value)
log.debug('Replacing {0} with {1}'.format(needle, value))
for needle, sub in self.subs.items():
if sub.key:
param = sub.key
elif sub.argument:
param = MessageAction.get_argument_value(extra['message'], sub.argument - 1)
else:
log.error('Unknown param for response.')
continue
value = sub.cb(param, extra)
if value is None:
return None
resp = resp.replace(needle, str(value))
log.debug('Replacing {0} with {1}'.format(needle, str(value)))
return resp
def get_extra_data(self, source, message):
return {
'user': source.username,
'source': source,
'message': message,
}
def run(self, tyggbot, source, message, event={}, args={}):
raise NotImplementedError('Please implement the run method.')
class SayAction(MessageAction):
def run(self, tyggbot, source, message, event={}, args={}):
resp = self.get_response(tyggbot, self.get_extra_data(source, message))
if resp:
tyggbot.say(resp)
class MeAction(MessageAction):
def run(self, tyggbot, source, message, event={}, args={}):
resp = self.get_response(tyggbot, self.get_extra_data(source, message))
if resp:
tyggbot.me(resp)
class WhisperAction(MessageAction):
def run(self, tyggbot, source, message, event={}, args={}):
resp = self.get_response(tyggbot, self.get_extra_data(source, message))
if resp:
tyggbot.whisper(source.username, resp)
|
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.water_heaters_and_thermal_storage import WaterHeaterStratified
log = logging.getLogger(__name__)
class TestWaterHeaterStratified(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_waterheaterstratified(self):
pyidf.validation_level = ValidationLevel.error
obj = WaterHeaterStratified()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_enduse_subcategory = "End-Use Subcategory"
obj.enduse_subcategory = var_enduse_subcategory
# real
var_tank_volume = 0.0001
obj.tank_volume = var_tank_volume
# real
var_tank_height = 0.0001
obj.tank_height = var_tank_height
# alpha
var_tank_shape = "VerticalCylinder"
obj.tank_shape = var_tank_shape
# real
var_tank_perimeter = 0.0
obj.tank_perimeter = var_tank_perimeter
# real
var_maximum_temperature_limit = 7.7
obj.maximum_temperature_limit = var_maximum_temperature_limit
# alpha
var_heater_priority_control = "MasterSlave"
obj.heater_priority_control = var_heater_priority_control
# object-list
var_heater_1_setpoint_temperature_schedule_name = "object-list|Heater 1 Setpoint Temperature Schedule Name"
obj.heater_1_setpoint_temperature_schedule_name = var_heater_1_setpoint_temperature_schedule_name
# real
var_heater_1_deadband_temperature_difference = 0.0
obj.heater_1_deadband_temperature_difference = var_heater_1_deadband_temperature_difference
# real
var_heater_1_capacity = 0.0
obj.heater_1_capacity = var_heater_1_capacity
# real
var_heater_1_height = 0.0
obj.heater_1_height = var_heater_1_height
# object-list
var_heater_2_setpoint_temperature_schedule_name = "object-list|Heater 2 Setpoint Temperature Schedule Name"
obj.heater_2_setpoint_temperature_schedule_name = var_heater_2_setpoint_temperature_schedule_name
# real
var_heater_2_deadband_temperature_difference = 0.0
obj.heater_2_deadband_temperature_difference = var_heater_2_deadband_temperature_difference
# real
var_heater_2_capacity = 0.0
obj.heater_2_capacity = var_heater_2_capacity
# real
var_heater_2_height = 0.0
obj.heater_2_height = var_heater_2_height
# alpha
var_heater_fuel_type = "Electricity"
obj.heater_fuel_type = var_heater_fuel_type
# real
var_heater_thermal_efficiency = 0.50005
obj.heater_thermal_efficiency = var_heater_thermal_efficiency
# real
var_off_cycle_parasitic_fuel_consumption_rate = 0.0
obj.off_cycle_parasitic_fuel_consumption_rate = var_off_cycle_parasitic_fuel_consumption_rate
# alpha
var_off_cycle_parasitic_fuel_type = "Electricity"
obj.off_cycle_parasitic_fuel_type = var_off_cycle_parasitic_fuel_type
# real
var_off_cycle_parasitic_heat_fraction_to_tank = 0.5
obj.off_cycle_parasitic_heat_fraction_to_tank = var_off_cycle_parasitic_heat_fraction_to_tank
# real
var_off_cycle_parasitic_height = 0.0
obj.off_cycle_parasitic_height = var_off_cycle_parasitic_height
# real
var_on_cycle_parasitic_fuel_consumption_rate = 0.0
obj.on_cycle_parasitic_fuel_consumption_rate = var_on_cycle_parasitic_fuel_consumption_rate
# alpha
var_on_cycle_parasitic_fuel_type = "Electricity"
obj.on_cycle_parasitic_fuel_type = var_on_cycle_parasitic_fuel_type
# real
var_on_cycle_parasitic_heat_fraction_to_tank = 0.5
obj.on_cycle_parasitic_heat_fraction_to_tank = var_on_cycle_parasitic_heat_fraction_to_tank
# real
var_on_cycle_parasitic_height = 0.0
obj.on_cycle_parasitic_height = var_on_cycle_parasitic_height
# alpha
var_ambient_temperature_indicator = "Schedule"
obj.ambient_temperature_indicator = var_ambient_temperature_indicator
# object-list
var_ambient_temperature_schedule_name = "object-list|Ambient Temperature Schedule Name"
obj.ambient_temperature_schedule_name = var_ambient_temperature_schedule_name
# object-list
var_ambient_temperature_zone_name = "object-list|Ambient Temperature Zone Name"
obj.ambient_temperature_zone_name = var_ambient_temperature_zone_name
# node
var_ambient_temperature_outdoor_air_node_name = "node|Ambient Temperature Outdoor Air Node Name"
obj.ambient_temperature_outdoor_air_node_name = var_ambient_temperature_outdoor_air_node_name
# real
var_uniform_skin_loss_coefficient_per_unit_area_to_ambient_temperature = 0.0
obj.uniform_skin_loss_coefficient_per_unit_area_to_ambient_temperature = var_uniform_skin_loss_coefficient_per_unit_area_to_ambient_temperature
# real
var_skin_loss_fraction_to_zone = 0.5
obj.skin_loss_fraction_to_zone = var_skin_loss_fraction_to_zone
# real
var_off_cycle_flue_loss_coefficient_to_ambient_temperature = 0.0
obj.off_cycle_flue_loss_coefficient_to_ambient_temperature = var_off_cycle_flue_loss_coefficient_to_ambient_temperature
# real
var_off_cycle_flue_loss_fraction_to_zone = 0.5
obj.off_cycle_flue_loss_fraction_to_zone = var_off_cycle_flue_loss_fraction_to_zone
# real
var_peak_use_flow_rate = 0.0
obj.peak_use_flow_rate = var_peak_use_flow_rate
# object-list
var_use_flow_rate_fraction_schedule_name = "object-list|Use Flow Rate Fraction Schedule Name"
obj.use_flow_rate_fraction_schedule_name = var_use_flow_rate_fraction_schedule_name
# object-list
var_cold_water_supply_temperature_schedule_name = "object-list|Cold Water Supply Temperature Schedule Name"
obj.cold_water_supply_temperature_schedule_name = var_cold_water_supply_temperature_schedule_name
# node
var_use_side_inlet_node_name = "node|Use Side Inlet Node Name"
obj.use_side_inlet_node_name = var_use_side_inlet_node_name
# node
var_use_side_outlet_node_name = "node|Use Side Outlet Node Name"
obj.use_side_outlet_node_name = var_use_side_outlet_node_name
# real
var_use_side_effectiveness = 0.5
obj.use_side_effectiveness = var_use_side_effectiveness
# real
var_use_side_inlet_height = 0.0
obj.use_side_inlet_height = var_use_side_inlet_height
# real
var_use_side_outlet_height = 0.0
obj.use_side_outlet_height = var_use_side_outlet_height
# node
var_source_side_inlet_node_name = "node|Source Side Inlet Node Name"
obj.source_side_inlet_node_name = var_source_side_inlet_node_name
# node
var_source_side_outlet_node_name = "node|Source Side Outlet Node Name"
obj.source_side_outlet_node_name = var_source_side_outlet_node_name
# real
var_source_side_effectiveness = 0.5
obj.source_side_effectiveness = var_source_side_effectiveness
# real
var_source_side_inlet_height = 0.0
obj.source_side_inlet_height = var_source_side_inlet_height
# real
var_source_side_outlet_height = 0.0
obj.source_side_outlet_height = var_source_side_outlet_height
# alpha
var_inlet_mode = "Fixed"
obj.inlet_mode = var_inlet_mode
# real
var_use_side_design_flow_rate = 0.0
obj.use_side_design_flow_rate = var_use_side_design_flow_rate
# real
var_source_side_design_flow_rate = 0.0
obj.source_side_design_flow_rate = var_source_side_design_flow_rate
# real
var_indirect_water_heating_recovery_time = 0.0001
obj.indirect_water_heating_recovery_time = var_indirect_water_heating_recovery_time
# integer
var_number_of_nodes = 6
obj.number_of_nodes = var_number_of_nodes
# real
var_additional_destratification_conductivity = 0.0
obj.additional_destratification_conductivity = var_additional_destratification_conductivity
# real
var_node_1_additional_loss_coefficient = 54.54
obj.node_1_additional_loss_coefficient = var_node_1_additional_loss_coefficient
# real
var_node_2_additional_loss_coefficient = 55.55
obj.node_2_additional_loss_coefficient = var_node_2_additional_loss_coefficient
# real
var_node_3_additional_loss_coefficient = 56.56
obj.node_3_additional_loss_coefficient = var_node_3_additional_loss_coefficient
# real
var_node_4_additional_loss_coefficient = 57.57
obj.node_4_additional_loss_coefficient = var_node_4_additional_loss_coefficient
# real
var_node_5_additional_loss_coefficient = 58.58
obj.node_5_additional_loss_coefficient = var_node_5_additional_loss_coefficient
# real
var_node_6_additional_loss_coefficient = 59.59
obj.node_6_additional_loss_coefficient = var_node_6_additional_loss_coefficient
# real
var_node_7_additional_loss_coefficient = 60.6
obj.node_7_additional_loss_coefficient = var_node_7_additional_loss_coefficient
# real
var_node_8_additional_loss_coefficient = 61.61
obj.node_8_additional_loss_coefficient = var_node_8_additional_loss_coefficient
# real
var_node_9_additional_loss_coefficient = 62.62
obj.node_9_additional_loss_coefficient = var_node_9_additional_loss_coefficient
# real
var_node_10_additional_loss_coefficient = 63.63
obj.node_10_additional_loss_coefficient = var_node_10_additional_loss_coefficient
# real
var_node_11_additional_loss_coefficient = 64.64
obj.node_11_additional_loss_coefficient = var_node_11_additional_loss_coefficient
# real
var_node_12_additional_loss_coefficient = 65.65
obj.node_12_additional_loss_coefficient = var_node_12_additional_loss_coefficient
# alpha
var_source_side_flow_control_mode = "StorageTank"
obj.source_side_flow_control_mode = var_source_side_flow_control_mode
# object-list
var_indirect_alternate_setpoint_temperature_schedule_name = "object-list|Indirect Alternate Setpoint Temperature Schedule Name"
obj.indirect_alternate_setpoint_temperature_schedule_name = var_indirect_alternate_setpoint_temperature_schedule_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.waterheaterstratifieds[0].name, var_name)
self.assertEqual(idf2.waterheaterstratifieds[0].enduse_subcategory, var_enduse_subcategory)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].tank_volume, var_tank_volume)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].tank_height, var_tank_height)
self.assertEqual(idf2.waterheaterstratifieds[0].tank_shape, var_tank_shape)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].tank_perimeter, var_tank_perimeter)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].maximum_temperature_limit, var_maximum_temperature_limit)
self.assertEqual(idf2.waterheaterstratifieds[0].heater_priority_control, var_heater_priority_control)
self.assertEqual(idf2.waterheaterstratifieds[0].heater_1_setpoint_temperature_schedule_name, var_heater_1_setpoint_temperature_schedule_name)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].heater_1_deadband_temperature_difference, var_heater_1_deadband_temperature_difference)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].heater_1_capacity, var_heater_1_capacity)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].heater_1_height, var_heater_1_height)
self.assertEqual(idf2.waterheaterstratifieds[0].heater_2_setpoint_temperature_schedule_name, var_heater_2_setpoint_temperature_schedule_name)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].heater_2_deadband_temperature_difference, var_heater_2_deadband_temperature_difference)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].heater_2_capacity, var_heater_2_capacity)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].heater_2_height, var_heater_2_height)
self.assertEqual(idf2.waterheaterstratifieds[0].heater_fuel_type, var_heater_fuel_type)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].heater_thermal_efficiency, var_heater_thermal_efficiency)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].off_cycle_parasitic_fuel_consumption_rate, var_off_cycle_parasitic_fuel_consumption_rate)
self.assertEqual(idf2.waterheaterstratifieds[0].off_cycle_parasitic_fuel_type, var_off_cycle_parasitic_fuel_type)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].off_cycle_parasitic_heat_fraction_to_tank, var_off_cycle_parasitic_heat_fraction_to_tank)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].off_cycle_parasitic_height, var_off_cycle_parasitic_height)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].on_cycle_parasitic_fuel_consumption_rate, var_on_cycle_parasitic_fuel_consumption_rate)
self.assertEqual(idf2.waterheaterstratifieds[0].on_cycle_parasitic_fuel_type, var_on_cycle_parasitic_fuel_type)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].on_cycle_parasitic_heat_fraction_to_tank, var_on_cycle_parasitic_heat_fraction_to_tank)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].on_cycle_parasitic_height, var_on_cycle_parasitic_height)
self.assertEqual(idf2.waterheaterstratifieds[0].ambient_temperature_indicator, var_ambient_temperature_indicator)
self.assertEqual(idf2.waterheaterstratifieds[0].ambient_temperature_schedule_name, var_ambient_temperature_schedule_name)
self.assertEqual(idf2.waterheaterstratifieds[0].ambient_temperature_zone_name, var_ambient_temperature_zone_name)
self.assertEqual(idf2.waterheaterstratifieds[0].ambient_temperature_outdoor_air_node_name, var_ambient_temperature_outdoor_air_node_name)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].uniform_skin_loss_coefficient_per_unit_area_to_ambient_temperature, var_uniform_skin_loss_coefficient_per_unit_area_to_ambient_temperature)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].skin_loss_fraction_to_zone, var_skin_loss_fraction_to_zone)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].off_cycle_flue_loss_coefficient_to_ambient_temperature, var_off_cycle_flue_loss_coefficient_to_ambient_temperature)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].off_cycle_flue_loss_fraction_to_zone, var_off_cycle_flue_loss_fraction_to_zone)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].peak_use_flow_rate, var_peak_use_flow_rate)
self.assertEqual(idf2.waterheaterstratifieds[0].use_flow_rate_fraction_schedule_name, var_use_flow_rate_fraction_schedule_name)
self.assertEqual(idf2.waterheaterstratifieds[0].cold_water_supply_temperature_schedule_name, var_cold_water_supply_temperature_schedule_name)
self.assertEqual(idf2.waterheaterstratifieds[0].use_side_inlet_node_name, var_use_side_inlet_node_name)
self.assertEqual(idf2.waterheaterstratifieds[0].use_side_outlet_node_name, var_use_side_outlet_node_name)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].use_side_effectiveness, var_use_side_effectiveness)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].use_side_inlet_height, var_use_side_inlet_height)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].use_side_outlet_height, var_use_side_outlet_height)
self.assertEqual(idf2.waterheaterstratifieds[0].source_side_inlet_node_name, var_source_side_inlet_node_name)
self.assertEqual(idf2.waterheaterstratifieds[0].source_side_outlet_node_name, var_source_side_outlet_node_name)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].source_side_effectiveness, var_source_side_effectiveness)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].source_side_inlet_height, var_source_side_inlet_height)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].source_side_outlet_height, var_source_side_outlet_height)
self.assertEqual(idf2.waterheaterstratifieds[0].inlet_mode, var_inlet_mode)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].use_side_design_flow_rate, var_use_side_design_flow_rate)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].source_side_design_flow_rate, var_source_side_design_flow_rate)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].indirect_water_heating_recovery_time, var_indirect_water_heating_recovery_time)
self.assertEqual(idf2.waterheaterstratifieds[0].number_of_nodes, var_number_of_nodes)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].additional_destratification_conductivity, var_additional_destratification_conductivity)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_1_additional_loss_coefficient, var_node_1_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_2_additional_loss_coefficient, var_node_2_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_3_additional_loss_coefficient, var_node_3_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_4_additional_loss_coefficient, var_node_4_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_5_additional_loss_coefficient, var_node_5_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_6_additional_loss_coefficient, var_node_6_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_7_additional_loss_coefficient, var_node_7_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_8_additional_loss_coefficient, var_node_8_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_9_additional_loss_coefficient, var_node_9_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_10_additional_loss_coefficient, var_node_10_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_11_additional_loss_coefficient, var_node_11_additional_loss_coefficient)
self.assertAlmostEqual(idf2.waterheaterstratifieds[0].node_12_additional_loss_coefficient, var_node_12_additional_loss_coefficient)
self.assertEqual(idf2.waterheaterstratifieds[0].source_side_flow_control_mode, var_source_side_flow_control_mode)
self.assertEqual(idf2.waterheaterstratifieds[0].indirect_alternate_setpoint_temperature_schedule_name, var_indirect_alternate_setpoint_temperature_schedule_name)
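        # (Round-trip check: the object is added to a fresh IDF, saved to
        # disk, re-read, and every field is asserted to survive unchanged.)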
|
|
from django.test import SimpleTestCase, override_settings
from casexml.apps.phone.models import IndexTree, SimplifiedSyncLog
class TestExtendedFootprint(SimpleTestCase):
def test_simple_linear_structure(self):
[grandparent_id, parent_id, child_id] = all_cases = ['grandparent', 'parent', 'child']
tree = IndexTree(indices={
child_id: convert_list_to_dict([parent_id]),
parent_id: convert_list_to_dict([grandparent_id]),
})
cases = IndexTree.get_all_dependencies(grandparent_id, tree, IndexTree())
self.assertEqual(cases, set(all_cases))
def test_multiple_children(self):
[grandparent_id, parent_id, child_id_1, child_id_2] = all_cases = ['rickard', 'ned', 'bran', 'arya']
tree = IndexTree(indices={
child_id_1: convert_list_to_dict([parent_id]),
child_id_2: convert_list_to_dict([parent_id]),
parent_id: convert_list_to_dict([grandparent_id]),
})
cases = IndexTree.get_all_dependencies(grandparent_id, tree, IndexTree())
self.assertEqual(cases, set(all_cases))
def test_simple_extension(self):
[host_id, extension_id] = all_ids = ['host', 'extension']
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
})
child_tree = IndexTree()
extension_dependencies = IndexTree.get_all_dependencies(extension_id, child_tree, extension_tree)
self.assertEqual(extension_dependencies, set(all_ids))
def test_extension_long_chain(self):
[host_id, extension_id, extension_id_2, extension_id_3] = all_ids = [
'host', 'extension', 'extension_2', 'extension_3']
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
extension_id_2: convert_list_to_dict([extension_id]),
extension_id_3: convert_list_to_dict([extension_id_2]),
})
child_tree = IndexTree()
extension_dependencies = IndexTree.get_all_dependencies(extension_id, child_tree, extension_tree)
self.assertEqual(set(all_ids), extension_dependencies)
host_dependencies = IndexTree.get_all_dependencies(host_id, child_tree, extension_tree)
self.assertEqual(set(all_ids), host_dependencies)
def test_child_and_extension(self):
"""
+---+ +---+
| C +--c--->| H |
+-+-+ +-+-+
^ ^
|e |e
+-+-+ +-+-+
|E2 | |E1 |
+---+ +---+
"""
[host_id, extension_id, child_id, extension_id_2] = all_ids = ['host', 'extension', 'child', 'extension_2']
child_tree = IndexTree(indices={
child_id: convert_list_to_dict([host_id]),
})
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
extension_id_2: convert_list_to_dict([child_id]),
})
extension_dependencies = IndexTree.get_all_dependencies(extension_id, child_tree, extension_tree)
self.assertEqual(set(all_ids), extension_dependencies)
host_dependencies = IndexTree.get_all_dependencies(host_id, child_tree, extension_tree)
self.assertEqual(set(all_ids), host_dependencies)
child_dependencies = IndexTree.get_all_dependencies(child_id, child_tree, extension_tree)
self.assertEqual(set([child_id, extension_id_2]), child_dependencies)
def test_multiple_indices(self):
"""
+---+ +---+
| C +--c--->| H |
+---+--e--->+-+-+
^
+---+ |
| E +----e----+
+---+
"""
[host_id, extension_id, child_id] = all_ids = ['host', 'extension', 'child']
child_tree = IndexTree(indices={
child_id: convert_list_to_dict([host_id]),
})
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
child_id: convert_list_to_dict([host_id]),
})
child_dependencies = IndexTree.get_all_dependencies(child_id, child_tree, extension_tree)
self.assertEqual(set(all_ids), child_dependencies)
extension_dependencies = IndexTree.get_all_dependencies(extension_id, child_tree, extension_tree)
self.assertEqual(set(all_ids), extension_dependencies)
class PurgingTest(SimpleTestCase):
def test_purge_parent_then_child(self):
[parent_id, child_id] = all_ids = ['parent', 'child']
tree = IndexTree(indices={
child_id: convert_list_to_dict([parent_id]),
})
sync_log = SimplifiedSyncLog(index_tree=tree, case_ids_on_phone=set(all_ids))
        # this has no effect other than to move the parent to dependent
sync_log.purge(parent_id)
self.assertTrue(child_id in sync_log.case_ids_on_phone)
self.assertTrue(parent_id in sync_log.case_ids_on_phone)
self.assertFalse(child_id in sync_log.dependent_case_ids_on_phone)
self.assertTrue(parent_id in sync_log.dependent_case_ids_on_phone)
# this should purge it entirely
sync_log.purge(child_id)
self.assertFalse(child_id in sync_log.case_ids_on_phone)
self.assertFalse(parent_id in sync_log.case_ids_on_phone)
def test_purge_child_then_parent(self):
[parent_id, child_id] = all_ids = ['parent', 'child']
tree = IndexTree(indices={
child_id: convert_list_to_dict([parent_id]),
})
sync_log = SimplifiedSyncLog(index_tree=tree, case_ids_on_phone=set(all_ids))
# this should purge the child but not the parent
sync_log.purge(child_id)
self.assertFalse(child_id in sync_log.case_ids_on_phone)
self.assertTrue(parent_id in sync_log.case_ids_on_phone)
self.assertFalse(child_id in sync_log.dependent_case_ids_on_phone)
self.assertFalse(parent_id in sync_log.dependent_case_ids_on_phone)
# then purging the parent should purge it
sync_log.purge(parent_id)
self.assertFalse(parent_id in sync_log.case_ids_on_phone)
self.assertFalse(parent_id in sync_log.dependent_case_ids_on_phone)
def test_purge_tiered_top_down(self):
[grandparent_id, parent_id, child_id] = all_ids = ['grandparent', 'parent', 'child']
tree = IndexTree(indices={
child_id: convert_list_to_dict([parent_id]),
parent_id: convert_list_to_dict([grandparent_id]),
})
sync_log = SimplifiedSyncLog(index_tree=tree, case_ids_on_phone=set(all_ids))
# this has no effect other than to move the grandparent to dependent
sync_log.purge(grandparent_id)
for id in all_ids:
self.assertTrue(id in sync_log.case_ids_on_phone)
self.assertTrue(grandparent_id in sync_log.dependent_case_ids_on_phone)
self.assertFalse(parent_id in sync_log.dependent_case_ids_on_phone)
self.assertFalse(child_id in sync_log.dependent_case_ids_on_phone)
# likewise, this should have no effect other than to move the parent to dependent
sync_log.purge(parent_id)
for id in all_ids:
self.assertTrue(id in sync_log.case_ids_on_phone)
self.assertTrue(grandparent_id in sync_log.dependent_case_ids_on_phone)
self.assertTrue(parent_id in sync_log.dependent_case_ids_on_phone)
self.assertFalse(child_id in sync_log.dependent_case_ids_on_phone)
# this should now purge everything
sync_log.purge(child_id)
for id in all_ids:
self.assertFalse(id in sync_log.case_ids_on_phone)
self.assertFalse(id in sync_log.dependent_case_ids_on_phone)
def test_purge_tiered_bottom_up(self):
[grandparent_id, parent_id, child_id] = all_ids = ['grandparent', 'parent', 'child']
tree = IndexTree(indices={
child_id: convert_list_to_dict([parent_id]),
parent_id: convert_list_to_dict([grandparent_id]),
})
sync_log = SimplifiedSyncLog(index_tree=tree, case_ids_on_phone=set(all_ids))
# just purging the child should purge just the child
sync_log.purge(child_id)
self.assertTrue(grandparent_id in sync_log.case_ids_on_phone)
self.assertTrue(parent_id in sync_log.case_ids_on_phone)
self.assertFalse(child_id in sync_log.case_ids_on_phone)
# same for the parent
sync_log.purge(parent_id)
self.assertTrue(grandparent_id in sync_log.case_ids_on_phone)
self.assertFalse(parent_id in sync_log.case_ids_on_phone)
        # same for the grandparent
sync_log.purge(grandparent_id)
self.assertFalse(grandparent_id in sync_log.case_ids_on_phone)
def test_purge_multiple_children(self):
[grandparent_id, parent_id, child_id_1, child_id_2] = all_ids = ['rickard', 'ned', 'bran', 'arya']
tree = IndexTree(indices={
child_id_1: convert_list_to_dict([parent_id]),
child_id_2: convert_list_to_dict([parent_id]),
parent_id: convert_list_to_dict([grandparent_id]),
})
sync_log = SimplifiedSyncLog(index_tree=tree, case_ids_on_phone=set(all_ids))
# first purge the parent and grandparent
sync_log.purge(grandparent_id)
sync_log.purge(parent_id)
self.assertTrue(grandparent_id in sync_log.case_ids_on_phone)
self.assertTrue(grandparent_id in sync_log.dependent_case_ids_on_phone)
self.assertTrue(parent_id in sync_log.case_ids_on_phone)
self.assertTrue(parent_id in sync_log.dependent_case_ids_on_phone)
# just purging one child should preserve the parent index
sync_log.purge(child_id_1)
self.assertTrue(grandparent_id in sync_log.case_ids_on_phone)
self.assertTrue(grandparent_id in sync_log.dependent_case_ids_on_phone)
self.assertTrue(parent_id in sync_log.case_ids_on_phone)
self.assertTrue(parent_id in sync_log.dependent_case_ids_on_phone)
self.assertFalse(child_id_1 in sync_log.case_ids_on_phone)
# purging the other one should wipe it
sync_log.purge(child_id_2)
for id in all_ids:
self.assertFalse(id in sync_log.case_ids_on_phone)
self.assertFalse(id in sync_log.dependent_case_ids_on_phone)
@override_settings(DEBUG=True)
def test_purge_partial_children(self):
[parent_id, child_id_1, child_id_2] = all_ids = ['parent', 'child1', 'child2']
tree = IndexTree(indices={
child_id_1: convert_list_to_dict([parent_id]),
child_id_2: convert_list_to_dict([parent_id]),
})
sync_log = SimplifiedSyncLog(
index_tree=tree,
case_ids_on_phone=set(all_ids),
dependent_case_ids_on_phone=set([parent_id, child_id_2])
)
# this used to fail with an AssertionError
sync_log.purge(parent_id)
def test_purge_multiple_parents(self):
[grandparent_id, mother_id, father_id, child_id] = all_ids = ['heart-tree', 'catelyn', 'ned', 'arya']
tree = IndexTree(indices={
child_id: convert_list_to_dict([mother_id, father_id]),
mother_id: convert_list_to_dict([grandparent_id]),
father_id: convert_list_to_dict([grandparent_id]),
})
sync_log = SimplifiedSyncLog(index_tree=tree, case_ids_on_phone=set(all_ids))
# first purge everything but the child
sync_log.purge(grandparent_id)
sync_log.purge(mother_id)
sync_log.purge(father_id)
# everything should still be relevant because of the child
for id in all_ids:
self.assertTrue(id in sync_log.case_ids_on_phone)
# purging the child should wipe everything else
sync_log.purge(child_id)
for id in all_ids:
self.assertFalse(id in sync_log.case_ids_on_phone)
self.assertFalse(id in sync_log.dependent_case_ids_on_phone)
def test_purge_circular_loops(self):
[peer_id_1, peer_id_2] = all_ids = ['jaime', 'cersei']
tree = IndexTree(indices={
peer_id_1: convert_list_to_dict([peer_id_2]),
peer_id_2: convert_list_to_dict([peer_id_1]),
})
sync_log = SimplifiedSyncLog(index_tree=tree, case_ids_on_phone=set(all_ids))
# purging one peer should keep everything around
sync_log.purge(peer_id_1)
for id in all_ids:
self.assertTrue(id in sync_log.case_ids_on_phone)
# purging the second peer should remove everything
sync_log.purge(peer_id_2)
for id in all_ids:
self.assertFalse(id in sync_log.case_ids_on_phone)
def test_purge_very_circular_loops(self):
[peer_id_1, peer_id_2, peer_id_3] = all_ids = ['drogon', 'rhaegal', 'viserion']
tree = IndexTree(indices={
peer_id_1: convert_list_to_dict([peer_id_2]),
peer_id_2: convert_list_to_dict([peer_id_3]),
peer_id_3: convert_list_to_dict([peer_id_1]),
})
sync_log = SimplifiedSyncLog(index_tree=tree, case_ids_on_phone=set(all_ids))
# purging the first two, should still keep everything around
sync_log.purge(peer_id_1)
sync_log.purge(peer_id_2)
for id in all_ids:
self.assertTrue(id in sync_log.case_ids_on_phone)
sync_log.purge(peer_id_3)
for id in all_ids:
self.assertFalse(id in sync_log.case_ids_on_phone)
def test_purge_self_indexing(self):
[id] = ['recursive']
tree = IndexTree(indices={
id: convert_list_to_dict([id]),
})
sync_log = SimplifiedSyncLog(index_tree=tree, case_ids_on_phone=set([id]))
sync_log.purge(id)
self.assertFalse(id in sync_log.case_ids_on_phone)
self.assertFalse(id in sync_log.dependent_case_ids_on_phone)
class ExtensionCasesPurgingTest(SimpleTestCase):
    def test_purge_host(self):
"""Purging host removes the extension
"""
[host_id, extension_id] = all_ids = ['host', 'extension']
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
})
sync_log = SimplifiedSyncLog(extension_index_tree=extension_tree,
dependent_case_ids_on_phone=set([extension_id]),
case_ids_on_phone=set(all_ids))
sync_log.purge(host_id)
self.assertFalse(extension_id in sync_log.case_ids_on_phone)
self.assertFalse(host_id in sync_log.case_ids_on_phone)
    def test_purge_extension(self):
"""Purging extension removes host
"""
[host_id, extension_id] = all_ids = ['host', 'extension']
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
})
sync_log = SimplifiedSyncLog(extension_index_tree=extension_tree,
dependent_case_ids_on_phone=set([host_id]),
case_ids_on_phone=set(all_ids))
sync_log.purge(extension_id)
self.assertFalse(extension_id in sync_log.case_ids_on_phone)
self.assertFalse(host_id in sync_log.case_ids_on_phone)
def test_purge_host_extension_has_extension(self):
"""Purging host when extension has an extension removes both
"""
[host_id, extension_id, extension_extension_id] = all_ids = ['host', 'extension', 'extension_extension']
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
extension_extension_id: convert_list_to_dict([extension_id]),
})
sync_log = SimplifiedSyncLog(extension_index_tree=extension_tree,
dependent_case_ids_on_phone=set([extension_id, extension_extension_id]),
case_ids_on_phone=set(all_ids))
sync_log.purge(host_id)
self.assertFalse(extension_id in sync_log.case_ids_on_phone)
self.assertFalse(extension_extension_id in sync_log.case_ids_on_phone)
self.assertFalse(host_id in sync_log.case_ids_on_phone)
def test_purge_host_has_multiple_extensions(self):
"""Purging host with multiple extensions should remove all extensions
"""
[host_id, extension_id, extension_id_2] = all_ids = ['host', 'extension', 'extension_2']
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
extension_id_2: convert_list_to_dict([host_id]),
})
sync_log = SimplifiedSyncLog(extension_index_tree=extension_tree,
dependent_case_ids_on_phone=set([extension_id, extension_id_2]),
case_ids_on_phone=set(all_ids))
sync_log.purge(host_id)
self.assertFalse(extension_id in sync_log.case_ids_on_phone)
self.assertFalse(extension_id_2 in sync_log.case_ids_on_phone)
self.assertFalse(host_id in sync_log.case_ids_on_phone)
def test_purge_extension_host_has_multiple_extensions(self):
"""Purging an extension should remove host and its other extensions
"""
[host_id, extension_id, extension_id_2] = all_ids = ['host', 'extension', 'extension_2']
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
extension_id_2: convert_list_to_dict([host_id]),
})
sync_log = SimplifiedSyncLog(extension_index_tree=extension_tree,
dependent_case_ids_on_phone=set([host_id, extension_id_2]),
case_ids_on_phone=set(all_ids))
sync_log.purge(extension_id)
self.assertFalse(extension_id in sync_log.case_ids_on_phone)
self.assertFalse(extension_id_2 in sync_log.case_ids_on_phone)
self.assertFalse(host_id in sync_log.case_ids_on_phone)
def test_purge_extension_non_dependent_host(self):
"""Purging an extension should not remove the host or itself if the host is directly owned
"""
[host_id, extension_id] = all_ids = ['host', 'extension']
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
})
sync_log = SimplifiedSyncLog(extension_index_tree=extension_tree,
case_ids_on_phone=set(all_ids))
sync_log.purge(extension_id)
self.assertTrue(extension_id in sync_log.case_ids_on_phone)
self.assertTrue(host_id in sync_log.case_ids_on_phone)
def test_purge_child_of_extension(self):
"""Purging child of extension should remove extension and host
"""
[host_id, extension_id, child_id] = all_ids = ['host', 'extension', 'child']
child_tree = IndexTree(indices={
child_id: convert_list_to_dict([extension_id]),
})
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
})
sync_log = SimplifiedSyncLog(extension_index_tree=extension_tree,
index_tree=child_tree,
dependent_case_ids_on_phone=set([host_id, extension_id]),
case_ids_on_phone=set(all_ids))
sync_log.purge(child_id)
self.assertFalse(extension_id in sync_log.case_ids_on_phone)
self.assertFalse(child_id in sync_log.case_ids_on_phone)
self.assertFalse(host_id in sync_log.case_ids_on_phone)
def test_purge_extension_host_is_parent(self):
"""Purging an extension should not purge the host or the extension if the host is a depenency for a child
"""
[host_id, extension_id, child_id] = all_ids = ['host', 'extension', 'child']
child_tree = IndexTree(indices={
child_id: convert_list_to_dict([host_id]),
})
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
})
sync_log = SimplifiedSyncLog(extension_index_tree=extension_tree,
index_tree=child_tree,
dependent_case_ids_on_phone=set([host_id]),
case_ids_on_phone=set(all_ids))
sync_log.purge(extension_id)
self.assertTrue(extension_id in sync_log.case_ids_on_phone)
self.assertTrue(child_id in sync_log.case_ids_on_phone)
self.assertTrue(host_id in sync_log.case_ids_on_phone)
def test_open_extension_of_extension(self):
all_ids = ['host', 'extension', 'extension_of_extension']
host_id, extension_id, extension_of_extension_id = all_ids
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
extension_of_extension_id: convert_list_to_dict([extension_id]),
})
sync_log = SimplifiedSyncLog(extension_index_tree=extension_tree,
dependent_case_ids_on_phone=set([host_id, extension_id]),
closed_cases=set([host_id, extension_id]),
case_ids_on_phone=set(all_ids))
sync_log.purge(host_id)
self.assertFalse(host_id in sync_log.case_ids_on_phone)
self.assertFalse(extension_id in sync_log.case_ids_on_phone)
self.assertFalse(extension_of_extension_id in sync_log.case_ids_on_phone)
def test_open_child_of_extension(self):
[host_id, extension_id, child_of_extension_id] = all_ids = ['host', 'extension', 'child_of_extension']
extension_tree = IndexTree(indices={
extension_id: convert_list_to_dict([host_id]),
})
child_tree = IndexTree(indices={
child_of_extension_id: convert_list_to_dict([extension_id]),
})
sync_log = SimplifiedSyncLog(extension_index_tree=extension_tree,
index_tree=child_tree,
dependent_case_ids_on_phone=set([host_id, extension_id]),
closed_cases=set([host_id, extension_id]),
case_ids_on_phone=set(all_ids))
for case_id in [host_id, extension_id]:
sync_log.purge(case_id)
self.assertTrue(host_id in sync_log.case_ids_on_phone)
self.assertTrue(extension_id in sync_log.case_ids_on_phone)
self.assertTrue(child_of_extension_id in sync_log.case_ids_on_phone)
def convert_list_to_dict(a_list):
return {str(i): item for i, item in enumerate(a_list)}
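# e.g. convert_list_to_dict(['a', 'b']) == {'0': 'a', '1': 'b'}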
|
|
from __future__ import division
import optparse
from mpi4py import MPI
import numpy as np
from serial import find_solution, write_results
from settings import SEARCH_SPACE
def refine_work(work, p, new_min, verbose=False, split=False, split_min=4):
"""
Filter remaining work based on newly found solution and split up
remaining work if necessary to give processes that finish more
quickly work from other processes.
"""
# remove the minimum value numbers that have been skipped over
work[p] = work[p][work[p] > new_min]
# if option selected, divvy up work to processes that have run out
if split and not work[p].size:
# select the biggest queue remaining and split its work
big_p, big_arr = sorted(work.iteritems(),
reverse=True,
key=lambda x: x[1].size)[0]
# only take the other process's work if there are more than the min
# number of values (this prevents each empty "stealing" the last
# element in turn)
if big_arr.size >= split_min:
# print out status if desired
if verbose:
print('taking from process {} ({} left) to give '
'to process {}'.format(big_p, big_arr.size, p))
# split it between the two processes
work[big_p], work[p] = np.array_split(big_arr, 2)
return work
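def _demo_refine_work():
    """
    Illustrative sketch (not part of the original module): once process 2's
    queue is exhausted, splitting hands it half of the largest remaining
    queue.
    """
    work = {1: np.arange(10), 2: np.array([])}
    return refine_work(work, 2, new_min=-1, split=True)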
def calculate_progress(work):
"""
Calculate a % progress based on how much of the search space has been
covered.
"""
numerator = SEARCH_SPACE - queue_left(work)
return numerator / SEARCH_SPACE
def queue_left(work):
"""
    Figure out how much work is left in the queue.
"""
return sum([arr.size for arr in work.values()])
def master(comm, min, max, verbose=True, split=False, split_min=4):
"""
Assign work as appropriate to slave processes. When they run out of
work in their range, take work from another process. When nobody has
any work left to do, collect up all the leftovers and write out.
"""
size = comm.Get_size()
status = MPI.Status()
start = MPI.Wtime()
# placeholder for our output data
data_list = []
solved_so_far = 0
# get all possible minimum values and split them up among the non-root processes
process_min_ranges = np.array_split(np.arange(min, max), size - 1)
work = {p + 1: work for p, work in enumerate(process_min_ranges)}
# keep track of which slave processes are still alive
processes_working = range(1, size)
# setup: seed all processes with some work to start with
for p in processes_working:
comm.send(work[p].min(), dest=p)
# collect work as it comes in and assign new minima as processes finish
while True:
# get the result of the slave process
row = comm.recv(None, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
p = status.Get_source()
linear_value = status.Get_tag()
# update counters
data_list.append(row)
solved_so_far += 1
# update work queues based on p's found linear value
work = refine_work(work, p, linear_value,
verbose=verbose,
split=split,
split_min=split_min)
# if there's no work left for process p, remove it from the working pool
# (otherwise we'll mistakenly try to collect work from it at the end)
if not work[p].size:
processes_working.remove(p)
print('master killing process {}, ({} left)'.
format(p, len(processes_working)))
comm.send(-1, dest=p)
# if nobody has any work left, break out
if not queue_left(work):
print('no work left; master breaking out of while loop, '
'{} processes still working'.format(len(processes_working)))
break
# ... otherwise, if there is work left for p, send it
else:
# pop the lowest value from process p's work queue and send it out
new_min = work[p].min()
comm.send(new_min, dest=p)
work[p] = work[p][work[p] > new_min]
if verbose or solved_so_far % 10 == 0:
print('{:0.2f}s: solved {} so far ({:0.2%} of search space)'.
format(MPI.Wtime() - start, solved_so_far, calculate_progress(work)))
    # collect the final results from the processes that were still working
for _ in processes_working:
row = comm.recv(None, source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
data_list.append(row)
solved_so_far += 1
# kill the other processes
for p in processes_working:
comm.send(-1, dest=p)
return data_list
def slave(comm):
"""
Take work from master process until given the signal to shut down.
"""
status = MPI.Status()
rank = comm.Get_rank()
print('slave {} working'.format(rank))
while True:
# get work from the master
min_value = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
# die if sent a negative value
if min_value < 0:
break
        # do the actual computation for this minimum value
linear_value, row = find_solution(min_value)
comm.send(row, dest=0, tag=linear_value)
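# Typical invocation (illustrative; the script name is assumed):
#   mpiexec -n 8 python parallel_search.py -s --split-min 4
# Rank 0 runs master() and hands out minima; every other rank runs slave().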
if __name__ == '__main__':
# parse command line options
parser = optparse.OptionParser()
parser.add_option("-v", dest="verbose", action="store_true",
default=False,
help="verbose (print every solution instead of every 10)")
parser.add_option("-s", dest="split", action="store_true",
default=False,
help="split work queues",)
parser.add_option("--split-min", dest="split_min", type="int",
default=2,
help="threshold for splitting work queues")
parser.add_option("--min", dest="min", type="float",
default=9927.0,
help="minimum objective value")
parser.add_option("--max", dest="max", type="float",
default=11534.0,
help="maximum objective value")
options, args = parser.parse_args()
# get MPI data
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
    # if master, take charge of the work; otherwise prepare to do work
if rank == 0:
        print('master: started with size {}'.format(size))
start_time = MPI.Wtime()
results = master(comm, options.min, options.max,
verbose=options.verbose,
split=options.split,
split_min=options.split_min)
end_time = MPI.Wtime()
total_time = end_time - start_time
write_results(results, total_time, processors=size)
        print('total time {}'.format(total_time))
else:
slave(comm)
|
|
from decimal import Decimal as D
from decimal import ROUND_UP
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext
from oscar.apps.offer import utils
from oscar.core.loading import get_model
from oscar.templatetags.currency_filters import currency
Condition = get_model('offer', 'Condition')
__all__ = [
'CountCondition', 'CoverageCondition', 'ValueCondition'
]
class CountCondition(Condition):
"""
An offer condition dependent on the NUMBER of matching items from the
basket.
"""
_description = _("Basket includes %(count)d item(s) from %(range)s")
@property
def name(self):
return self._description % {
'count': self.value,
'range': six.text_type(self.range).lower()}
@property
def description(self):
return self._description % {
'count': self.value,
'range': utils.range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Count condition")
verbose_name_plural = _("Count conditions")
def is_satisfied(self, offer, basket):
"""
Determines whether a given basket meets this condition
"""
num_matches = 0
for line in basket.all_lines():
if (self.can_apply_condition(line)
and line.quantity_without_discount > 0):
num_matches += line.quantity_without_discount
if num_matches >= self.value:
return True
return False
def _get_num_matches(self, basket):
if hasattr(self, '_num_matches'):
return getattr(self, '_num_matches')
num_matches = 0
for line in basket.all_lines():
if (self.can_apply_condition(line)
and line.quantity_without_discount > 0):
num_matches += line.quantity_without_discount
self._num_matches = num_matches
return num_matches
def is_partially_satisfied(self, offer, basket):
num_matches = self._get_num_matches(basket)
return 0 < num_matches < self.value
def get_upsell_message(self, offer, basket):
num_matches = self._get_num_matches(basket)
delta = self.value - num_matches
return ungettext('Buy %(delta)d more product from %(range)s',
'Buy %(delta)d more products from %(range)s', delta) \
% {'delta': delta, 'range': self.range}
def consume_items(self, offer, basket, affected_lines):
"""
Marks items within the basket lines as consumed so they
can't be reused in other offers.
:basket: The basket
:affected_lines: The lines that have been affected by the discount.
This should be list of tuples (line, discount, qty)
"""
# We need to count how many items have already been consumed as part of
# applying the benefit, so we don't consume too many items.
num_consumed = 0
for line, __, quantity in affected_lines:
num_consumed += quantity
to_consume = max(0, self.value - num_consumed)
if to_consume == 0:
return
for __, line in self.get_applicable_lines(offer, basket,
most_expensive_first=True):
quantity_to_consume = min(line.quantity_without_discount,
to_consume)
line.consume(quantity_to_consume)
to_consume -= quantity_to_consume
if to_consume == 0:
break
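# Minimal sketch (plain data, no basket models) of the accounting performed by
# CountCondition.consume_items above: quantities already consumed by the
# benefit are subtracted from the condition value, and only the remainder is
# consumed from the available lines.
def _sketch_count_consumption(condition_value, affected_quantities, available_quantities):
    to_consume = max(0, condition_value - sum(affected_quantities))
    consumed = []
    for available in available_quantities:
        if to_consume == 0:
            break
        take = min(available, to_consume)
        consumed.append(take)
        to_consume -= take
    return consumed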
class CoverageCondition(Condition):
"""
An offer condition dependent on the number of DISTINCT matching items from
the basket.
"""
_description = _("Basket includes %(count)d distinct item(s) from"
" %(range)s")
@property
def name(self):
return self._description % {
'count': self.value,
'range': six.text_type(self.range).lower()}
@property
def description(self):
return self._description % {
'count': self.value,
'range': utils.range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Coverage Condition")
verbose_name_plural = _("Coverage Conditions")
def is_satisfied(self, offer, basket):
"""
Determines whether a given basket meets this condition
"""
covered_ids = []
for line in basket.all_lines():
if not line.is_available_for_discount:
continue
product = line.product
if (self.can_apply_condition(line) and product.id not in
covered_ids):
covered_ids.append(product.id)
if len(covered_ids) >= self.value:
return True
return False
def _get_num_covered_products(self, basket):
covered_ids = []
for line in basket.all_lines():
if not line.is_available_for_discount:
continue
product = line.product
if (self.can_apply_condition(line) and product.id not in
covered_ids):
covered_ids.append(product.id)
return len(covered_ids)
def get_upsell_message(self, offer, basket):
delta = self.value - self._get_num_covered_products(basket)
return ungettext('Buy %(delta)d more product from %(range)s',
'Buy %(delta)d more products from %(range)s', delta) \
% {'delta': delta, 'range': self.range}
def is_partially_satisfied(self, offer, basket):
return 0 < self._get_num_covered_products(basket) < self.value
def consume_items(self, offer, basket, affected_lines):
"""
Marks items within the basket lines as consumed so they
can't be reused in other offers.
"""
# Determine products that have already been consumed by applying the
# benefit
consumed_products = []
for line, __, quantity in affected_lines:
consumed_products.append(line.product)
to_consume = max(0, self.value - len(consumed_products))
if to_consume == 0:
return
for line in basket.all_lines():
product = line.product
if not self.can_apply_condition(line):
continue
if product in consumed_products:
continue
if not line.is_available_for_discount:
continue
# Only consume a quantity of 1 from each line
line.consume(1)
consumed_products.append(product)
to_consume -= 1
if to_consume == 0:
break
def get_value_of_satisfying_items(self, offer, basket):
covered_ids = []
value = D('0.00')
for line in basket.all_lines():
if (self.can_apply_condition(line) and line.product.id not in
covered_ids):
covered_ids.append(line.product.id)
value += utils.unit_price(offer, line)
if len(covered_ids) >= self.value:
return value
return value
class ValueCondition(Condition):
"""
An offer condition dependent on the VALUE of matching items from the
basket.
"""
_description = _("Basket includes %(amount)s from %(range)s")
@property
def name(self):
return self._description % {
'amount': currency(self.value),
'range': six.text_type(self.range).lower()}
@property
def description(self):
return self._description % {
'amount': currency(self.value),
'range': utils.range_anchor(self.range)}
class Meta:
app_label = 'offer'
proxy = True
verbose_name = _("Value condition")
verbose_name_plural = _("Value conditions")
def is_satisfied(self, offer, basket):
"""
Determine whether a given basket meets this condition
"""
value_of_matches = D('0.00')
for line in basket.all_lines():
if (self.can_apply_condition(line) and
line.quantity_without_discount > 0):
price = utils.unit_price(offer, line)
value_of_matches += price * int(line.quantity_without_discount)
if value_of_matches >= self.value:
return True
return False
def _get_value_of_matches(self, offer, basket):
if hasattr(self, '_value_of_matches'):
return getattr(self, '_value_of_matches')
value_of_matches = D('0.00')
for line in basket.all_lines():
if (self.can_apply_condition(line) and
line.quantity_without_discount > 0):
price = utils.unit_price(offer, line)
value_of_matches += price * int(line.quantity_without_discount)
self._value_of_matches = value_of_matches
return value_of_matches
def is_partially_satisfied(self, offer, basket):
value_of_matches = self._get_value_of_matches(offer, basket)
return D('0.00') < value_of_matches < self.value
def get_upsell_message(self, offer, basket):
value_of_matches = self._get_value_of_matches(offer, basket)
return _('Spend %(value)s more from %(range)s') % {
'value': currency(self.value - value_of_matches),
'range': self.range}
def consume_items(self, offer, basket, affected_lines):
"""
Marks items within the basket lines as consumed so they
can't be reused in other offers.
We allow lines to be passed in as sometimes we want them sorted
in a specific order.
"""
# Determine value of items already consumed as part of discount
value_consumed = D('0.00')
for line, __, qty in affected_lines:
price = utils.unit_price(offer, line)
value_consumed += price * qty
to_consume = max(0, self.value - value_consumed)
if to_consume == 0:
return
for price, line in self.get_applicable_lines(
offer, basket, most_expensive_first=True):
quantity_to_consume = min(
line.quantity_without_discount,
(to_consume / price).quantize(D(1), ROUND_UP))
line.consume(quantity_to_consume)
to_consume -= price * quantity_to_consume
if to_consume <= 0:
break
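# Note on the quantize() above: dividing the remaining value by a unit price
# and rounding up yields the smallest quantity that covers the remainder, e.g.
#   (D('25.00') / D('10.00')).quantize(D(1), ROUND_UP) == D('3')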
|
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Resource Filtering Logic
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
from datetime import timedelta
import fnmatch
import logging
import operator
import re
from dateutil.tz import tzutc
from dateutil.parser import parse
import jmespath
import ipaddress
import six
from c7n.executor import ThreadPoolExecutor
from c7n.registry import PluginRegistry
from c7n.resolver import ValuesFrom
from c7n.utils import set_annotation, type_schema, parse_cidr
class FilterValidationError(Exception):
pass
# Matching filters annotate their key onto objects
ANNOTATION_KEY = "c7n:MatchedFilters"
def glob_match(value, pattern):
if not isinstance(value, six.string_types):
return False
return fnmatch.fnmatch(value, pattern)
def regex_match(value, regex):
if not isinstance(value, six.string_types):
return False
# Note python 2.5+ internally cache regex
# would be nice to use re2
return bool(re.match(regex, value, flags=re.IGNORECASE))
def operator_in(x, y):
return x in y
def operator_ni(x, y):
return x not in y
def intersect(x, y):
return bool(set(x).intersection(y))
OPERATORS = {
'eq': operator.eq,
'equal': operator.eq,
'ne': operator.ne,
'not-equal': operator.ne,
'gt': operator.gt,
'greater-than': operator.gt,
'ge': operator.ge,
'gte': operator.ge,
'le': operator.le,
'lte': operator.le,
'lt': operator.lt,
'less-than': operator.lt,
'glob': glob_match,
'regex': regex_match,
'in': operator_in,
'ni': operator_ni,
'not-in': operator_ni,
'contains': operator.contains,
'intersect': intersect}
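# e.g. OPERATORS['gte'](5, 3) is True; OPERATORS['glob']('prod-web', 'prod-*')
# is True; OPERATORS['in']('a', ['a', 'b']) is True.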
class FilterRegistry(PluginRegistry):
def __init__(self, *args, **kw):
super(FilterRegistry, self).__init__(*args, **kw)
self.register('value', ValueFilter)
self.register('or', Or)
self.register('and', And)
self.register('not', Not)
self.register('event', EventFilter)
def parse(self, data, manager):
results = []
for d in data:
results.append(self.factory(d, manager))
return results
def factory(self, data, manager=None):
"""Factory func for filters.
data - policy config for filters
manager - resource type manager (ec2, s3, etc)
"""
# Make the syntax a little nicer for common cases.
if isinstance(data, dict) and len(data) == 1 and 'type' not in data:
op = list(data.keys())[0]
if op == 'or':
return Or(data, self, manager)
elif op == 'and':
return And(data, self, manager)
elif op == 'not':
return Not(data, self, manager)
return ValueFilter(data, manager).validate()
if isinstance(data, six.string_types):
filter_type = data
data = {'type': data}
else:
filter_type = data.get('type')
if not filter_type:
raise FilterValidationError(
"%s Invalid Filter %s" % (
self.plugin_type, data))
filter_class = self.get(filter_type)
if filter_class is not None:
return filter_class(data, manager)
else:
raise FilterValidationError(
"%s Invalid filter type %s" % (
self.plugin_type, data))
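# Illustrative shorthand forms accepted by factory() (example values only):
#   registry.factory('running')              # bare string -> {'type': 'running'}
#   registry.factory({'tag:Env': 'prod'})    # single-key dict -> ValueFilter
#   registry.factory({'or': [...]})          # boolean block -> Or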
# Really should be an abstract base class (abc) or
# zope.interface
class Filter(object):
executor_factory = ThreadPoolExecutor
log = logging.getLogger('custodian.filters')
metrics = ()
permissions = ()
schema = {'type': 'object'}
def __init__(self, data, manager=None):
self.data = data
self.manager = manager
def get_permissions(self):
return self.permissions
def validate(self):
"""validate filter config, return validation error or self"""
return self
def process(self, resources, event=None):
""" Bulk process resources and return filtered set."""
return list(filter(self, resources))
class Or(Filter):
def __init__(self, data, registry, manager):
super(Or, self).__init__(data)
self.registry = registry
self.filters = registry.parse(list(self.data.values())[0], manager)
self.manager = manager
def process(self, resources, event=None):
if self.manager:
return self.process_set(resources, event)
return super(Or, self).process(resources, event)
def __call__(self, r):
"""Fallback for older unit tests that don't utilize a query manager"""
for f in self.filters:
if f(r):
return True
return False
def process_set(self, resources, event):
resource_type = self.manager.get_model()
resource_map = {r[resource_type.id]: r for r in resources}
results = set()
for f in self.filters:
results = results.union([
r[resource_type.id] for r in f.process(resources, event)])
return [resource_map[r_id] for r_id in results]
class And(Filter):
def __init__(self, data, registry, manager):
super(And, self).__init__(data)
self.registry = registry
self.filters = registry.parse(list(self.data.values())[0], manager)
def process(self, resources, events=None):
for f in self.filters:
resources = f.process(resources, events)
return resources
class Not(Filter):
def __init__(self, data, registry, manager):
super(Not, self).__init__(data)
self.registry = registry
self.filters = registry.parse(list(self.data.values())[0], manager)
self.manager = manager
def process(self, resources, event=None):
if self.manager:
return self.process_set(resources, event)
return super(Not, self).process(resources, event)
def __call__(self, r):
"""Fallback for older unit tests that don't utilize a query manager"""
# There is an implicit 'and' for self.filters
# ~(A ^ B ^ ... ^ Z) = ~A v ~B v ... v ~Z
for f in self.filters:
if not f(r):
return True
return False
def process_set(self, resources, event):
resource_type = self.manager.get_model()
resource_map = {r[resource_type.id]: r for r in resources}
for f in self.filters:
resources = f.process(resources, event)
before = set(resource_map.keys())
after = set([r[resource_type.id] for r in resources])
results = before - after
return [resource_map[r_id] for r_id in results]
class ValueFilter(Filter):
"""Generic value filter using jmespath
"""
expr = None
op = v = vtype = None
schema = {
'type': 'object',
# Doesn't mix well with inherits that extend
'additionalProperties': False,
'required': ['type'],
'properties': {
# Doesn't mix well as enum with inherits that extend
'type': {'enum': ['value']},
'key': {'type': 'string'},
'value_type': {'enum': [
'age', 'integer', 'expiration', 'normalize', 'size',
'cidr', 'cidr_size', 'swap', 'resource_count', 'expr']},
'default': {'type': 'object'},
'value_from': ValuesFrom.schema,
'value': {'oneOf': [
{'type': 'array'},
{'type': 'string'},
{'type': 'boolean'},
{'type': 'number'},
{'type': 'null'}]},
'op': {'enum': list(OPERATORS.keys())}}}
annotate = True
def __init__(self, data, manager=None):
super(ValueFilter, self).__init__(data, manager)
self.expr = {}
def _validate_resource_count(self):
""" Specific validation for `resource_count` type
The `resource_count` type works a little differently because it operates
on the entire set of resources. It:
- does not require `key`
- `value` must be a number
- supports a subset of the OPERATORS list
"""
for field in ('op', 'value'):
if field not in self.data:
raise FilterValidationError(
"Missing '%s' in value filter %s" % (field, self.data))
if not (isinstance(self.data['value'], int) or
isinstance(self.data['value'], list)):
raise FilterValidationError(
"`value` must be an integer in resource_count filter %s" % self.data)
        # regex operates on strings, not the integer count, so it is disallowed
if self.data['op'] not in OPERATORS or self.data['op'] == 'regex':
raise FilterValidationError(
"Invalid operator in value filter %s" % self.data)
return self
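    # Illustrative (assumed policy snippet): match when there are at least
    # ten resources in total:
    #   {'type': 'value', 'value_type': 'resource_count', 'op': 'gte', 'value': 10}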
def validate(self):
if len(self.data) == 1:
return self
# `resource_count` requires a slightly different schema than the rest of
# the value filters because it operates on the full resource list
if self.data.get('value_type') == 'resource_count':
return self._validate_resource_count()
if 'key' not in self.data:
raise FilterValidationError(
"Missing 'key' in value filter %s" % self.data)
if 'value' not in self.data and 'value_from' not in self.data:
raise FilterValidationError(
"Missing 'value' in value filter %s" % self.data)
if 'op' in self.data:
if not self.data['op'] in OPERATORS:
raise FilterValidationError(
"Invalid operator in value filter %s" % self.data)
if self.data['op'] == 'regex':
# Sanity check that we can compile
try:
re.compile(self.data['value'])
except re.error as e:
raise FilterValidationError(
"Invalid regex: %s %s" % (e, self.data))
return self
def __call__(self, i):
if self.data.get('value_type') == 'resource_count':
return self.process(i)
matched = self.match(i)
if matched and self.annotate:
set_annotation(i, ANNOTATION_KEY, self.k)
return matched
def process(self, resources, event=None):
# For the resource_count filter we operate on the full set of resources.
if self.data.get('value_type') == 'resource_count':
op = OPERATORS[self.data.get('op')]
if op(len(resources), self.data.get('value')):
return resources
return []
return super(ValueFilter, self).process(resources, event)
def get_resource_value(self, k, i):
if k.startswith('tag:'):
tk = k.split(':', 1)[1]
r = None
for t in i.get("Tags", []):
if t.get('Key') == tk:
r = t.get('Value')
break
elif k in i:
r = i.get(k)
elif k not in self.expr:
self.expr[k] = jmespath.compile(k)
r = self.expr[k].search(i)
else:
r = self.expr[k].search(i)
return r
def match(self, i):
if self.v is None and len(self.data) == 1:
[(self.k, self.v)] = self.data.items()
elif self.v is None:
self.k = self.data.get('key')
self.op = self.data.get('op')
if 'value_from' in self.data:
values = ValuesFrom(self.data['value_from'], self.manager)
self.v = values.get_values()
else:
self.v = self.data.get('value')
self.vtype = self.data.get('value_type')
if i is None:
return False
# value extract
r = self.get_resource_value(self.k, i)
if self.op in ('in', 'not-in') and r is None:
r = ()
# value type conversion
if self.vtype is not None:
v, r = self.process_value_type(self.v, r, i)
else:
v = self.v
# Value match
if r is None and v == 'absent':
return True
elif r is not None and v == 'present':
return True
elif v == 'not-null' and r:
return True
elif v == 'empty' and not r:
return True
elif self.op:
op = OPERATORS[self.op]
try:
return op(r, v)
except TypeError:
return False
elif r == self.v:
return True
return False
def process_value_type(self, sentinel, value, resource):
if self.vtype == 'normalize' and isinstance(value, six.string_types):
return sentinel, value.strip().lower()
elif self.vtype == 'expr':
return sentinel, self.get_resource_value(value, resource)
elif self.vtype == 'integer':
try:
value = int(value.strip())
except ValueError:
value = 0
elif self.vtype == 'size':
try:
return sentinel, len(value)
except TypeError:
return sentinel, 0
elif self.vtype == 'swap':
return value, sentinel
elif self.vtype == 'age':
if not isinstance(sentinel, datetime.datetime):
sentinel = datetime.datetime.now(tz=tzutc()) - timedelta(sentinel)
if not isinstance(value, datetime.datetime):
                # Some services (e.g. EMR) can return a non-datetime value
                # here; fall back to parsing, defaulting to 0 on failure.
try:
value = parse(value, default=datetime.datetime.now(tz=tzutc()))
except (AttributeError, TypeError, ValueError):
value = 0
# Reverse the age comparison, we want to compare the value being
# greater than the sentinel typically. Else the syntax for age
# comparisons is intuitively wrong.
return value, sentinel
elif self.vtype == 'cidr':
s = parse_cidr(sentinel)
v = parse_cidr(value)
if (isinstance(s, ipaddress._BaseAddress) and isinstance(v, ipaddress._BaseNetwork)):
return v, s
return s, v
elif self.vtype == 'cidr_size':
cidr = parse_cidr(value)
if cidr:
return sentinel, cidr.prefixlen
return sentinel, 0
# Allows for expiration filtering, for events in the future as opposed
# to events in the past which age filtering allows for.
elif self.vtype == 'expiration':
if not isinstance(sentinel, datetime.datetime):
sentinel = datetime.datetime.now(tz=tzutc()) + timedelta(sentinel)
if not isinstance(value, datetime.datetime):
try:
value = parse(value, default=datetime.datetime.now(tz=tzutc()))
except (AttributeError, TypeError, ValueError):
value = 0
return sentinel, value
return sentinel, value
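# A minimal usage sketch (hedged: the filter blocks and resource shapes are
# illustrative, not taken from a real policy):
#
#   {'type': 'value', 'key': 'tag:env', 'value': 'prod'}
#       matches when the resource's 'env' tag equals 'prod'.
#
#   {'type': 'value', 'key': 'CreateDate', 'op': 'greater-than',
#    'value_type': 'age', 'value': 30}
#       converts 30 into a datetime 30 days in the past and swaps the
#       operands, so 'greater-than' reads as "older than 30 days".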
class AgeFilter(Filter):
"""Automatically filter resources older than a given date.
"""
threshold_date = None
    # The name of the attribute to compare to the threshold; must be overridden in subclasses
date_attribute = None
schema = None
def validate(self):
if not self.date_attribute:
            raise NotImplementedError(
                "date_attribute must be overridden in a subclass")
return self
def get_resource_date(self, i):
v = i[self.date_attribute]
if not isinstance(v, datetime.datetime):
v = parse(v)
if not v.tzinfo:
v = v.replace(tzinfo=tzutc())
return v
def __call__(self, i):
v = self.get_resource_date(i)
if v is None:
return False
op = OPERATORS[self.data.get('op', 'greater-than')]
if not self.threshold_date:
days = self.data.get('days', 0)
hours = self.data.get('hours', 0)
minutes = self.data.get('minutes', 0)
# Work around placebo issues with tz
if v.tzinfo:
n = datetime.datetime.now(tz=tzutc())
else:
n = datetime.datetime.now()
self.threshold_date = n - timedelta(days=days, hours=hours, minutes=minutes)
return op(self.threshold_date, v)
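# Sketch of a concrete subclass (hedged: 'LaunchTime' and the schema keys are
# illustrative, not taken from an existing resource module):
#
# class InstanceAgeFilter(AgeFilter):
#     date_attribute = 'LaunchTime'
#     schema = type_schema(
#         'instance-age', days={'type': 'number'}, hours={'type': 'number'})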
class EventFilter(ValueFilter):
"""Filter against a cloudwatch event associated to a resource type."""
schema = type_schema('event', rinherit=ValueFilter.schema)
def validate(self):
if 'mode' not in self.manager.data:
raise FilterValidationError(
"Event filters can only be used with lambda policies")
return self
def process(self, resources, event=None):
if event is None:
return resources
if self(event):
return resources
return []
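# Sketch of how an event filter might appear in a policy document (hedged:
# the resource, event name, and key path are illustrative):
#
# policies:
#   - name: ec2-run-by-root
#     resource: ec2
#     mode:
#       type: cloudtrail
#       events: [RunInstances]
#     filters:
#       - type: event
#         key: detail.userIdentity.type
#         value: Root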
|
|
"""
NEW FEATURES:
1. Score output is now dumped to validation_results.json
2. Custom instructor log lines can now be dumped to validation_logfile.txt
This file presents an example of a python custom validator for use in your Submitty assignment.
In this assignment, the student has been asked to randomly generate
n numbers, output them, and then output their sum.
To test that the output is truly random, we run the student program multiple
times. For each run, we make sure that:
1. The student produced n numbers.
2. They are correctly summed.
3. Between each pair of runs we make sure that the generated numbers aren't
identical (that they are random)
To read this file, begin at the bottom with do_the_grading, then progress to
grade_a_single_file. If you are interested, you may also examine the return_result
functions and the get_actual_files helper function or you may just copy them.
If you are interested in parsing command line arguments, examine the parse_args function.
"""
import os
import sys
import argparse
import json
import traceback
"""
This is the agreed upon name for the input data to the custom validator.
This is identical to the validation blob for this validator, plus the
value 'testcase_prefix', which denotes the testcase that is to be processed.
"""
GLOBAL_INPUT_JSON_PATH = 'custom_validator_input.json'
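# For reference, a sketch of the input blob this validator reads, based on the
# fields accessed below (file names are illustrative):
# {
#     "testcase_prefix": "test01",
#     "actual_file": ["run_1_output.txt", "run_2_output.txt"]
# }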
def parse_args():
"""
A simple argument parsing function.
This function is not necessary, but can be used as a template to help process command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--numbers", required=True, help="The number of numbers we expect", type=int)
return parser.parse_args()
"""
Helper functions for returning a score to the student.
"""
def log_line(line):
"""
Write a line to an instructor log file.
"""
mode = 'a' if os.path.exists('validation_logfile.txt') else 'w'
with open('validation_logfile.txt', mode) as outfile:
outfile.write(line+"\n")
def return_result(score, message, status):
"""
Return a non-error result to the student.
"""
    # Create the response to the student: a score, a message, and a status.
    # The status can be 'information', 'failure', 'warning', or 'success'.
result = {
# Status success means the grader did not crash (no validator failures).
'status' : "success",
'data': {
# Score is on a range from zero (no credit) to 1 (full credit)
'score' : score,
# A message to the student
'message' : message,
# The status of the submission (indicates if the
# student succeeded at the testcase).
# Values can be 'information', 'failure', 'warning'
# or 'success'.
'status':status
}
}
# Dump the results to validation_results.json as expected by Submitty.
with open('validation_results.json', 'w') as outfile:
json.dump(result, outfile, indent=4)
# End the program, because we have returned a result.
sys.exit(0)
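# For reference, a successful run leaves a validation_results.json shaped like
# this (values illustrative):
# {
#     "status": "success",
#     "data": {"score": 1.0, "message": "...", "status": "success"}
# }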
def return_error(error_message):
"""
This function should be used to return an error if the validator crashes.
If this function is called, the student will receive a score of zero.
"""
result = {
# Status fail means that the validator failed to process the
# student submission.
'status':"fail",
# A message to be output to help the instructor/student to debug.
'message':error_message
}
# Print the result json to stdout so that it can be read by submitty.
with open('validation_results.json', 'w') as outfile:
json.dump(result, outfile, indent=4)
# End the program, because we have returned a result.
sys.exit(0)
def get_actual_files():
"""
A helper function written to load in actual files.
To find actual files, we look for all of the files listed in the
'actual_file' section of this validator.
"""
try:
# Open the custom_validator_input.json that we specified in our config.
with open(GLOBAL_INPUT_JSON_PATH) as json_file:
testcase = json.load(json_file)
# Grab the folder housing the files.
prefix = testcase['testcase_prefix']
except Exception as e:
return_error('Could not open custom_validator_input.json')
# There can be either one actual file (a string) or a list of actual files.
# If there is only one actual file (a string)
if isinstance(testcase['actual_file'], str):
# The actual file is the prefix (test##) + the filename
# (e.g. test01/my_file.txt)
actual_file = [os.path.join(prefix, testcase['actual_file']),]
# Add the actual file to the actual file list.
actual_files = list(actual_file)
else:
# If there are many actual files (a list of them), iterate over them and
# append them all to the actual file list.
actual_files = list()
for file in testcase['actual_file']:
# The actual file is the prefix (test##) + the filename
# (e.g. test01/my_file.txt)
actual_files.append(os.path.join(prefix, file))
# Return a list of all the actual files.
return actual_files
def grade_a_single_file(file, number_of_numbers):
"""
For a file and a number of numbers, see if they sum correctly.
"""
try:
with open(file) as f:
# Read in all of the lines of the file (there is one number on each line)
numbers = f.readlines()
# Remove newlines/spaces from all lines of the file.
numbers = [x.strip() for x in numbers]
# The last line of the file is of the form "total = #" so we split with space as our delimiter.
numbers[-1] = numbers[-1].split()
# Make sure that the last line had 'total' in it.
            if 'total' not in numbers[-1]:
return_result(score=0, message="ERROR: total is not included", status='failure')
# The last line was of the form "total = #". We split earlier, and now we
# remove everything but the number.
numbers[-1] = numbers[-1][-1]
# Convert all of the numbers we read in from string to int.
numbers = [int(x) for x in numbers]
# Make sure that the 0 to n-1th numbers sum to the nth number.
if sum(numbers[:-1]) != numbers[-1]:
# If they do not, return zero credit with an error message.
return_result(score=0, message="ERROR: The numbers do not sum correctly", status='failure')
elif len(numbers[:-1]) != number_of_numbers:
# If they do sum correctly, make sure that we have the desired number of numbers.
return_result(score=0, message="ERROR: Incorrect number of numbers ({0} instead of {1})".format(len(numbers[:-1]), number_of_numbers), status='failure')
except Exception as e:
return_result(score=0, message="ERROR: Could not open output file.",status='failure')
# If no exception occurred and the numbers sum, return them so that we can do one last processing step.
return numbers
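# For reference, a student output file that this function accepts looks like
# this (values illustrative): one generated number per line, followed by a
# final "total = #" line.
#
#   3
#   1
#   4
#   total = 8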
def do_the_grading():
"""
Process a number of runs of the student program to make sure that
1) All runs resulted in a correct output.
2) All runs were different (and therefore were likely random).
"""
try:
# Parse command line arguments. In this assignment, this is how we learn
# how many numbers the student was supposed to sum together.
args = parse_args()
except Exception as e:
# If we can't parse the command line arguments, we must have done something
# wrong, so we'll return a failure message.
        return_error('ERROR: Incorrect arguments to custom validator')
number_of_numbers = args.numbers
# Grab all of the files we are supposed to check.
actual_files = get_actual_files()
# This variable will hold the numbers summed in the previous file. That way,
# we will be able to check that they are different in the next run.
prev_data = None
# For every student file
for file in actual_files:
log_line("Processing " + file)
# Make sure that the output in the file sums correctly
data = grade_a_single_file(file, number_of_numbers)
        # If we are on the first file, save this output so that we can check that the next
        # run is different (random).
        if prev_data is None:
            prev_data = data
        else:
            # If two consecutive runs of the student program yield the same random output,
            # then the program is probably not actually random, so return partial credit
            if data == prev_data:
                return_result(score=0.6, message="ERROR: Program is not random.", status='failure')
            # Remember this run so the next run is compared against its predecessor.
            prev_data = data
# If we make it all the way to the end, the student had the correct output and it was random,
# so return full credit.
return_result(score=1.0, message="Success: numbers summed correctly.", status='success')
if __name__ == '__main__':
"""
If this script is invoked directly, call the "do_the_grading" function (above).
"""
log_line("In this new version of the grader, we can now write to a logfile! This is helpful for debugging!")
do_the_grading()
|
|
r"""
The potential fields of a homogeneous sphere.
"""
from __future__ import division, absolute_import
import numpy as np
from ..constants import SI2MGAL, G, CM, T2NT, SI2EOTVOS
from .. import utils
from .._our_duecredit import due, Doi
due.cite(Doi("10.1017/CBO9780511549816"),
description='Forward modeling formula for spheres.',
path='fatiando.gravmag.sphere')
# These are the second derivatives of the potential V = 1/r; they are used by
# the magnetic field components, the total-field magnetic anomaly, the gravity
# gradients, and the kernel functions.
def _v_xx(x, y, z, r_sqr, r_5):
return (3*x**2 - r_sqr)/r_5
def _v_xy(x, y, z, r_sqr, r_5):
return 3*x*y/r_5
def _v_xz(x, y, z, r_sqr, r_5):
return 3*x*z/r_5
def _v_yy(x, y, z, r_sqr, r_5):
return (3*y**2 - r_sqr)/r_5
def _v_yz(x, y, z, r_sqr, r_5):
return 3*y*z/r_5
def _v_zz(x, y, z, r_sqr, r_5):
return (3*z**2 - r_sqr)/r_5
def tf(xp, yp, zp, spheres, inc, dec, pmag=None):
r"""
The total-field magnetic anomaly.
The anomaly is defined as (Blakely, 1995):
.. math::
\Delta T = |\mathbf{T}| - |\mathbf{F}|,
where :math:`\mathbf{T}` is the measured field and :math:`\mathbf{F}` is a
reference (regional) field.
The anomaly of a homogeneous sphere can be calculated as:
.. math::
\Delta T \approx \hat{\mathbf{F}}\cdot\mathbf{B}.
where :math:`\mathbf{B}` is the magnetic induction produced by the sphere.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
Input units should be SI. Output is in nT.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the anomaly will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the physical property
``'magnetization'``. Spheres that are ``None`` or without
``'magnetization'`` will be ignored. The magnetization is the total
(remanent + induced + any demagnetization effects) magnetization given
as a 3-component vector.
* inc, dec : floats
The inclination and declination of the regional field (in degrees)
* pmag : [mx, my, mz] or None
A magnetization vector. If not None, will use this value instead of the
``'magnetization'`` property of the spheres. Use this, e.g., for
sensitivity matrix building.
Returns:
* tf : array
The total-field anomaly
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
fx, fy, fz = utils.dircos(inc, dec)
if pmag is not None:
pmx, pmy, pmz = pmag
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'magnetization' not in sphere.props and pmag is None:
continue
if pmag is None:
mx, my, mz = sphere.props['magnetization']
else:
mx, my, mz = pmx, pmy, pmz
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
        # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
# Calculating v_xx, etc to calculate B is ~2x slower than this
dotprod = mx*x + my*y + mz*z
bx = (3*dotprod*x - r_sqr*mx)/r_5
by = (3*dotprod*y - r_sqr*my)/r_5
bz = (3*dotprod*z - r_sqr*mz)/r_5
res += volume*(fx*bx + fy*by + fz*bz)
res *= CM*T2NT
return res
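# Usage sketch (hedged: the Sphere constructor and utils.ang2vec follow the
# fatiando.mesher / fatiando.utils conventions; all values are illustrative):
#
# from fatiando import mesher, utils
# mag = utils.ang2vec(5, -30, 10)  # 5 A/m, inclination -30, declination 10
# model = [mesher.Sphere(0, 0, 2000, 1000, {'magnetization': mag})]
# anomaly = tf(xp, yp, zp, model, inc=-30, dec=10)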
def bx(xp, yp, zp, spheres, pmag=None):
"""
The x component of the magnetic induction.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
Input units should be SI. Output is in nT.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the anomaly will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the physical property
``'magnetization'``. Spheres that are ``None`` or without
``'magnetization'`` will be ignored. The magnetization is the total
(remanent + induced + any demagnetization effects) magnetization given
as a 3-component vector.
* pmag : [mx, my, mz] or None
A magnetization vector. If not None, will use this value instead of the
``'magnetization'`` property of the spheres. Use this, e.g., for
sensitivity matrix building.
Returns:
* bx: array
The x component of the magnetic induction
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
if pmag is not None:
pmx, pmy, pmz = pmag
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'magnetization' not in sphere.props and pmag is None:
continue
if pmag is None:
mx, my, mz = sphere.props['magnetization']
else:
mx, my, mz = pmx, pmy, pmz
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
        # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
# Calculating v_xx, etc to calculate B is ~1.3x slower than this
dotprod = mx*x + my*y + mz*z
res += volume*(3*dotprod*x - r_sqr*mx)/r_5
res *= CM * T2NT
return res
def by(xp, yp, zp, spheres, pmag=None):
"""
The y component of the magnetic induction.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
Input units should be SI. Output is in nT.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the anomaly will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the physical property
``'magnetization'``. Spheres that are ``None`` or without
``'magnetization'`` will be ignored. The magnetization is the total
(remanent + induced + any demagnetization effects) magnetization given
as a 3-component vector.
* pmag : [mx, my, mz] or None
A magnetization vector. If not None, will use this value instead of the
``'magnetization'`` property of the spheres. Use this, e.g., for
sensitivity matrix building.
Returns:
* by: array
The y component of the magnetic induction
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
if pmag is not None:
pmx, pmy, pmz = pmag
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'magnetization' not in sphere.props and pmag is None:
continue
if pmag is None:
mx, my, mz = sphere.props['magnetization']
else:
mx, my, mz = pmx, pmy, pmz
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
        # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
# Calculating v_xx, etc to calculate B is ~1.3x slower than this
dotprod = mx*x + my*y + mz*z
res += volume*(3*dotprod*y - r_sqr*my)/r_5
res *= CM * T2NT
return res
def bz(xp, yp, zp, spheres, pmag=None):
"""
The z component of the magnetic induction.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
Input units should be SI. Output is in nT.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the anomaly will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the physical property
``'magnetization'``. Spheres that are ``None`` or without
``'magnetization'`` will be ignored. The magnetization is the total
(remanent + induced + any demagnetization effects) magnetization given
as a 3-component vector.
* pmag : [mx, my, mz] or None
A magnetization vector. If not None, will use this value instead of the
``'magnetization'`` property of the spheres. Use this, e.g., for
sensitivity matrix building.
Returns:
* bz : array
The z component of the magnetic induction
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
if pmag is not None:
pmx, pmy, pmz = pmag
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'magnetization' not in sphere.props and pmag is None:
continue
if pmag is None:
mx, my, mz = sphere.props['magnetization']
else:
mx, my, mz = pmx, pmy, pmz
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
        # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
# Calculating v_xx, etc to calculate B is ~1.3x slower than this
dotprod = mx*x + my*y + mz*z
res += volume*(3*dotprod*z - r_sqr*mz)/r_5
res *= CM * T2NT
return res
def gz(xp, yp, zp, spheres, dens=None):
r"""
The :math:`g_z` gravitational acceleration component.
.. math::
g_z(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3} \dfrac{z - z'}{r^3}
in which :math:`\rho` is the density and
:math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in mGal.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. The ones
that are ``None`` or without a density will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'density' not in sphere.props and dens is None:
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r = np.sqrt(x**2 + y**2 + z**2)
        # This is faster than r_cb = r**3
r_cb = r*r*r
mass = density*4*np.pi*(sphere.radius**3)/3
res += mass*z/r_cb
res *= G*SI2MGAL
return res
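# Usage sketch (hedged: gridder.regular and the Sphere constructor follow the
# fatiando conventions; coordinates and the density are illustrative):
#
# from fatiando import mesher, gridder
# model = [mesher.Sphere(0, 0, 2000, 1000, {'density': 1000})]
# xp, yp, zp = gridder.regular((-5000, 5000, -5000, 5000), (101, 101), z=-100)
# gz_mgal = gz(xp, yp, zp, model)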
def gxx(xp, yp, zp, spheres, dens=None):
r"""
The :math:`g_{xx}` gravity gradient component.
.. math::
g_{xx}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
\dfrac{3 (x - x')^2 - r^2}{r^5}
in which :math:`\rho` is the density and
:math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in Eotvos.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. The ones
that are ``None`` or without a density will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'density' not in sphere.props and dens is None:
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
        # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res += density*volume*_v_xx(x, y, z, r_sqr, r_5)
res *= G*SI2EOTVOS
return res
def gxy(xp, yp, zp, spheres, dens=None):
r"""
The :math:`g_{xy}` gravity gradient component.
.. math::
g_{xy}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
\dfrac{3(x - x')(y - y')}{r^5}
in which :math:`\rho` is the density and
:math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in Eotvos.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. The ones
that are ``None`` or without a density will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'density' not in sphere.props and dens is None:
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
        # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res += density*volume*_v_xy(x, y, z, r_sqr, r_5)
res *= G*SI2EOTVOS
return res
def gxz(xp, yp, zp, spheres, dens=None):
r"""
The :math:`g_{xz}` gravity gradient component.
.. math::
g_{xz}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
\dfrac{3(x - x')(z - z')}{r^5}
in which :math:`\rho` is the density and
:math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in Eotvos.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. The ones
that are ``None`` or without a density will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'density' not in sphere.props and dens is None:
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
        # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res += density*volume*_v_xz(x, y, z, r_sqr, r_5)
res *= G*SI2EOTVOS
return res
def gyy(xp, yp, zp, spheres, dens=None):
r"""
The :math:`g_{yy}` gravity gradient component.
.. math::
g_{yy}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
\dfrac{3(y - y')^2 - r^2}{r^5}
in which :math:`\rho` is the density and
:math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in Eotvos.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. The ones
that are ``None`` or without a density will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'density' not in sphere.props and dens is None:
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
        # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res += density*volume*_v_yy(x, y, z, r_sqr, r_5)
res *= G*SI2EOTVOS
return res
def gyz(xp, yp, zp, spheres, dens=None):
r"""
The :math:`g_{yz}` gravity gradient component.
.. math::
g_{yz}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
\dfrac{3(y - y')(z - z')}{r^5}
in which :math:`\rho` is the density and
:math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in Eotvos.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. The ones
that are ``None`` or without a density will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'density' not in sphere.props and dens is None:
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
        # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res += density*volume*_v_yz(x, y, z, r_sqr, r_5)
res *= G*SI2EOTVOS
return res
def gzz(xp, yp, zp, spheres, dens=None):
r"""
The :math:`g_{zz}` gravity gradient component.
.. math::
g_{zz}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
\dfrac{3(z - z')^2 - r^2}{r^5}
in which :math:`\rho` is the density and
:math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in Eotvos.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the field will be calculated
* spheres : list of :class:`fatiando.mesher.Sphere`
The spheres. Spheres must have the property ``'density'``. The ones
that are ``None`` or without a density will be ignored.
* dens : float or None
If not None, will use this value instead of the ``'density'`` property
of the spheres. Use this, e.g., for sensitivity matrix building.
Returns:
* res : array
The field calculated on xp, yp, zp
References:
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
Applications, Cambridge University Press.
"""
res = 0
for sphere in spheres:
if sphere is None:
continue
if 'density' not in sphere.props and dens is None:
continue
if dens is None:
density = sphere.props['density']
else:
density = dens
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
        # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res += density*volume*_v_zz(x, y, z, r_sqr, r_5)
res *= G*SI2EOTVOS
return res
def kernelxx(xp, yp, zp, sphere):
r"""
The second x derivative of the kernel function
.. math::
\phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}
where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in SI.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : :class:`fatiando.mesher.Sphere`
The sphere.
Returns:
* res : array
The function calculated on xp, yp, zp
"""
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
    # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res = volume*_v_xx(x, y, z, r_sqr, r_5)
return res
def kernelxy(xp, yp, zp, sphere):
r"""
The xy derivative of the kernel function
.. math::
\phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}
where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in SI.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : :class:`fatiando.mesher.Sphere`
The sphere.
Returns:
* res : array
The function calculated on xp, yp, zp
"""
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
    # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res = volume*_v_xy(x, y, z, r_sqr, r_5)
return res
def kernelxz(xp, yp, zp, sphere):
r"""
The xz derivative of the kernel function
.. math::
\phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}
where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in SI.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : :class:`fatiando.mesher.Sphere`
The sphere.
Returns:
* res : array
The function calculated on xp, yp, zp
"""
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
    # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res = volume*_v_xz(x, y, z, r_sqr, r_5)
return res
def kernelyy(xp, yp, zp, sphere):
r"""
The second y derivative of the kernel function
.. math::
\phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}
where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in SI.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : :class:`fatiando.mesher.Sphere`
The sphere.
Returns:
* res : array
The function calculated on xp, yp, zp
"""
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
    # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res = volume*_v_yy(x, y, z, r_sqr, r_5)
return res
def kernelyz(xp, yp, zp, sphere):
r"""
The yz derivative of the kernel function
.. math::
\phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}
where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in SI.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : :class:`fatiando.mesher.Sphere`
The sphere.
Returns:
* res : array
The function calculated on xp, yp, zp
"""
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
    # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res = volume*_v_yz(x, y, z, r_sqr, r_5)
return res
def kernelzz(xp, yp, zp, sphere):
r"""
The second z derivative of the kernel function
.. math::
\phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}
where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.
The coordinate system of the input parameters is x -> North, y -> East and
z -> Down.
All input values should be in SI and output is in SI.
Parameters:
* xp, yp, zp : arrays
The x, y, and z coordinates where the function will be
calculated
* sphere : :class:`fatiando.mesher.Sphere`
The sphere.
Returns:
* res : array
The function calculated on xp, yp, zp
"""
x = sphere.x - xp
y = sphere.y - yp
z = sphere.z - zp
r_sqr = x**2 + y**2 + z**2
    # This is faster than r_5 = r_sqr**2.5
r = np.sqrt(r_sqr)
r_5 = r*r*r*r*r
volume = 4*np.pi*(sphere.radius**3)/3
res = volume*_v_zz(x, y, z, r_sqr, r_5)
return res
|
|
"""Predicates indicating if a level or cell should be unlocked."""
from django.db.models.query_utils import Q
from apps.widgets.smartgrid_play_tester import play_tester
from apps.widgets.smartgrid_design.models import DesignerAction, DesignerGrid
from apps.managers.smartgrid_mgr import smartgrid_mgr
def approved_action(user, draft_slug, action_slug):
"""Returns true if the action is approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft, action__slug=action_slug,
approval_status="approved").count() > 0
def approved_all_of_level(user, draft_slug, level_priority):
"""Returns True if the user has had all Actions on the given level approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
c = 0
count = len(DesignerGrid.objects.filter(draft=draft, level__priority=level_priority))
    for grid in DesignerGrid.objects.filter(draft=draft, level__priority=level_priority):
        c += user.testeractionsubmittion_set.filter(draft=draft, action=grid.action,
                                                    approval_status="approved").count()
return c >= count
def approved_all_of_resource(user, draft_slug, resource):
"""Returns True if the user has had all Actions of the given resource approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
count = DesignerAction.objects.filter(draft=draft, related_resource=resource).count()
return user.testeractionsubmittion_set.filter(draft=draft, action__related_resource=resource,
approval_status="approved").count() == count
def approved_all_of_type(user, draft_slug, action_type):
"""Returns True if the user has had all Actions of the action_type approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
count = DesignerAction.objects.filter(draft=draft, type=action_type).count()
return user.testeractionsubmittion_set.filter(action__type=action_type,
approval_status="approved").count() == count
def approved_some(user, draft_slug, count=1):
"""Returns True if the user has had count Actions approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft,
approval_status='approved').count() >= count
def approved_some_of_level(user, draft_slug, level_priority, count=1):
"""Returns True if the user has had count Actions approved for the given level."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
c = 0
    for grid in DesignerGrid.objects.filter(draft=draft, level__priority=level_priority):
        c += user.testeractionsubmittion_set.filter(draft=draft, action=grid.action,
                                                    approval_status="approved").count()
return c >= count
def approved_some_of_resource(user, draft_slug, resource, count=1):
"""Returns true of the user has had count Actions approved with the given resource."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft, action__related_resource=resource,
approval_status="approved").count() >= count
def approved_some_of_type(user, draft_slug, action_type, count=1):
"""Returns true if the user has had count Actions approved with the given action_type."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft, action__type=action_type,
approval_status="approved").count() >= count
def approved_some_full_spectrum(user, draft_slug, count=1):
"""Returns true if the user has had count Activities, Commitments, and Events approved."""
ret = approved_some_of_type(user, draft_slug, action_type='activity', count=count)
ret = ret and approved_some_of_type(user, draft_slug, action_type='commitment', count=count)
ret = ret and approved_some_of_type(user, draft_slug, action_type='event', count=count)
return ret
def completed_level(user, draft_slug, level_priority):
"""Returns true if the user has had all Activities and Commiments on the give level
approved."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
count = len(DesignerGrid.objects.filter(draft=draft,
level__priority=level_priority,
action__type='activity'))
count += len(DesignerGrid.objects.filter(draft=draft,
level__priority=level_priority,
action__type='commitment'))
c = 0
for grid in DesignerGrid.objects.filter(draft=draft,
level__priority=level_priority):
c += user.testeractionsubmittion_set.filter(draft=draft, action=grid.action,
approval_status="approved").count()
c += user.testeractionsubmittion_set.filter(draft=draft, action=grid.action,
action__type="commitment",
approval_status="pending").count()
return c >= count
def social_bonus_count(user, draft_slug, count):
"""Returns True if the number of social bonus the user received equals to count."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft,
social_bonus_awarded=True).count() >= count
def submitted_action(user, draft_slug, action_slug):
"""Returns true if the user complete the action."""
return action_slug in play_tester.get_submitted_actions(user, draft_slug)
def submitted_all_of_level(user, draft_slug, level_priority):
"""Returns True if the user has submitted all Actions on the given level."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
c = 0
count = len(DesignerGrid.objects.filter(draft=draft, level__priority=level_priority))
    for grid in DesignerGrid.objects.filter(draft=draft, level__priority=level_priority):
        c += user.testeractionsubmittion_set.filter(draft=draft, action=grid.action).count()
return c >= count
def submitted_all_of_resource(user, draft_slug, resource):
"""Returns true if user has submitted all Actions of the given resoruce."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
count = DesignerAction.objects.filter(draft=draft, related_resource=resource).count()
c = user.testeractionsubmittion_set.filter(draft=draft,
action__related_resource=resource).count()
return c == count
def submitted_all_of_type(user, draft_slug, action_type):
"""Returns true if user has submitted all Actions of the given action_type."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
count = DesignerAction.objects.filter(draft=draft, type=action_type).count()
return user.testeractionsubmittion_set.filter(draft=draft,
action__type=action_type).count() == count
def submitted_some(user, draft_slug, count=1):
"""Returns true if the user has completed count Actions."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft).count() >= count
def submitted_some_of_level(user, draft_slug, level_priority, count=1):
"""Returns true if the user has completed count Actions of the specified level."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
c = 0
    for grid in DesignerGrid.objects.filter(draft=draft, level__priority=level_priority):
        c += user.testeractionsubmittion_set.filter(draft=draft, action=grid.action).count()
return c >= count
def submitted_some_of_resource(user, draft_slug, resource, count=1):
"""Returns True if user has submitted count Actions with the given resource."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft,
action__related_resource=resource).count() >= \
count
def submitted_some_of_type(user, draft_slug, action_type, count=1):
"""Returns True if user has submitted count Actions with the given action_type."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
return user.testeractionsubmittion_set.filter(draft=draft,
action__type=action_type).count() >= count
def submitted_some_full_spectrum(user, draft_slug, count=1):
"""Returns true if the user has completed some activities, commitments, and
events."""
ret = submitted_some_of_type(user, draft_slug, action_type='activity', count=count)
ret = ret and submitted_some_of_type(user, draft_slug, action_type='commitment', count=count)
ret = ret and submitted_some_of_type(user, draft_slug, action_type='event', count=count)
return ret
def submitted_level(user, draft_slug, level_priority):
"""Returns true if the user has performed all activities successfully, and
attempted all commitments."""
draft = smartgrid_mgr.get_designer_draft(draft_slug)
num_completed = 0
level_actions = DesignerGrid.objects.filter(
Q(action__type='activity') | Q(action__type='commitment'),
draft=draft, level__priority=level_priority)
for grid in level_actions:
testeractionsubmittion = user.testeractionsubmittion_set.filter(draft=draft,
action=grid.action)
if testeractionsubmittion:
num_completed += 1
num_level = level_actions.count()
# check if there is any activity or commitment
if not num_level:
return False
return num_completed == num_level
def unlock_on_date(user, draft_slug, date_string):
"""Returns True."""
_ = user
_ = draft_slug
_ = date_string
return True
def unlock_on_event(user, draft_slug, event_slug, days=0, lock_after_days=0):
"""Returns true if the current date is equal to or after the date of the Event
defined by the event_slug, optionally days before. days should be a negative number.
Optionally lock_after_days, if not zero then will return false lock_after_days
after the event."""
_ = user
_ = draft_slug
_ = event_slug
_ = days
_ = lock_after_days
return True
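# Usage sketch (hedged: the predicate string is illustrative). These predicates
# are typically referenced from an action's unlock_condition string and
# evaluated with the tester's user and draft slug in scope, e.g.
#   "submitted_action('intro-video') and approved_some(count=2)"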
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import mox
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import ceilometer
from heat.engine.resources.openstack.ceilometer.gnocchi import \
alarm as gnocchi
from heat.engine import scheduler
from heat.tests import common
from heat.tests import utils
gnocchi_resources_alarm_template = '''
heat_template_version: 2013-05-23
description: Gnocchi Resources Alarm Test
resources:
GnoResAlarm:
type: OS::Ceilometer::GnocchiResourcesAlarm
properties:
description: Do stuff with gnocchi
metric: cpu_util
aggregation_method: mean
granularity: 60
evaluation_periods: 1
threshold: 50
alarm_actions: []
resource_type: instance
resource_id: 5a517ceb-b068-4aca-9eb9-3e4eb9b90d9a
comparison_operator: gt
'''
gnocchi_aggregation_by_metrics_alarm_template = '''
heat_template_version: 2013-05-23
description: Gnocchi Aggregation by Metrics Alarm Test
resources:
GnoAggregationByMetricsAlarm:
type: OS::Ceilometer::GnocchiAggregationByMetricsAlarm
properties:
description: Do stuff with gnocchi metrics
metrics: ["911fce07-e0d7-4210-8c8c-4a9d811fcabc",
"2543d435-fe93-4443-9351-fb0156930f94"]
aggregation_method: mean
granularity: 60
evaluation_periods: 1
threshold: 50
alarm_actions: []
comparison_operator: gt
'''
gnocchi_aggregation_by_resources_alarm_template = '''
heat_template_version: 2013-05-23
description: Gnocchi Aggregation by Resources Alarm Test
resources:
GnoAggregationByResourcesAlarm:
type: OS::Ceilometer::GnocchiAggregationByResourcesAlarm
properties:
description: Do stuff with gnocchi aggregation by resource
aggregation_method: mean
granularity: 60
evaluation_periods: 1
threshold: 50
metric: cpu_util
alarm_actions: []
resource_type: instance
query: '{"=": {"server_group": "my_autoscaling_group"}}'
comparison_operator: gt
'''
class FakeCeilometerAlarm(object):
alarm_id = 'foo'
def __init__(self):
self.to_dict = lambda: {'attr': 'val'}
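# The tests below use the mox record/replay/verify pattern: create_alarm()
# records the expected alarms.create(...) call, self.m.ReplayAll() switches the
# stubs to replay mode, and self.m.VerifyAll() asserts that every recorded call
# was made exactly as specified.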
class GnocchiResourcesAlarmTest(common.HeatTestCase):
def setUp(self):
super(GnocchiResourcesAlarmTest, self).setUp()
self.fc = mock.Mock()
def create_alarm(self):
self.m.StubOutWithMock(ceilometer.CeilometerClientPlugin, '_create')
ceilometer.CeilometerClientPlugin._create().AndReturn(
self.fc)
self.m.StubOutWithMock(self.fc.alarms, 'create')
self.fc.alarms.create(
alarm_actions=[],
description=u'Do stuff with gnocchi',
enabled=True,
insufficient_data_actions=None,
ok_actions=None,
name=mox.IgnoreArg(), type='gnocchi_resources_threshold',
repeat_actions=True,
gnocchi_resources_threshold_rule={
"metric": "cpu_util",
"aggregation_method": "mean",
"granularity": 60,
"evaluation_periods": 1,
"threshold": 50,
"resource_type": "instance",
"resource_id": "5a517ceb-b068-4aca-9eb9-3e4eb9b90d9a",
"comparison_operator": "gt",
},
time_constraints=[],
severity='low',
).AndReturn(FakeCeilometerAlarm())
snippet = template_format.parse(gnocchi_resources_alarm_template)
self.stack = utils.parse_stack(snippet)
resource_defns = self.stack.t.resource_definitions(self.stack)
return gnocchi.CeilometerGnocchiResourcesAlarm(
'GnoResAlarm', resource_defns['GnoResAlarm'], self.stack)
def test_update(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarms, 'update')
self.fc.alarms.update(
alarm_id='foo',
gnocchi_resources_threshold_rule={
'resource_id': 'd3d6c642-921e-4fc2-9c5f-15d9a5afb598'})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['resource_id'] = (
'd3d6c642-921e-4fc2-9c5f-15d9a5afb598')
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def _prepare_check_resource(self):
snippet = template_format.parse(gnocchi_resources_alarm_template)
self.stack = utils.parse_stack(snippet)
res = self.stack['GnoResAlarm']
res.client = mock.Mock()
mock_alarm = mock.Mock(enabled=True, state='ok')
res.client().alarms.get.return_value = mock_alarm
return res
def test_create(self):
rsrc = self.create_alarm()
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('foo', rsrc.resource_id)
self.m.VerifyAll()
def test_suspend(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarms, 'update')
self.fc.alarms.update(alarm_id='foo', enabled=False)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.suspend)()
self.assertEqual((rsrc.SUSPEND, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_resume(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarms, 'update')
self.fc.alarms.update(alarm_id='foo', enabled=True)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
rsrc.state_set(rsrc.SUSPEND, rsrc.COMPLETE)
scheduler.TaskRunner(rsrc.resume)()
self.assertEqual((rsrc.RESUME, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_check(self):
res = self._prepare_check_resource()
scheduler.TaskRunner(res.check)()
self.assertEqual((res.CHECK, res.COMPLETE), res.state)
def test_check_failure(self):
res = self._prepare_check_resource()
res.client().alarms.get.side_effect = Exception('Boom')
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
self.assertEqual((res.CHECK, res.FAILED), res.state)
self.assertIn('Boom', res.status_reason)
def test_show_resource(self):
res = self._prepare_check_resource()
res.client().alarms.create.return_value = mock.MagicMock(
alarm_id='2')
res.client().alarms.get.return_value = FakeCeilometerAlarm()
scheduler.TaskRunner(res.create)()
self.assertEqual({'attr': 'val'}, res.FnGetAtt('show'))
class GnocchiAggregationByMetricsAlarmTest(GnocchiResourcesAlarmTest):
def create_alarm(self):
self.m.StubOutWithMock(ceilometer.CeilometerClientPlugin, '_create')
ceilometer.CeilometerClientPlugin._create().AndReturn(
self.fc)
self.m.StubOutWithMock(self.fc.alarms, 'create')
self.fc.alarms.create(
alarm_actions=[],
description=u'Do stuff with gnocchi metrics',
enabled=True,
insufficient_data_actions=None,
ok_actions=None,
name=mox.IgnoreArg(),
type='gnocchi_aggregation_by_metrics_threshold',
repeat_actions=True,
gnocchi_aggregation_by_metrics_threshold_rule={
"aggregation_method": "mean",
"granularity": 60,
"evaluation_periods": 1,
"threshold": 50,
"comparison_operator": "gt",
"metrics": ["911fce07-e0d7-4210-8c8c-4a9d811fcabc",
"2543d435-fe93-4443-9351-fb0156930f94"],
},
time_constraints=[],
severity='low',
).AndReturn(FakeCeilometerAlarm())
snippet = template_format.parse(
gnocchi_aggregation_by_metrics_alarm_template)
self.stack = utils.parse_stack(snippet)
resource_defns = self.stack.t.resource_definitions(self.stack)
return gnocchi.CeilometerGnocchiAggregationByMetricsAlarm(
'GnoAggregationByMetricsAlarm',
resource_defns['GnoAggregationByMetricsAlarm'], self.stack)
def test_update(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarms, 'update')
self.fc.alarms.update(
alarm_id='foo',
gnocchi_aggregation_by_metrics_threshold_rule={
'metrics': ['d3d6c642-921e-4fc2-9c5f-15d9a5afb598',
'bc60f822-18a0-4a0c-94e7-94c554b00901']})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['metrics'] = [
'd3d6c642-921e-4fc2-9c5f-15d9a5afb598',
'bc60f822-18a0-4a0c-94e7-94c554b00901']
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def _prepare_check_resource(self):
snippet = template_format.parse(
gnocchi_aggregation_by_metrics_alarm_template)
self.stack = utils.parse_stack(snippet)
res = self.stack['GnoAggregationByMetricsAlarm']
res.client = mock.Mock()
mock_alarm = mock.Mock(enabled=True, state='ok')
res.client().alarms.get.return_value = mock_alarm
return res
def test_show_resource(self):
res = self._prepare_check_resource()
res.client().alarms.create.return_value = mock.MagicMock(
alarm_id='2')
res.client().alarms.get.return_value = FakeCeilometerAlarm()
scheduler.TaskRunner(res.create)()
self.assertEqual({'attr': 'val'}, res.FnGetAtt('show'))
class GnocchiAggregationByResourcesAlarmTest(GnocchiResourcesAlarmTest):
def create_alarm(self):
self.m.StubOutWithMock(ceilometer.CeilometerClientPlugin, '_create')
ceilometer.CeilometerClientPlugin._create().AndReturn(
self.fc)
self.m.StubOutWithMock(self.fc.alarms, 'create')
self.fc.alarms.create(
alarm_actions=[],
description=u'Do stuff with gnocchi aggregation by resource',
enabled=True,
insufficient_data_actions=None,
ok_actions=None,
name=mox.IgnoreArg(),
type='gnocchi_aggregation_by_resources_threshold',
repeat_actions=True,
gnocchi_aggregation_by_resources_threshold_rule={
"aggregation_method": "mean",
"granularity": 60,
"evaluation_periods": 1,
"threshold": 50,
"comparison_operator": "gt",
"metric": "cpu_util",
"resource_type": "instance",
"query": '{"=": {"server_group": "my_autoscaling_group"}}',
},
time_constraints=[],
severity='low',
).AndReturn(FakeCeilometerAlarm())
snippet = template_format.parse(
gnocchi_aggregation_by_resources_alarm_template)
self.stack = utils.parse_stack(snippet)
resource_defns = self.stack.t.resource_definitions(self.stack)
return gnocchi.CeilometerGnocchiAggregationByResourcesAlarm(
'GnoAggregationByResourcesAlarm',
resource_defns['GnoAggregationByResourcesAlarm'], self.stack)
def test_update(self):
rsrc = self.create_alarm()
self.m.StubOutWithMock(self.fc.alarms, 'update')
self.fc.alarms.update(
alarm_id='foo',
gnocchi_aggregation_by_resources_threshold_rule={
'query': '{"=": {"server_group": "my_new_group"}}'})
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.create)()
update_template = copy.deepcopy(rsrc.t)
update_template['Properties']['query'] = (
'{"=": {"server_group": "my_new_group"}}')
scheduler.TaskRunner(rsrc.update, update_template)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def _prepare_check_resource(self):
snippet = template_format.parse(
gnocchi_aggregation_by_resources_alarm_template)
self.stack = utils.parse_stack(snippet)
res = self.stack['GnoAggregationByResourcesAlarm']
res.client = mock.Mock()
mock_alarm = mock.Mock(enabled=True, state='ok')
res.client().alarms.get.return_value = mock_alarm
return res
def test_show_resource(self):
res = self._prepare_check_resource()
res.client().alarms.create.return_value = mock.MagicMock(
alarm_id='2')
res.client().alarms.get.return_value = FakeCeilometerAlarm()
scheduler.TaskRunner(res.create)()
self.assertEqual({'attr': 'val'}, res.FnGetAtt('show'))
|
|
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str or if x is an iterable which
contains a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
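# Illustrative sketch, not part of the original module: constructing a
# Consumer with hypothetical registration credentials.
def _example_consumer():
    consumer = Consumer(key='example-app-key', secret='example-app-secret')
    # str(consumer) urlencodes the key/secret pair, handy for debugging.
    return str(consumer)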
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
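# Illustrative sketch, not part of the original module: serializing a token
# for storage and restoring it. The key/secret values are hypothetical.
def _example_token_roundtrip():
    token = Token('request-key', 'request-secret')
    token.set_callback('https://example.com/cb')
    serialized = token.to_string()  # urlencoded; includes the secret
    restored = Token.from_string(serialized)
    return restored.key == token.key and restored.secret == token.secret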
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
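# Note on the `setter` helper above: decorating a method with @setter turns
# it into a property whose setter is the decorated function, whose getter
# reads the value stashed in self.__dict__, and whose deleter removes it.
# Request uses it below so that assigning to `url` or `method` triggers
# normalization as a side effect.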
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
# Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body='', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = dict([(k,v[0]) for k,v in parse_qs(body).items()])
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
schema, rest = urllib.splittype(uri)
if rest.startswith('//'):
hierpart = '//'
else:
hierpart = ''
host, rest = urllib.splithost(rest)
realm = schema + ':' + hierpart + host
if method == "POST" and is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
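# Illustrative sketch, not part of the original module: issuing a signed GET
# with Client. The endpoint URL and credentials are hypothetical.
def _example_client_get():
    consumer = Consumer('example-app-key', 'example-app-secret')
    token = Token('example-access-key', 'example-access-secret')
    client = Client(consumer, token)
    # Returns the (response, content) pair from httplib2.Http.request; for
    # GET requests the OAuth parameters are folded into the query string.
    return client.request('https://api.example.com/resource', method='GET')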
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise Error('Signature method %s not supported; try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
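# Illustrative sketch, not part of the original module: verifying an incoming
# signed request on the provider side. The caller supplies the consumer and
# token records looked up from its own storage.
def _example_server_verify(request, consumer, token):
    server = Server()
    server.add_signature_method(SignatureMethod_HMAC_SHA1())
    # Raises Error/MissingSignature on failure; on success returns the
    # non-OAuth parameters of the request.
    return server.verify_request(request, consumer, token)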
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
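# Illustrative sketch, not part of the original module: building and signing
# a request by hand, tying Consumer, Request and SignatureMethod_HMAC_SHA1
# together. The URL and credentials are hypothetical.
def _example_sign_by_hand():
    consumer = Consumer('example-app-key', 'example-app-secret')
    request = Request.from_consumer_and_token(
        consumer, http_method='GET',
        http_url='https://api.example.com/photos?size=original')
    request.sign_request(SignatureMethod_HMAC_SHA1(), consumer, None)
    return request.to_url()  # the signed URL, ready for a GET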
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""This file contains a unit test for the timelib in Plaso."""
import datetime
import unittest
import uuid
from plaso.lib import errors
from plaso.lib import timelib
import pytz # pylint: disable=wrong-import-order
class TimeLibTest(unittest.TestCase):
"""Tests for timestamp."""
def testCopyFromString(self):
"""Tests the CopyFromString function."""
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27')
expected_timestamp = 1340755200000000
self.assertEqual(timestamp, expected_timestamp)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(None)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-6-27')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-00-27')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-13-27')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-01-00')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-01-32')
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01')
expected_timestamp = 1340821021000000
self.assertEqual(timestamp, expected_timestamp)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:1')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27T18:17:01')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 24:17:01')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:60:01')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:60')
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.123')
expected_timestamp = 1340821021123000
self.assertEqual(timestamp, expected_timestamp)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.12')
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.123456')
expected_timestamp = 1340821021123456
self.assertEqual(timestamp, expected_timestamp)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.1234')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01.1234567')
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+00:00')
expected_timestamp = 1340821021000000
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+01:00')
expected_timestamp = 1340817421000000
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01-07:00')
expected_timestamp = 1340846221000000
self.assertEqual(timestamp, expected_timestamp)
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+1')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+01')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+01:0')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01+00:00:0')
with self.assertRaises(ValueError):
_ = timelib.Timestamp.CopyFromString(u'2012-06-27 18:17:01Z')
def testCocoaTime(self):
"""Tests the Cocoa timestamp conversion."""
timestamp = timelib.Timestamp.FromCocoaTime(395011845)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-07-08 21:30:45')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromCocoaTime(395353142)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-07-12 20:19:02')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromCocoaTime(394993669)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-07-08 16:27:49')
self.assertEqual(timestamp, expected_timestamp)
def testHFSTimes(self):
"""Tests the HFS timestamp conversion."""
timestamp = timelib.Timestamp.FromHfsTime(
3458215528, timezone=pytz.timezone(u'EST5EDT'), is_dst=True)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-08-01 15:25:28-04:00')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromHfsPlusTime(3458215528)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-08-01 15:25:28')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromHfsPlusTime(3413373928)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-02-29 15:25:28')
self.assertEqual(timestamp, expected_timestamp)
def testSystemtime(self):
"""Tests the SYSTEMTIME timestamp conversion."""
timestamp = timelib.Timestamp.FromSystemtime(
b'\xde\x07\x0c\x00\x02\x00\x10\x00\x08\x00\x04\x00\x27\x00\x6a\x00')
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-12-16 08:04:39.106')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromSystemtime(
b'\xe0\x07\x05\x00\x05\x00\x06\x00\x09\x00\x1b\x00\x24\x00\x30\x01')
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2016-05-06 09:27:36.304')
self.assertEqual(timestamp, expected_timestamp)
def testTimestampIsLeapYear(self):
"""Tests the is leap year check."""
self.assertEqual(timelib.Timestamp.IsLeapYear(2012), True)
self.assertEqual(timelib.Timestamp.IsLeapYear(2013), False)
self.assertEqual(timelib.Timestamp.IsLeapYear(2000), True)
self.assertEqual(timelib.Timestamp.IsLeapYear(1900), False)
def testTimestampDaysInMonth(self):
"""Tests the days in month function."""
self.assertEqual(timelib.Timestamp.DaysInMonth(0, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(1, 2013), 28)
self.assertEqual(timelib.Timestamp.DaysInMonth(1, 2012), 29)
self.assertEqual(timelib.Timestamp.DaysInMonth(2, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(3, 2013), 30)
self.assertEqual(timelib.Timestamp.DaysInMonth(4, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(5, 2013), 30)
self.assertEqual(timelib.Timestamp.DaysInMonth(6, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(7, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(8, 2013), 30)
self.assertEqual(timelib.Timestamp.DaysInMonth(9, 2013), 31)
self.assertEqual(timelib.Timestamp.DaysInMonth(10, 2013), 30)
self.assertEqual(timelib.Timestamp.DaysInMonth(11, 2013), 31)
with self.assertRaises(ValueError):
timelib.Timestamp.DaysInMonth(-1, 2013)
with self.assertRaises(ValueError):
timelib.Timestamp.DaysInMonth(12, 2013)
def testTimestampDaysInYear(self):
"""Test the days in year function."""
self.assertEqual(timelib.Timestamp.DaysInYear(2013), 365)
self.assertEqual(timelib.Timestamp.DaysInYear(2012), 366)
def testTimestampDayOfYear(self):
"""Test the day of year function."""
self.assertEqual(timelib.Timestamp.DayOfYear(0, 0, 2013), 0)
self.assertEqual(timelib.Timestamp.DayOfYear(0, 2, 2013), 31 + 28)
self.assertEqual(timelib.Timestamp.DayOfYear(0, 2, 2012), 31 + 29)
expected_day_of_year = 31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30
self.assertEqual(
timelib.Timestamp.DayOfYear(0, 11, 2013), expected_day_of_year)
def testTimestampFromDelphiTime(self):
"""Test the Delphi date time conversion."""
timestamp = timelib.Timestamp.FromDelphiTime(41443.8263953)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-06-18 19:50:00')
self.assertEqual(timestamp, expected_timestamp)
def testTimestampFromFatDateTime(self):
"""Test the FAT date time conversion."""
timestamp = timelib.Timestamp.FromFatDateTime(0xa8d03d0c)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-08-12 21:06:32')
self.assertEqual(timestamp, expected_timestamp)
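# The cases below assume the FAT date-time bit packing: in the low (date)
# word, bits 0-4 hold the day of month, 5-8 the month and 9-15 the years
# since 1980; in the high (time) word, bits 16-20 hold seconds/2 (so 30 is
# out of range), 21-26 the minutes and 27-31 the hours. Each case overwrites
# one field with an out-of-range value and expects the conversion to return 0.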
# Invalid number of seconds.
fat_date_time = (0xa8d03d0c & ~(0x1f << 16)) | ((30 & 0x1f) << 16)
self.assertEqual(timelib.Timestamp.FromFatDateTime(fat_date_time), 0)
# Invalid number of minutes.
fat_date_time = (0xa8d03d0c & ~(0x3f << 21)) | ((60 & 0x3f) << 21)
self.assertEqual(timelib.Timestamp.FromFatDateTime(fat_date_time), 0)
# Invalid number of hours.
fat_date_time = (0xa8d03d0c & ~(0x1f << 27)) | ((24 & 0x1f) << 27)
self.assertEqual(timelib.Timestamp.FromFatDateTime(fat_date_time), 0)
# Invalid day of month.
fat_date_time = (0xa8d03d0c & ~0x1f) | (32 & 0x1f)
self.assertEqual(timelib.Timestamp.FromFatDateTime(fat_date_time), 0)
# Invalid month.
fat_date_time = (0xa8d03d0c & ~(0x0f << 5)) | ((13 & 0x0f) << 5)
self.assertEqual(timelib.Timestamp.FromFatDateTime(fat_date_time), 0)
def testTimestampFromFiletime(self):
"""Test the FILETIME conversion."""
timestamp = timelib.Timestamp.FromFiletime(0x01cb3a623d0a17ce)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-08-12 21:06:31.546875')
self.assertEqual(timestamp, expected_timestamp)
filetime = 86400 * 10000000
timestamp = timelib.Timestamp.FromFiletime(filetime)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'1601-01-02 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
# FILETIME that exceeds lower bound.
filetime = -1
self.assertEqual(timelib.Timestamp.FromFiletime(filetime), 0)
def testTimestampFromPosixTime(self):
"""Test the POSIX time conversion."""
timestamp = timelib.Timestamp.FromPosixTime(1281647191)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-08-12 21:06:31')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromPosixTime(-122557518)
expected_timestamp = timelib.Timestamp.FromTimeString(
u'1966-02-12 12:14:42 UTC')
self.assertEqual(timestamp, expected_timestamp)
# POSIX time that exceeds upper bound.
self.assertEqual(timelib.Timestamp.FromPosixTime(9223372036855), 0)
# POSIX time that exceeds lower bound.
self.assertEqual(timelib.Timestamp.FromPosixTime(-9223372036855), 0)
def testTimestampFromUUIDTime(self):
"""Test the UUID time conversion."""
uuid_object = uuid.UUID(u'00911b54-9ef4-11e1-be53-525400123456')
timestamp = timelib.Timestamp.FromUUIDTime(uuid_object.time)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2012-05-16 01:11:01.654408')
self.assertEqual(timestamp, expected_timestamp)
uuid_time = 86400 * 10000000
timestamp = timelib.Timestamp.FromUUIDTime(uuid_time)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'1582-10-16 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
# UUID time that exceeds lower bound.
uuid_time = -1
self.assertEqual(timelib.Timestamp.FromUUIDTime(uuid_time), 0)
def testTimestampFromWebKitTime(self):
"""Test the WebKit time conversion."""
timestamp = timelib.Timestamp.FromWebKitTime(0x2dec3d061a9bfb)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2010-08-12 21:06:31.546875')
self.assertEqual(timestamp, expected_timestamp)
webkit_time = 86400 * 1000000
timestamp = timelib.Timestamp.FromWebKitTime(webkit_time)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'1601-01-02 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
# WebKit time that exceeds lower bound.
webkit_time = -((1 << 63) - 1)
self.assertEqual(timelib.Timestamp.FromWebKitTime(webkit_time), 0)
def testMonthDict(self):
"""Test the month dict, both inside and outside of scope."""
self.assertEqual(timelib.MONTH_DICT[u'nov'], 11)
self.assertEqual(timelib.MONTH_DICT[u'jan'], 1)
self.assertEqual(timelib.MONTH_DICT[u'may'], 5)
month = timelib.MONTH_DICT.get(u'doesnotexist')
self.assertIsNone(month)
def testLocaltimeToUTC(self):
"""Test the localtime to UTC conversion."""
timezone = pytz.timezone(u'CET')
local_timestamp = timelib.Timestamp.CopyFromString(u'2013-01-01 01:00:00')
timestamp = timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-01-01 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
local_timestamp = timelib.Timestamp.CopyFromString(u'2013-07-01 02:00:00')
timestamp = timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-07-01 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
# In the local timezone this is a non-existent timestamp.
local_timestamp = timelib.Timestamp.CopyFromString(
u'2013-03-31 02:00:00')
with self.assertRaises(pytz.NonExistentTimeError):
timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone, is_dst=None)
timestamp = timelib.Timestamp.LocaltimeToUTC(
local_timestamp, timezone, is_dst=True)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-03-31 00:00:00')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.LocaltimeToUTC(
local_timestamp, timezone, is_dst=False)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-03-31 01:00:00')
self.assertEqual(timestamp, expected_timestamp)
# In the local timezone this is an ambiguous timestamp.
local_timestamp = timelib.Timestamp.CopyFromString(u'2013-10-27 02:30:00')
with self.assertRaises(pytz.AmbiguousTimeError):
timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone, is_dst=None)
timestamp = timelib.Timestamp.LocaltimeToUTC(
local_timestamp, timezone, is_dst=True)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-10-27 00:30:00')
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-10-27 01:30:00')
self.assertEqual(timestamp, expected_timestamp)
# Use the UTC timezone.
self.assertEqual(
timelib.Timestamp.LocaltimeToUTC(local_timestamp, pytz.UTC),
local_timestamp)
# Use a timezone in the Western Hemisphere.
timezone = pytz.timezone(u'EST')
local_timestamp = timelib.Timestamp.CopyFromString(u'2013-01-01 00:00:00')
timestamp = timelib.Timestamp.LocaltimeToUTC(local_timestamp, timezone)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-01-01 05:00:00')
self.assertEqual(timestamp, expected_timestamp)
def testCopyToDatetime(self):
"""Test the copy to datetime object."""
timezone = pytz.timezone(u'CET')
timestamp = timelib.Timestamp.CopyFromString(u'2013-03-14 20:20:08.850041')
datetime_object = timelib.Timestamp.CopyToDatetime(timestamp, timezone)
expected_datetime_object = datetime.datetime(
2013, 3, 14, 21, 20, 8, 850041, tzinfo=timezone)
self.assertEqual(datetime_object, expected_datetime_object)
def testCopyToPosix(self):
"""Test converting microseconds to seconds."""
timestamp = timelib.Timestamp.CopyFromString(u'2013-10-01 12:00:00')
expected_posixtime, _ = divmod(timestamp, 1000000)
posixtime = timelib.Timestamp.CopyToPosix(timestamp)
self.assertEqual(posixtime, expected_posixtime)
def testTimestampFromTimeString(self):
"""The the FromTimeString function."""
# Test daylight savings.
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-10-01 12:00:00')
# Check several variants of this timestamp.
timestamp = timelib.Timestamp.FromTimeString(
u'2013-10-01 14:00:00', timezone=pytz.timezone(u'Europe/Rome'))
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromTimeString(
u'2013-10-01 12:00:00', timezone=pytz.timezone(u'UTC'))
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromTimeString(
u'2013-10-01 05:00:00', timezone=pytz.timezone(u'PST8PDT'))
self.assertEqual(timestamp, expected_timestamp)
# Now to test outside of the daylight savings.
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-02-01 12:00:00')
timestamp = timelib.Timestamp.FromTimeString(
u'2014-02-01 13:00:00', timezone=pytz.timezone(u'Europe/Rome'))
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromTimeString(
u'2014-02-01 12:00:00', timezone=pytz.timezone(u'UTC'))
self.assertEqual(timestamp, expected_timestamp)
timestamp = timelib.Timestamp.FromTimeString(
u'2014-02-01 04:00:00', timezone=pytz.timezone(u'PST8PDT'))
self.assertEqual(timestamp, expected_timestamp)
# Define two timestamps, one being GMT and the other UTC.
time_string_utc = u'Wed 05 May 2010 03:52:31 UTC'
time_string_gmt = u'Wed 05 May 2010 03:52:31 GMT'
timestamp_utc = timelib.Timestamp.FromTimeString(time_string_utc)
timestamp_gmt = timelib.Timestamp.FromTimeString(time_string_gmt)
# If these two differ, try again with GMT not treated as a timezone
# (gmt_as_timezone=False), which should then yield the same result.
if timestamp_utc != timestamp_gmt:
self.assertEqual(timestamp_utc, timelib.Timestamp.FromTimeString(
time_string_gmt, gmt_as_timezone=False))
timestamp = timelib.Timestamp.FromTimeString(
u'12-15-1984 05:13:00', timezone=pytz.timezone(u'EST5EDT'))
self.assertEqual(timestamp, 471953580000000)
# Swap day and month.
timestamp = timelib.Timestamp.FromTimeString(
u'12-10-1984 05:13:00', timezone=pytz.timezone(u'EST5EDT'),
dayfirst=True)
self.assertEqual(timestamp, 466420380000000)
timestamp = timelib.Timestamp.FromTimeString(u'12-15-1984 10:13:00Z')
self.assertEqual(timestamp, 471953580000000)
# Setting the timezone for string that already contains a timezone
# indicator should not affect the conversion.
timestamp = timelib.Timestamp.FromTimeString(
u'12-15-1984 10:13:00Z', timezone=pytz.timezone(u'EST5EDT'))
self.assertEqual(timestamp, 471953580000000)
timestamp = timelib.Timestamp.FromTimeString(u'15/12/1984 10:13:00Z')
self.assertEqual(timestamp, 471953580000000)
timestamp = timelib.Timestamp.FromTimeString(u'15-12-84 10:13:00Z')
self.assertEqual(timestamp, 471953580000000)
timestamp = timelib.Timestamp.FromTimeString(
u'15-12-84 10:13:00-04', timezone=pytz.timezone(u'EST5EDT'))
self.assertEqual(timestamp, 471967980000000)
with self.assertRaises(errors.TimestampError):
timestamp = timelib.Timestamp.FromTimeString(
u'thisisnotadatetime', timezone=pytz.timezone(u'EST5EDT'))
timestamp = timelib.Timestamp.FromTimeString(
u'12-15-1984 04:13:00', timezone=pytz.timezone(u'America/Chicago'))
self.assertEqual(timestamp, 471953580000000)
timestamp = timelib.Timestamp.FromTimeString(
u'07-14-1984 23:13:00', timezone=pytz.timezone(u'America/Chicago'))
self.assertEqual(timestamp, 458712780000000)
timestamp = timelib.Timestamp.FromTimeString(
u'12-15-1984 05:13:00', timezone=pytz.timezone(u'US/Pacific'))
self.assertEqual(timestamp, 471964380000000)
def testRoundTimestamp(self):
"""Test the RoundToSeconds function."""
# Should be rounded up.
test_one = 442813351785412
# Should be rounded down.
test_two = 1384381247271976
self.assertEqual(
timelib.Timestamp.RoundToSeconds(test_one), 442813352000000)
self.assertEqual(
timelib.Timestamp.RoundToSeconds(test_two), 1384381247000000)
def testTimestampFromTimeParts(self):
"""Test the FromTimeParts function."""
timestamp = timelib.Timestamp.FromTimeParts(
2013, 6, 25, 22, 19, 46, 0, timezone=pytz.timezone(u'PST8PDT'))
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-06-25 22:19:46-07:00')
self.assertEqual(timestamp, expected_timestamp)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-06-26 05:19:46')
timestamp = timelib.Timestamp.FromTimeParts(2013, 6, 26, 5, 19, 46)
self.assertEqual(timestamp, expected_timestamp)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-06-26 05:19:46.000542')
timestamp = timelib.Timestamp.FromTimeParts(
2013, 6, 26, 5, 19, 46, microseconds=542)
self.assertEqual(timestamp, expected_timestamp)
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import unittest
from collections import OrderedDict
from sentry.utils.canonical import CanonicalKeyView, CanonicalKeyDict
class CanonicalKeyViewTests(unittest.TestCase):
canonical_data = OrderedDict(
[
("release", "asdf"),
("exception", {"type": "DemoException"}),
("user", {"id": "DemoUser"}),
]
)
legacy_data = OrderedDict(
[
("release", "asdf"),
("sentry.interfaces.Exception", {"type": "DemoException"}),
("sentry.interfaces.User", {"id": "DemoUser"}),
]
)
mixed_data = OrderedDict(
[
("release", "asdf"),
("sentry.interfaces.User", {"id": "INVALID"}),
("exception", {"type": "DemoException"}),
("user", {"id": "DemoUser"}),
("sentry.interfaces.Exception", {"type": "INVALID"}),
]
)
def test_len(self):
assert len(CanonicalKeyView(self.canonical_data)) == 3
assert len(CanonicalKeyView(self.legacy_data)) == 3
assert len(CanonicalKeyView(self.mixed_data)) == 3
def test_iter(self):
assert CanonicalKeyView(self.canonical_data).keys() == ["release", "exception", "user"]
assert CanonicalKeyView(self.legacy_data).keys() == ["release", "exception", "user"]
assert CanonicalKeyView(self.mixed_data).keys() == ["release", "exception", "user"]
def test_contains(self):
assert "user" in CanonicalKeyView(self.canonical_data)
assert "user" in CanonicalKeyView(self.legacy_data)
assert "user" in CanonicalKeyView(self.mixed_data)
assert "sentry.interfaces.User" in CanonicalKeyView(self.canonical_data)
assert "sentry.interfaces.User" in CanonicalKeyView(self.legacy_data)
assert "sentry.interfaces.User" in CanonicalKeyView(self.mixed_data)
def test_getitem(self):
assert CanonicalKeyView(self.canonical_data)["user"] == {"id": "DemoUser"}
assert CanonicalKeyView(self.legacy_data)["user"] == {"id": "DemoUser"}
assert CanonicalKeyView(self.mixed_data)["user"] == {"id": "DemoUser"}
assert CanonicalKeyView(self.canonical_data)["sentry.interfaces.User"] == {"id": "DemoUser"}
assert CanonicalKeyView(self.legacy_data)["sentry.interfaces.User"] == {"id": "DemoUser"}
assert CanonicalKeyView(self.mixed_data)["sentry.interfaces.User"] == {"id": "DemoUser"}
class CanonicalKeyDictTests(unittest.TestCase):
canonical_data = {
"release": "asdf",
"exception": {"type": "DemoException"},
"user": {"id": "DemoUser"},
}
def test_canonical(self):
assert (
CanonicalKeyDict(
{
"release": "asdf",
"exception": {"type": "DemoException"},
"user": {"id": "DemoUser"},
}
)
== self.canonical_data
)
def test_legacy(self):
assert (
CanonicalKeyDict(
{
"release": "asdf",
"sentry.interfaces.Exception": {"type": "DemoException"},
"sentry.interfaces.User": {"id": "DemoUser"},
}
)
== self.canonical_data
)
def test_mixed(self):
assert (
CanonicalKeyDict(
{
"release": "asdf",
"exception": {"type": "DemoException"},
"user": {"id": "DemoUser"},
"sentry.interfaces.Exception": {"type": "INVALID"},
"sentry.interfaces.User": {"id": "INVALID"},
}
)
== self.canonical_data
)
def test_getitem_setitem(self):
d = CanonicalKeyDict({"user": {"id": "DemoUser"}})
d["user"] = {"id": "other"}
assert d["user"] == {"id": "other"}
assert d["sentry.interfaces.User"] == {"id": "other"}
d = CanonicalKeyDict({"user": {"id": "DemoUser"}})
d["sentry.interfaces.User"] = {"id": "other"}
assert d["user"] == {"id": "other"}
assert d["sentry.interfaces.User"] == {"id": "other"}
def test_delitem(self):
d = CanonicalKeyDict({"user": {"id": "DemoUser"}})
del d["user"]
assert d == {}
d = CanonicalKeyDict({"user": {"id": "DemoUser"}})
del d["sentry.interfaces.User"]
assert d == {}
def test_contains(self):
d = CanonicalKeyDict({"user": {"id": "DemoUser"}})
"user" in d
"sentry.interfaces.User" in d
def test_len(self):
assert (
len(
CanonicalKeyDict(
{
"release": "asdf",
"exception": {"type": "DemoException"},
"user": {"id": "DemoUser"},
"sentry.interfaces.Exception": {"type": "INVALID"},
"sentry.interfaces.User": {"id": "INVALID"},
}
)
)
== 3
)
class LegacyCanonicalKeyDictTests(unittest.TestCase):
canonical_data = {
"release": "asdf",
"sentry.interfaces.Exception": {"type": "DemoException"},
"sentry.interfaces.User": {"id": "DemoUser"},
}
def test_canonical(self):
assert (
CanonicalKeyDict(
{
"release": "asdf",
"exception": {"type": "DemoException"},
"user": {"id": "DemoUser"},
},
legacy=True,
)
== self.canonical_data
)
def test_legacy(self):
assert (
CanonicalKeyDict(
{
"release": "asdf",
"sentry.interfaces.Exception": {"type": "DemoException"},
"sentry.interfaces.User": {"id": "DemoUser"},
},
legacy=True,
)
== self.canonical_data
)
def test_mixed(self):
assert (
CanonicalKeyDict(
{
"release": "asdf",
"sentry.interfaces.Exception": {"type": "DemoException"},
"sentry.interfaces.User": {"id": "DemoUser"},
"exception": {"type": "INVALID"},
"user": {"id": "INVALID"},
},
legacy=True,
)
== self.canonical_data
)
def test_getitem_setitem(self):
d = CanonicalKeyDict({"user": {"id": "DemoUser"}}, legacy=True)
d["user"] = {"id": "other"}
assert d["user"] == {"id": "other"}
assert d["sentry.interfaces.User"] == {"id": "other"}
d = CanonicalKeyDict({"user": {"id": "DemoUser"}}, legacy=True)
d["sentry.interfaces.User"] = {"id": "other"}
assert d["user"] == {"id": "other"}
assert d["sentry.interfaces.User"] == {"id": "other"}
class DoubleAliasingTests(unittest.TestCase):
def test_canonical(self):
view = CanonicalKeyView({"logentry": "foo"})
assert len(view) == 1
assert view.keys() == ["logentry"]
assert "logentry" in view
assert "sentry.interfaces.Message" in view
assert "message" in view
assert view["logentry"] == "foo"
assert view["sentry.interfaces.Message"] == "foo"
assert view["message"] == "foo"
def test_legacy_first(self):
view = CanonicalKeyView({"sentry.interfaces.Message": "foo"})
assert len(view) == 1
assert view.keys() == ["logentry"]
assert "logentry" in view
assert "sentry.interfaces.Message" in view
assert "message" in view
assert view["logentry"] == "foo"
assert view["sentry.interfaces.Message"] == "foo"
assert view["message"] == "foo"
def test_legacy_second(self):
view = CanonicalKeyView({"message": "foo"})
assert len(view) == 1
assert view.keys() == ["logentry"]
assert "logentry" in view
assert "sentry.interfaces.Message" in view
assert "message" in view
assert view["logentry"] == "foo"
assert view["sentry.interfaces.Message"] == "foo"
assert view["message"] == "foo"
def test_override(self):
view = CanonicalKeyView({"logentry": "foo", "sentry.interfaces.Message": "bar"})
assert len(view) == 1
assert view.keys() == ["logentry"]
assert "logentry" in view
assert "sentry.interfaces.Message" in view
assert "message" in view
assert view["logentry"] == "foo"
assert view["sentry.interfaces.Message"] == "foo"
assert view["message"] == "foo"
def test_two_legacy(self):
view = CanonicalKeyView({"message": "bar", "sentry.interfaces.Message": "foo"})
assert len(view) == 1
assert view.keys() == ["logentry"]
assert "logentry" in view
assert "sentry.interfaces.Message" in view
assert "message" in view
assert view["logentry"] == "foo"
assert view["sentry.interfaces.Message"] == "foo"
assert view["message"] == "foo"
|
|
import bpy
from bpy.props import PointerProperty, StringProperty, BoolProperty, \
EnumProperty, IntProperty, IntVectorProperty, FloatProperty, FloatVectorProperty, \
CollectionProperty, BoolVectorProperty
from .. import rman_bl_nodes
from .. import rfb_icons
from ..rfb_utils.shadergraph_utils import is_renderman_nodetree
from ..rfb_utils import shadergraph_utils
from ..rfb_logger import rfb_log  # used by unregister() below
from ..rman_constants import RMAN_AREA_LIGHT_TYPES
class RENDERMAN_UL_Dspy_MetaData_List(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
layout.label(text=item.name)
class RendermanDspyMetaGroup(bpy.types.PropertyGroup):
name: StringProperty(name="Name", default="")
type: EnumProperty(name="Type",
items=[
('float', 'float', ''),
('int', 'int', ''),
('string', 'string', ''),
('v2f', 'v2f', ''),
('v2i', 'v2i', ''),
('v3f', 'v3f', ''),
('v3i', 'v3i', ''),
('box2f', 'box2f', ''),
('box2i', 'box2i', ''),
('m33f', 'm33f', ''),
('m44f', 'm44f', '')])
value_float: FloatProperty(name="Value", default=0.0)
value_int: IntProperty(name="Value", default=0)
value_string: StringProperty(name="Value", default="")
value_v2f: FloatVectorProperty(name="Value", size=2, default=(0.0, 0.0))
value_v3f: FloatVectorProperty(name="Value", size=3, default=(0.0, 0.0, 0.0))
value_v2i: IntVectorProperty(name="Value", size=2, default=(0, 0))
value_v3i: IntVectorProperty(name="Value", size=3, default=(0, 0, 0))
value_box2f: FloatVectorProperty(name="Value", size=4, default=(0.0, 0.0, 0.0, 0.0))
value_box2i: IntVectorProperty(name="Value", size=4, default=(0, 0, 0, 0))
value_m33f: FloatVectorProperty(name="Value", size=9, default=[0.0]*9)
value_m44f: FloatVectorProperty(name="Value", size=16, default=[0.0]*16)
class RendermanPluginSettings(bpy.types.PropertyGroup):
pass
class RendermanLightFilter(bpy.types.PropertyGroup):
def get_name(self):
if self.linked_filter_ob:
return self.linked_filter_ob.name
return ''
name: StringProperty(default='', get=get_name)
def update_linked_filter_ob(self, context):
pass
def validate_obj(self, ob):
if ob.type == 'LIGHT' and ob.data.renderman.renderman_light_role == 'RMAN_LIGHTFILTER':
return True
return False
linked_filter_ob: PointerProperty(name='Light Filter',
description='Light Filter',
type=bpy.types.Object,
update=update_linked_filter_ob,
poll=validate_obj
)
class RendermanPortalLightPointer(bpy.types.PropertyGroup):
def get_name(self):
if self.linked_portal_ob:
return self.linked_portal_ob.name
return ''
name: StringProperty(default='', get=get_name)
def update_linked_portal_ob(self, context):
if self.linked_portal_ob:
self.linked_portal_ob.update_tag(refresh={'DATA'})
def validate_obj(self, ob):
if ob.type == 'LIGHT':
rm = ob.data.renderman
if rm.renderman_light_role == 'RMAN_LIGHT' and rm.get_light_node_name() == 'PxrPortalLight':
return True
return False
linked_portal_ob: PointerProperty(name='Portal Light',
description='Portal Light',
type=bpy.types.Object,
update=update_linked_portal_ob,
poll=validate_obj
)
class RendermanLightSettings(bpy.types.PropertyGroup):
def get_light_node(self):
'''
Get the light shader node
'''
light = self.id_data
output = None
nt = light.node_tree
if not nt:
return None
output = is_renderman_nodetree(light)
if not output:
return None
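# Assumption from the output node's socket layout: input 1 carries the
# light shader and input 3 the light filter.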
if self.renderman_light_role == 'RMAN_LIGHT':
socket = output.inputs[1]
if socket.is_linked:
return socket.links[0].from_node
else:
socket = output.inputs[3]
if socket.is_linked:
return socket.links[0].from_node
return None
def get_light_node_name(self):
'''
Get light shader name
'''
node = self.get_light_node()
if node:
return node.bl_label
return ''
light_node: StringProperty(
name="Light Node",
default='')
def update_vis(self, context):
light = self.id_data
use_renderman_node: BoolProperty(
name="Use RenderMans Light Node",
description="Will enable RenderMan light Nodes, opening more options",
default=False
)
def renderman_light_role_update(self, context):
if self.renderman_light_role == 'RMAN_LIGHT':
self.renderman_light_shader_update(context)
else:
self.renderman_light_filter_shader_update(context)
renderman_light_role: EnumProperty(
name="Light Type",
items=[('RMAN_LIGHT', 'Light', 'RenderMan Light'),
('RMAN_LIGHTFILTER', 'Filter', 'RenderMan Light Filter')],
update=renderman_light_role_update,
default='RMAN_LIGHT'
)
def renderman_light_shader_update(self, context):
light = self.id_data
light_shader = self.get_light_node_name()
if hasattr(light, 'size'):
light.size = 0.0
if light_shader not in RMAN_AREA_LIGHT_TYPES:
light.type = 'POINT'
def get_rman_light_shaders(self, context):
items = []
i = 0
rman_light_icon = rfb_icons.get_light_icon("PxrRectLight")
items.append(('PxrRectLight', 'PxrRectLight', '', rman_light_icon.icon_id, i))
for n in rman_bl_nodes.__RMAN_LIGHT_NODES__:
if n.name != 'PxrRectLight':
i += 1
light_icon = rfb_icons.get_light_icon(n.name)
items.append( (n.name, n.name, '', light_icon.icon_id, i))
return items
renderman_light_shader: EnumProperty(
name="RenderMan Light",
items=get_rman_light_shaders,
update=renderman_light_shader_update
)
def renderman_light_filter_shader_update(self, context):
light = self.id_data
light_shader = self.get_light_node_name()
if hasattr(light, 'size'):
light.size = 0.0
light.type = 'POINT'
def get_rman_light_filter_shaders(self, context):
items = []
i = 0
rman_light_icon = rfb_icons.get_lightfilter_icon("_PxrBlockerLightFilter")
items.append(('PxrBlockerLightFilter', 'PxrBlockerLightFilter', '', rman_light_icon.icon_id, i))
for n in rman_bl_nodes.__RMAN_LIGHTFILTER_NODES__:
if n.name != 'PxrBlockerLightFilter':
i += 1
light_icon = rfb_icons.get_lightfilter_icon(n.name)
items.append( (n.name, n.name, '', light_icon.icon_id, i))
return items
renderman_light_filter_shader: EnumProperty(
name="RenderMan Light Filter",
items=get_rman_light_filter_shaders,
update=renderman_light_filter_shader_update
)
light_filters: CollectionProperty(
type=RendermanLightFilter
)
light_filters_index: IntProperty(min=-1, default=-1)
portal_lights: CollectionProperty(type=RendermanPortalLightPointer)
def update_portal_lights_index(self, context):
if self.portal_lights_index < 0:
return
if self.portal_lights_index > len(self.portal_lights) - 1:
return
portal_ptr = self.portal_lights[self.portal_lights_index]
if not portal_ptr.linked_portal_ob:
self.portal_lights.remove(self.portal_lights_index)
self.portal_lights_index = -1
portal_lights_index: IntProperty(min=-1, default=-1, update=update_portal_lights_index)
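# update_dome_light_portal keeps dome/portal links in sync: pointing a portal
# at a dome registers it in the dome's portal_lights collection (reusing an
# empty slot when one exists); clearing the pointer scans dome lights and
# removes the stale back-reference.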
def update_dome_light_portal(self, context):
if self.dome_light_portal:
candidate = None
dome_light = self.dome_light_portal
rm = dome_light.data.renderman
for portal_ptr in rm.portal_lights:
if not portal_ptr.linked_portal_ob:
candidate = portal_ptr
break
if not candidate:
candidate = rm.portal_lights.add()
ob = context.object
candidate.linked_portal_ob = ob
self.dome_light_portal.update_tag(refresh={'DATA'})
ob.update_tag(refresh={'DATA'})
else:
# try and remove the portal light on the dome light
for obj in bpy.data.objects:
if not obj.type == 'LIGHT':
continue
rm = obj.data.renderman
if rm.get_light_node_name() != 'PxrDomeLight':
continue
if len(rm.portal_lights) < 1:
continue
for i, portal_ptr in enumerate(rm.portal_lights):
if not portal_ptr.linked_portal_ob:
continue
portal = portal_ptr.linked_portal_ob
rm = portal.data.renderman
if not rm.dome_light_portal:
portal_ptr.linked_portal_ob = None
setattr(rm, 'portal_lights_index', i)
def validate_dome_light(self, ob):
if ob.type == 'LIGHT':
rm = ob.data.renderman
if rm.renderman_light_role == 'RMAN_LIGHT' and rm.get_light_node_name() == 'PxrDomeLight':
return True
return False
dome_light_portal: PointerProperty(name="Dome Light",
type=bpy.types.Object,
description="Dome light to parent this portal light to.",
poll=validate_dome_light,
update=update_dome_light_portal)
rman_coneAngleDepth: FloatProperty(name="Cone Angle Depth",
default=5.0,
min=0.0,
max=10.0,
precision=3,
description="The depth of the cone angle drawn in the viewport"
)
rman_coneAngleOpacity: FloatProperty(name="Cone Angle Opacity",
default=0.5,
min=0.1,
max=1.0,
precision=3,
description="The opaqueness of the cone angle drawn in the viewport"
)
light_primary_visibility: BoolProperty(
name="Light Primary Visibility",
description="Camera visibility for this light",
update=update_vis,
default=True)
mute: BoolProperty(
name="Mute",
description="Turn off this light",
default=False)
def update_solo(self, context):
light = self.id_data
scene = context.scene
# if scene solo is already on, find the previous solo light and turn it off
scene.renderman.solo_light = self.solo
if self.solo:
if scene.renderman.solo_light:
for ob in scene.objects:
if shadergraph_utils.is_rman_light(ob, include_light_filters=False):
rm = shadergraph_utils.get_rman_light_properties_group(ob)
if rm != self and rm.solo:
rm.solo = False
break
solo: BoolProperty(
name="Solo",
update=update_solo,
description="Turn on only this light",
default=False)
renderman_lock_light_type: BoolProperty(
name="Lock Type",
default=False,
description="Lock from changing light shader and light role."
)
# OLD PROPERTIES
shadingrate: FloatProperty(
name="Light Shading Rate",
description="Shading Rate for lights. Keep this high unless banding or pixellation occurs on detailed light maps",
default=100.0)
# illuminate
illuminates_by_default: BoolProperty(
name="Illuminates by default",
description="The light illuminates objects by default",
default=True)
renderman_type: EnumProperty(
name="Light Type",
items=[
('AREA', 'Light', 'Area Light'),
('ENV', 'Dome', 'Dome Light'),
('SKY', 'Env Daylight', 'Simulated Sky'),
('DIST', 'Distant', 'Distant Light'),
('SPOT', 'Spot', 'Spot Light'),
('POINT', 'Point', 'Point Light'),
('PORTAL', 'Portal', 'Portal Light'),
('FILTER', 'Filter', 'RenderMan Light Filter'),
('UPDATED', 'UPDATED', '')],
default='UPDATED'
)
area_shape: EnumProperty(
name="Area Shape",
items=[('rect', 'Rectangle', 'Rectangle'),
('disk', 'Disk', 'Disk'),
('sphere', 'Sphere', 'Sphere'),
('cylinder', 'Cylinder', 'Cylinder')],
default='rect'
)
filter_type: EnumProperty(
name="Area Shape",
items=[('barn', 'Barn', 'Barn'),
('blocker', 'Blocker', 'Blocker'),
#('combiner', 'Combiner', 'Combiner'),
('cookie', 'Cookie', 'Cookie'),
('gobo', 'Gobo', 'Gobo'),
('intmult', 'Multiply', 'Multiply'),
('ramp', 'Ramp', 'Ramp'),
('rod', 'Rod', 'Rod')
],
default='blocker'
)
class RendermanDisplayFilterSettings(bpy.types.PropertyGroup):
def get_filter_name(self):
return self.filter_type.replace('_settings', '')
def get_filter_node(self):
return getattr(self, self.filter_type + '_settings')
def displayfilter_items(self, context):
items = []
for n in rman_bl_nodes.__RMAN_DISPLAYFILTER_NODES__ :
items.append((n.name, n.name, ''))
return items
filter_type: EnumProperty(items=displayfilter_items, name='Filter')
class RendermanSampleFilterSettings(bpy.types.PropertyGroup):
def get_filter_name(self):
return self.filter_type.replace('_settings', '')
def get_filter_node(self):
return getattr(self, self.filter_type + '_settings')
def samplefilter_items(self, context):
items = []
for n in rman_bl_nodes.__RMAN_SAMPLEFILTER_NODES__ :
items.append((n.name, n.name, ''))
return items
filter_type: EnumProperty(items=samplefilter_items, name='Filter')
classes = [
RENDERMAN_UL_Dspy_MetaData_List,
RendermanDspyMetaGroup,
RendermanLightFilter,
RendermanPortalLightPointer,
RendermanLightSettings,
RendermanPluginSettings,
RendermanDisplayFilterSettings,
RendermanSampleFilterSettings
]
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Light.renderman = PointerProperty(
type=RendermanLightSettings, name="Renderman Light Settings")
# light settings for mesh lights, which are part of a material
bpy.types.Material.renderman_light = PointerProperty(
type=RendermanLightSettings, name="Renderman Light Settings")
def unregister():
for cls in classes:
try:
bpy.utils.unregister_class(cls)
except RuntimeError:
rfb_log().debug('Could not unregister class: %s' % str(cls))
|
|
#!/usr/bin/env python
# Copyright (c) 2002-2007 ActiveState Software Inc.
# See LICENSE.txt for license details.
# Author:
# Trent Mick (TrentM@ActiveState.com)
# Home:
# http://trentm.com/projects/which/
r"""Find the full path to commands.
which(command, path=None, verbose=0, exts=None)
Return the full path to the first match of the given command on the
path.
whichall(command, path=None, verbose=0, exts=None)
Return a list of full paths to all matches of the given command on
the path.
whichgen(command, path=None, verbose=0, exts=None)
Return a generator which will yield full paths to all matches of the
given command on the path.
By default the PATH environment variable is searched (as well as, on
Windows, the AppPaths key in the registry), but a specific 'path' list
to search may be specified as well. On Windows, the PATHEXT environment
variable is applied as appropriate.
If "verbose" is true then a tuple of the form
(<fullpath>, <matched-where-description>)
is returned for each match. The latter element is a textual description
of where the match was found. For example:
from PATH element 0
from HKLM\SOFTWARE\...\perl.exe
"""
from __future__ import print_function
_cmdlnUsage = """
Show the full path of commands.
Usage:
which [<options>...] [<command-name>...]
Options:
-h, --help Print this help and exit.
-V, --version Print the version info and exit.
-a, --all Print *all* matching paths.
-v, --verbose Print out how matches were located and
show near misses on stderr.
-q, --quiet Just print out matches. I.e., do not print out
near misses.
-p <altpath>, --path=<altpath>
An alternative path (list of directories) may
be specified for searching.
-e <exts>, --exts=<exts>
Specify a list of extensions to consider instead
of the usual list (';'-separated list, Windows
only).
Show the full path to the program that would be run for each given
command name, if any. Which, like GNU's which, returns the number of
failed arguments, or -1 when no <command-name> was given.
Near misses include duplicates, non-regular files and (on Un*x)
files without executable access.
"""
__revision__ = "$Id$"
__version_info__ = (1, 1, 3)
__version__ = '.'.join(map(str, __version_info__))
__all__ = ["which", "whichall", "whichgen", "WhichError"]
import os
import sys
import getopt
import stat
#---- exceptions
class WhichError(Exception):
pass
#---- internal support stuff
def _getRegisteredExecutable(exeName):
"""Windows allow application paths to be registered in the registry."""
registered = None
if sys.platform.startswith('win'):
if os.path.splitext(exeName)[1].lower() != '.exe':
exeName += '.exe'
try:
import _winreg
except ImportError:
import winreg as _winreg  # Python 3
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\
exeName
value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)
registered = (value, "from HKLM\\"+key)
except _winreg.error:
pass
if registered and not os.path.exists(registered[0]):
registered = None
return registered
def _samefile(fname1, fname2):
if sys.platform.startswith('win'):
return ( os.path.normpath(os.path.normcase(fname1)) ==\
os.path.normpath(os.path.normcase(fname2)) )
else:
return os.path.samefile(fname1, fname2)
def _cull(potential, matches, verbose=0):
"""Cull inappropriate matches. Possible reasons:
- a duplicate of a previous match
- not a disk file
- not executable (non-Windows)
If 'potential' is approved it is returned and added to 'matches'.
Otherwise, None is returned.
"""
for match in matches: # don't yield duplicates
if _samefile(potential[0], match[0]):
if verbose:
sys.stderr.write("duplicate: %s (%s)\n" % potential)
return None
else:
if not stat.S_ISREG(os.stat(potential[0]).st_mode):
if verbose:
sys.stderr.write("not a regular file: %s (%s)\n" % potential)
elif sys.platform != "win32" \
and not os.access(potential[0], os.X_OK):
if verbose:
sys.stderr.write("no executable access: %s (%s)\n"\
% potential)
else:
matches.append(potential)
return potential
#---- module API
def whichgen(command, path=None, verbose=0, exts=None):
"""Return a generator of full paths to the given command.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
This method returns a generator which yields either full paths to
the given command or, if verbose, tuples of the form (<path to
command>, <where path found>).
"""
matches = []
if path is None:
usingGivenPath = 0
path = os.environ.get("PATH", "").split(os.pathsep)
if sys.platform.startswith("win"):
path.insert(0, os.curdir) # implied by Windows shell
else:
usingGivenPath = 1
# Windows has the concept of a list of extensions (PATHEXT env var).
if sys.platform.startswith("win"):
if exts is None:
exts = os.environ.get("PATHEXT", "").split(os.pathsep)
# If '.exe' is not in exts then this is likely Win9x or a
# bogus PATHEXT; fall back to a reasonable default.
for ext in exts:
if ext.lower() == ".exe":
break
else:
exts = ['.COM', '.EXE', '.BAT']
elif not isinstance(exts, list):
raise TypeError("'exts' argument must be a list or None")
else:
if exts is not None:
raise WhichError("'exts' argument is not supported on "\
"platform '%s'" % sys.platform)
exts = []
# File name cannot have path separators because PATH lookup does not
# work that way.
if os.sep in command or (os.altsep and os.altsep in command):
if os.path.exists(command):
match = _cull((command, "explicit path given"), matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
else:
for i in range(len(path)):
dirName = path[i]
# On windows the dirName *could* be quoted, drop the quotes
if sys.platform.startswith("win") and len(dirName) >= 2\
and dirName[0] == '"' and dirName[-1] == '"':
dirName = dirName[1:-1]
for ext in ['']+exts:
absName = os.path.abspath(
os.path.normpath(os.path.join(dirName, command+ext)))
if os.path.isfile(absName):
if usingGivenPath:
fromWhere = "from given path element %d" % i
elif not sys.platform.startswith("win"):
fromWhere = "from PATH element %d" % i
elif i == 0:
fromWhere = "from current directory"
else:
fromWhere = "from PATH element %d" % (i-1)
match = _cull((absName, fromWhere), matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
match = _getRegisteredExecutable(command)
if match is not None:
match = _cull(match, matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
def which(command, path=None, verbose=0, exts=None):
"""Return the full path to the first match of the given command on
the path.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned. The second
element is a textual description of where the match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
If no match is found for the command, a WhichError is raised.
"""
try:
match = next(whichgen(command, path, verbose, exts))
except StopIteration:
raise WhichError("Could not find '%s' on the path." % command)
return match
def whichall(command, path=None, verbose=0, exts=None):
"""Return a list of full paths to all matches of the given command
on the path.
"command" is a the name of the executable to search for.
"path" is an optional alternate path list to search. The default it
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
"""
return list( whichgen(command, path, verbose, exts) )
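# A minimal usage sketch (hypothetical paths; actual results depend on PATH):
#
#   import which
#   which.which("python")      # -> '/usr/bin/python' (first match, or WhichError)
#   which.whichall("python")   # -> all matches, in PATH order
#   for p in which.whichgen("python"): print(p)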
#---- mainline
def main(argv):
all = 0
verbose = 0
altpath = None
exts = None
try:
optlist, args = getopt.getopt(argv[1:], 'haVvqp:e:',
['help', 'all', 'version', 'verbose', 'quiet', 'path=', 'exts='])
except getopt.GetoptError as msg:
sys.stderr.write("which: error: %s. Your invocation was: %s\n"\
% (msg, argv))
sys.stderr.write("Try 'which --help'.\n")
return 1
for opt, optarg in optlist:
if opt in ('-h', '--help'):
print(_cmdlnUsage)
return 0
elif opt in ('-V', '--version'):
print("which %s" % __version__)
return 0
elif opt in ('-a', '--all'):
all = 1
elif opt in ('-v', '--verbose'):
verbose = 1
elif opt in ('-q', '--quiet'):
verbose = 0
elif opt in ('-p', '--path'):
if optarg:
altpath = optarg.split(os.pathsep)
else:
altpath = []
elif opt in ('-e', '--exts'):
if optarg:
exts = optarg.split(os.pathsep)
else:
exts = []
if len(args) == 0:
return -1
failures = 0
for arg in args:
#print "debug: search for %r" % arg
nmatches = 0
for match in whichgen(arg, path=altpath, verbose=verbose, exts=exts):
if verbose:
print("%s (%s)" % match)
else:
print(match)
nmatches += 1
if not all:
break
if not nmatches:
failures += 1
return failures
if __name__ == "__main__":
sys.exit( main(sys.argv) )
|
|
from itertools import product as cartes
from sympy import (limit, exp, oo, log, sqrt, Limit, sin, floor, cos, ceiling,
atan, gamma, Symbol, S, pi, Integral, cot, Rational, I, zoo,
tan, integrate, Sum, sign)
from sympy.series.limits import heuristics
from sympy.series.order import Order
from sympy.abc import x, y, z
from sympy.utilities.pytest import XFAIL, raises
def test_basic1():
assert limit(x, x, oo) == oo
assert limit(x, x, -oo) == -oo
assert limit(-x, x, oo) == -oo
assert limit(x**2, x, -oo) == oo
assert limit(-x**2, x, oo) == -oo
assert limit(x*log(x), x, 0, dir="+") == 0
assert limit(1/x, x, oo) == 0
assert limit(exp(x), x, oo) == oo
assert limit(-exp(x), x, oo) == -oo
assert limit(exp(x)/x, x, oo) == oo
assert limit(1/x - exp(-x), x, oo) == 0
assert limit(x + 1/x, x, oo) == oo
assert limit(x - x**2, x, oo) == -oo
assert limit((1 + x)**(1 + sqrt(2)), x, 0) == 1
assert limit((1 + x)**oo, x, 0) == oo
assert limit((1 + x)**oo, x, 0, dir='-') == 0
assert limit((1 + x + y)**oo, x, 0, dir='-') == (1 + y)**(oo)
assert limit(y/x/log(x), x, 0) == -oo*sign(y)
assert limit(cos(x + y)/x, x, 0) == sign(cos(y))*oo
raises(NotImplementedError, lambda: limit(Sum(1/x, (x, 1, y)) -
log(y), y, oo))
raises(NotImplementedError, lambda: limit(Sum(1/x, (x, 1, y)) - 1/y, y, oo))
assert limit(gamma(1/x + 3), x, oo) == 2
assert limit(S.NaN, x, -oo) == S.NaN
assert limit(Order(2)*x, x, S.NaN) == S.NaN
assert limit(1/(x - 1), x, 1, dir="+") == oo
assert limit(1/(x - 1), x, 1, dir="-") == -oo
assert limit(1/(5 - x)**3, x, 5, dir="+") == -oo
assert limit(1/(5 - x)**3, x, 5, dir="-") == oo
assert limit(1/sin(x), x, pi, dir="+") == -oo
assert limit(1/sin(x), x, pi, dir="-") == oo
assert limit(1/cos(x), x, pi/2, dir="+") == -oo
assert limit(1/cos(x), x, pi/2, dir="-") == oo
assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="+") == oo
assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="-") == -oo
assert limit(1/cot(x)**3, x, (3*pi/2), dir="+") == -oo
assert limit(1/cot(x)**3, x, (3*pi/2), dir="-") == oo
# approaching 0
# from dir="+"
assert limit(1 + 1/x, x, 0) == oo
# from dir='-'
# Add
assert limit(1 + 1/x, x, 0, dir='-') == -oo
# Pow
assert limit(x**(-2), x, 0, dir='-') == oo
assert limit(x**(-3), x, 0, dir='-') == -oo
assert limit(1/sqrt(x), x, 0, dir='-') == (-oo)*I
assert limit(x**2, x, 0, dir='-') == 0
assert limit(sqrt(x), x, 0, dir='-') == 0
assert limit(x**-pi, x, 0, dir='-') == oo*sign((-1)**(-pi))
assert limit((1 + cos(x))**oo, x, 0) == oo
def test_basic2():
assert limit(x**x, x, 0, dir="+") == 1
assert limit((exp(x) - 1)/x, x, 0) == 1
assert limit(1 + 1/x, x, oo) == 1
assert limit(-exp(1/x), x, oo) == -1
assert limit(x + exp(-x), x, oo) == oo
assert limit(x + exp(-x**2), x, oo) == oo
assert limit(x + exp(-exp(x)), x, oo) == oo
assert limit(13 + 1/x - exp(-x), x, oo) == 13
def test_basic3():
assert limit(1/x, x, 0, dir="+") == oo
assert limit(1/x, x, 0, dir="-") == -oo
def test_basic4():
assert limit(2*x + y*x, x, 0) == 0
assert limit(2*x + y*x, x, 1) == 2 + y
assert limit(2*x**8 + y*x**(-3), x, -2) == 512 - y/8
assert limit(sqrt(x + 1) - sqrt(x), x, oo) == 0
assert integrate(1/(x**3 + 1), (x, 0, oo)) == 2*pi*sqrt(3)/9
def test_issue_3885():
assert limit(x*y + x*z, z, 2) == x*y + 2*x
def test_Limit():
assert Limit(sin(x)/x, x, 0) != 1
assert Limit(sin(x)/x, x, 0).doit() == 1
def test_floor():
assert limit(floor(x), x, -2, "+") == -2
assert limit(floor(x), x, -2, "-") == -3
assert limit(floor(x), x, -1, "+") == -1
assert limit(floor(x), x, -1, "-") == -2
assert limit(floor(x), x, 0, "+") == 0
assert limit(floor(x), x, 0, "-") == -1
assert limit(floor(x), x, 1, "+") == 1
assert limit(floor(x), x, 1, "-") == 0
assert limit(floor(x), x, 2, "+") == 2
assert limit(floor(x), x, 2, "-") == 1
assert limit(floor(x), x, 248, "+") == 248
assert limit(floor(x), x, 248, "-") == 247
def test_floor_requires_robust_assumptions():
assert limit(floor(sin(x)), x, 0, "+") == 0
assert limit(floor(sin(x)), x, 0, "-") == -1
assert limit(floor(cos(x)), x, 0, "+") == 0
assert limit(floor(cos(x)), x, 0, "-") == 0
assert limit(floor(5 + sin(x)), x, 0, "+") == 5
assert limit(floor(5 + sin(x)), x, 0, "-") == 4
assert limit(floor(5 + cos(x)), x, 0, "+") == 5
assert limit(floor(5 + cos(x)), x, 0, "-") == 5
def test_ceiling():
assert limit(ceiling(x), x, -2, "+") == -1
assert limit(ceiling(x), x, -2, "-") == -2
assert limit(ceiling(x), x, -1, "+") == 0
assert limit(ceiling(x), x, -1, "-") == -1
assert limit(ceiling(x), x, 0, "+") == 1
assert limit(ceiling(x), x, 0, "-") == 0
assert limit(ceiling(x), x, 1, "+") == 2
assert limit(ceiling(x), x, 1, "-") == 1
assert limit(ceiling(x), x, 2, "+") == 3
assert limit(ceiling(x), x, 2, "-") == 2
assert limit(ceiling(x), x, 248, "+") == 249
assert limit(ceiling(x), x, 248, "-") == 248
def test_ceiling_requires_robust_assumptions():
assert limit(ceiling(sin(x)), x, 0, "+") == 1
assert limit(ceiling(sin(x)), x, 0, "-") == 0
assert limit(ceiling(cos(x)), x, 0, "+") == 1
assert limit(ceiling(cos(x)), x, 0, "-") == 1
assert limit(ceiling(5 + sin(x)), x, 0, "+") == 6
assert limit(ceiling(5 + sin(x)), x, 0, "-") == 5
assert limit(ceiling(5 + cos(x)), x, 0, "+") == 6
assert limit(ceiling(5 + cos(x)), x, 0, "-") == 6
def test_atan():
x = Symbol("x", real=True)
assert limit(atan(x)*sin(1/x), x, 0) == 0
assert limit(atan(x) + sqrt(x + 1) - sqrt(x), x, oo) == pi/2
def test_abs():
assert limit(abs(x), x, 0) == 0
assert limit(abs(sin(x)), x, 0) == 0
assert limit(abs(cos(x)), x, 0) == 1
assert limit(abs(sin(x + 1)), x, 0) == sin(1)
def test_heuristic():
x = Symbol("x", real=True)
assert heuristics(sin(1/x) + atan(x), x, 0, '+') == sin(oo)
assert limit(log(2 + sqrt(atan(x))*sqrt(sin(1/x))), x, 0) == log(2)
def test_issue_3871():
z = Symbol("z", positive=True)
f = -1/z*exp(-z*x)
assert limit(f, x, oo) == 0
assert f.limit(x, oo) == 0
def test_exponential():
n = Symbol('n')
x = Symbol('x', real=True)
assert limit((1 + x/n)**n, n, oo) == exp(x)
assert limit((1 + x/(2*n))**n, n, oo) == exp(x/2)
assert limit((1 + x/(2*n + 1))**n, n, oo) == exp(x/2)
assert limit(((x - 1)/(x + 1))**x, x, oo) == exp(-2)
assert limit(1 + (1 + 1/x)**x, x, oo) == 1 + S.Exp1
@XFAIL
def test_exponential2():
n = Symbol('n')
assert limit((1 + x/(n + sin(n)))**n, n, oo) == exp(x)
def test_doit():
f = Integral(2 * x, x)
l = Limit(f, x, oo)
assert l.doit() == oo
@XFAIL
def test_doit2():
f = Integral(2 * x, x)
l = Limit(f, x, oo)
# limit() breaks on the contained Integral.
assert l.doit(deep=False) == l
def test_bug693a():
assert sin(sin(x + 1) + 1).limit(x, 0) == sin(sin(1) + 1)
def test_issue_3792():
assert limit( (1 - cos(x))/x**2, x, S(1)/2) == 4 - 4*cos(S(1)/2)
assert limit(sin(sin(x + 1) + 1), x, 0) == sin(1 + sin(1))
assert limit(abs(sin(x + 1) + 1), x, 0) == 1 + sin(1)
def test_issue_4090():
assert limit(1/(x + 3), x, 2) == S(1)/5
assert limit(1/(x + pi), x, 2) == S(1)/(2 + pi)
assert limit(log(x)/(x**2 + 3), x, 2) == log(2)/7
assert limit(log(x)/(x**2 + pi), x, 2) == log(2)/(4 + pi)
def test_issue_4547():
assert limit(cot(x), x, 0, dir='+') == oo
assert limit(cot(x), x, pi/2, dir='+') == 0
def test_issue_5164():
assert limit(x**0.5, x, oo) == oo**0.5 == oo
assert limit(x**0.5, x, 16) == S(16)**0.5
assert limit(x**0.5, x, 0) == 0
assert limit(x**(-0.5), x, oo) == 0
assert limit(x**(-0.5), x, 4) == S(4)**(-0.5)
def test_issue_5183():
# using list(...) so py.test can recalculate values
tests = list(cartes([x, -x],
[-1, 1],
[2, 3, Rational(1, 2), Rational(2, 3)],
['-', '+']))
results = (oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3), oo,
0, 0, 0, 0, 0, 0, 0, 0,
oo, oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3),
0, 0, 0, 0, 0, 0, 0, 0)
assert len(tests) == len(results)
for i, (args, res) in enumerate(zip(tests, results)):
y, s, e, d = args
eq = y**(s*e)
try:
assert limit(eq, x, 0, dir=d) == res
except AssertionError:
if 0: # change to 1 if you want to see the failing tests
print()
print(i, res, eq, d, limit(eq, x, 0, dir=d))
else:
assert None
def test_issue_5184():
assert limit(sin(x)/x, x, oo) == 0
assert limit(atan(x), x, oo) == pi/2
assert limit(gamma(x), x, oo) == oo
assert limit(cos(x)/x, x, oo) == 0
assert limit(gamma(x), x, Rational(1, 2)) == sqrt(pi)
r = Symbol('r', real=True, finite=True)
assert limit(r*sin(1/r), r, 0) == 0
def test_issue_5229():
assert limit((1 + y)**(1/y) - S.Exp1, y, 0) == 0
def test_issue_4546():
# using list(...) so py.test can recalculate values
tests = list(cartes([cot, tan],
[-pi/2, 0, pi/2, pi, 3*pi/2],
['-', '+']))
results = (0, 0, -oo, oo, 0, 0, -oo, oo, 0, 0,
oo, -oo, 0, 0, oo, -oo, 0, 0, oo, -oo)
assert len(tests) == len(results)
for i, (args, res) in enumerate(zip(tests, results)):
f, l, d = args
eq = f(x)
try:
assert limit(eq, x, l, dir=d) == res
except AssertionError:
if 0: # change to 1 if you want to see the failing tests
print()
print(i, res, eq, l, d, limit(eq, x, l, dir=d))
else:
assert None
def test_issue_3934():
assert limit((1 + x**log(3))**(1/x), x, 0) == 1
assert limit((5**(1/x) + 3**(1/x))**x, x, 0) == 5
def test_calculate_series():
# needs gruntz calculate_series to go to n = 32
assert limit(x**(S(77)/3)/(1 + x**(S(77)/3)), x, oo) == 1
# needs gruntz calculate_series to go to n = 128
assert limit(x**101.1/(1 + x**101.1), x, oo) == 1
def test_issue_5955():
assert limit((x**16)/(1 + x**16), x, oo) == 1
assert limit((x**100)/(1 + x**100), x, oo) == 1
assert limit((x**1885)/(1 + x**1885), x, oo) == 1
assert limit((x**1000/((x + 1)**1000 + exp(-x))), x, oo) == 1
def test_newissue():
assert limit(exp(1/sin(x))/exp(cot(x)), x, 0) == 1
def test_extended_real_line():
assert limit(x - oo, x, oo) == -oo
assert limit(oo - x, x, -oo) == oo
assert limit(x**2/(x - 5) - oo, x, oo) == -oo
assert limit(1/(x + sin(x)) - oo, x, 0) == -oo
assert limit(oo/x, x, oo) == oo
assert limit(x - oo + 1/x, x, oo) == -oo
assert limit(x - oo + 1/x, x, 0) == -oo
@XFAIL
def test_order_oo():
from sympy import C
x = Symbol('x', positive=True, finite=True)
assert C.Order(x)*oo != C.Order(1, x)
assert limit(oo/(x**2 - 4), x, oo) == oo
def test_issue_5436():
raises(NotImplementedError, lambda: limit(exp(x*y), x, oo))
raises(NotImplementedError, lambda: limit(exp(-x*y), x, oo))
def test_Limit_dir():
raises(TypeError, lambda: Limit(x, x, 0, dir=0))
raises(ValueError, lambda: Limit(x, x, 0, dir='0'))
def test_polynomial():
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, oo) == 1
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, -oo) == 1
def test_rational():
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, oo) == (z - 1)/(y*z)
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, -oo) == (z - 1)/(y*z)
def test_issue_5740():
assert limit(log(x)*z - log(2*x)*y, x, 0) == oo*sign(y - z)
def test_issue_6366():
n = Symbol('n', integer=True, positive=True)
r = (n + 1)*x**(n + 1)/(x**(n + 1) - 1) - x/(x - 1)
assert limit(r, x, 1).simplify() == n/2
def test_factorial():
from sympy import factorial, E
f = factorial(x)
assert limit(f, x, oo) == oo
assert limit(x/f, x, oo) == 0
# see Stirling's approximation:
# http://en.wikipedia.org/wiki/Stirling's_approximation
assert limit(f/(sqrt(2*pi*x)*(x/E)**x), x, oo) == 1
assert limit(f, x, -oo) == factorial(-oo)
assert limit(f, x, x**2) == factorial(x**2)
assert limit(f, x, -x**2) == factorial(-x**2)
def test_issue_6560():
e = 5*x**3/4 - 3*x/4 + (y*(3*x**2/2 - S(1)/2) + \
35*x**4/8 - 15*x**2/4 + S(3)/8)/(2*(y + 1))
assert limit(e, y, oo) == (5*x**3 + 3*x**2 - 3*x - 1)/4
def test_issue_5172():
n = Symbol('n')
r = Symbol('r', positive=True)
c = Symbol('c')
p = Symbol('p', positive=True)
m = Symbol('m', negative=True)
expr = ((2*n*(n - r + 1)/(n + r*(n - r + 1)))**c + \
(r - 1)*(n*(n - r + 2)/(n + r*(n - r + 1)))**c - n)/(n**c - n)
expr = expr.subs(c, c + 1)
raises(NotImplementedError, lambda: limit(expr, n, oo))
assert limit(expr.subs(c, m), n, oo) == 1
assert limit(expr.subs(c, p), n, oo).simplify() == \
(2**(p + 1) + r - 1)/(r + 1)**(p + 1)
def test_issue_7088():
a = Symbol('a')
assert limit(sqrt(x/(x + a)), x, oo) == 1
def test_issue_6364():
a = Symbol('a')
e = z/(1 - sqrt(1 + z)*sin(a)**2 - sqrt(1 - z)*cos(a)**2)
assert limit(e, z, 0).simplify() == 2/cos(2*a)
def test_issue_4099():
a = Symbol('a')
assert limit(a/x, x, 0) == oo*sign(a)
assert limit(-a/x, x, 0) == -oo*sign(a)
assert limit(-a*x, x, oo) == -oo*sign(a)
assert limit(a*x, x, oo) == oo*sign(a)
def test_issue_4503():
dx = Symbol('dx')
assert limit((sqrt(1 + exp(x + dx)) - sqrt(1 + exp(x)))/dx, dx, 0) == \
exp(x)/(2*sqrt(exp(x) + 1))
|
|
from . import Pointer, Structure, Reference, Index, const_index, is_aggregate
from .. import llvm
import ctypes
try:
import numpy as np
except ImportError:
np = None
__all__ = ["Any", "Array", "FastSlice", "Slice"]
class _AnyClass(object):
def __repr__(self):
return "Any"
#: Used in slice or array specification to indicate a variable shape dimension.
Any = _AnyClass()
class _ItemAccessor(object):
"""Mixin for common Array/Slice item accessing routines."""
def emit_getitem(self, builder, v, i):
if len(i) < len(self.shape):
return self._emit_subslice(builder, v, i)
else:
gep = self._item_gep(builder, v, i)
if is_aggregate(self.element_type):
return gep, Reference(self.element_type)
else:
v = llvm.BuildLoad(builder, gep, "getitem")
set_tbaa(v, "n2o.{0}.element".format(self.tag))
return v, self.element_type
def emit_setitem(self, builder, v, i, e):
if not llvm.types_equal(self.element_type.llvm_type, llvm.TypeOf(e)):
# FIXME because we don't have e's nitrous type, for now just state
# what the type *should* be for assignment to succeed.
raise TypeError("Element value must be a(n) {0}".format(self.element_type))
gep = self._item_gep(builder, v, i)
v = llvm.BuildStore(builder, e, gep)
set_tbaa(v, "n2o.{0}.element".format(self.tag))
def _emit_subslice(self, builder, v, i):
"""Emits a sub-slice based on partial index *i*"""
from ..function import entry_alloca
SSTy = Slice(self.element_type, self.shape[len(i):])
ss = entry_alloca(builder, SSTy.llvm_type, "subslice")
# Setting shape dimensions
subshape, subshape_ty = SSTy._struct.emit_getattr(builder, ss, "shape")
# shape is a reference
shape, shape_ty = self.emit_getattr(builder, v, "shape")
for j in range(len(self.shape) - len(i)):
dim, _ = shape_ty.value_type.emit_getitem(builder, shape, (const_index(j + len(i)),))
subshape_ty.value_type.emit_setitem(builder, subshape, (const_index(j),), dim)
# Setting pointer to data sub-block.
data_idx = i + (const_index(0),) * (len(self.shape) - len(i))
SSTy._struct.emit_setattr(builder, ss, "data", self._item_gep(builder, v, data_idx))
return ss, SSTy
class Array(_ItemAccessor):
"""Array backed by llvm.ArrayType rather than pointer to memory.
This enables us to declare it as an aggregate type which can be returned by value.
TODO describe constructor initialization etc.
"""
def __init__(self, element_type, shape):
self.element_type = element_type
self.shape = shape
def __repr__(self):
return "Array({0}, shape={1})".format(self.element_type, repr(self.shape))
def __str__(self):
return "<Array {0}>".format(shape_str(self.element_type, self.shape))
def __call__(self, values=None):
from nitrous.lib import ValueEmitter
from nitrous.function import entry_alloca
from itertools import product
def emit(builder):
v = entry_alloca(builder, self.llvm_type, "v.array")
if values is not None:
for i in product(*(range(d) for d in self.shape)):
ii = tuple(const_index(j) for j in i)
vi = values
for k in i:
vi = vi[k]
self.emit_setitem(builder, v, ii, vi)
return v, Reference(self)
return ValueEmitter(emit)
@property
def llvm_type(self):
from operator import mul
n = reduce(mul, self.shape, 1)
return llvm.ArrayType(self.element_type.llvm_type, n)
@property
def c_type(self):
from operator import mul
return reduce(mul, self.shape[::-1], self.element_type.c_type)
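# For example, shape (2, 3) with a c_double element reduces to the ctypes
# type ((c_double * 3) * 2), i.e. a row-major 2x3 array type.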
@property
def tag(self):
shape_tag = "".join("d{0}".format(d) for d in self.shape)
return "A{0}{1}".format(shape_tag, self.element_type.tag)
def convert(self, p):
if np and isinstance(p, np.ndarray):
p = np.ctypeslib.as_ctypes(p)
return p
def emit_getattr(self, builder, ref, attr):
ndim = len(self.shape)
if attr == "ndim":
return const_index(ndim), Index
elif attr == "shape":
# First time, initialize a global constant array
# and then use it on every access.
module = llvm.GetParentModule__(builder)
shape_name = "__n2o_array_shape_{0}".format(id(self))
shape = llvm.GetNamedGlobal(module, shape_name)
if not shape:
dims = (llvm.ValueRef * ndim)(*(const_index(d) for d in self.shape))
shape_init = llvm.ConstArray(Index.llvm_type, dims, ndim)
shape = llvm.AddGlobal(module, llvm.TypeOf(shape_init), shape_name)
llvm.SetInitializer(shape, shape_init)
llvm.SetGlobalConstant(shape, llvm.TRUE)
return shape, Array(Index, (ndim,))
else:
raise AttributeError(attr)
def _item_gep(self, builder, v, i):
if len(i) != len(self.shape):
raise TypeError("Index and array shapes don't match ({0} != {1})"
.format(len(i), len(self.shape)))
# TODO check const shape dimension values?
# Build conversion from ND-index to flat memory offset
# FIXME currently assumes row-major memory alignment, first dimension can vary
const_shape = [const_index(d) for d in self.shape[1:]]
ii = flatten_index(builder, i, const_shape)
# Cast so that we can get GEP to a particular element.
p_type = llvm.PointerType(self.element_type.llvm_type, 0)
p = llvm.BuildPointerCast(builder, v, p_type, "array.ptr")
return llvm.BuildGEP(builder, p, ctypes.byref(ii), 1, "addr")
class FastSlice(_ItemAccessor):
def __init__(self, element_type, shape=(Any,)):
self.element_type = element_type
self.shape = shape
self.ndim = len(shape)
def __repr__(self):
return "FastSlice({0}, shape={1})".format(self.element_type, repr(self.shape))
def __str__(self):
return "<FastSlice {0}>".format(shape_str(self.element_type, self.shape))
@property
def llvm_type(self):
return llvm.PointerType(self.element_type.llvm_type, 0)
@property
def c_type(self):
return ctypes.POINTER(self.element_type.c_type)
@property
def tag(self):
shape_tag = "".join("d{0}".format(d) for d in self.shape)
return "F{0}{1}".format(shape_tag, self.element_type.tag)
def convert(self, p):
pointer_type = ctypes.POINTER(self.element_type.c_type)
# FIXME conversions are unsafe, since they force-cast
# anything to pointer to element_type.
if np and isinstance(p, np.ndarray):
return p.ctypes.data_as(pointer_type)
return ctypes.cast(p, pointer_type)
def emit_getattr(self, builder, ref, attr):
ndim = len(self.shape)
if attr == "ndim":
return const_index(ndim), Index
elif attr == "shape":
# First time, initialize a global constant array
# and then use it on every access.
module = llvm.GetParentModule__(builder)
shape_name = "__n2o_slice_shape_{0}".format(id(self))
shape = llvm.GetNamedGlobal(module, shape_name)
if not shape:
dims = (llvm.ValueRef * ndim)(*(const_index(d) for d in self.shape))
shape_init = llvm.ConstArray(Index.llvm_type, dims, ndim)
shape = llvm.AddGlobal(module, llvm.TypeOf(shape_init), shape_name)
llvm.SetInitializer(shape, shape_init)
llvm.SetGlobalConstant(shape, llvm.TRUE)
return shape, Array(Index, (ndim,))
else:
raise AttributeError(attr)
def _item_gep(self, builder, v, i):
if len(i) != len(self.shape):
raise TypeError("Index and array shapes don't match ({0} != {1})"
.format(len(i), len(self.shape)))
# TODO check const shape dimension values?
# Build conversion from ND-index to flat memory offset
# FIXME currently assumes row-major memory alignment, first dimension can vary
const_shape = [const_index(d) for d in self.shape[1:]]
ii = flatten_index(builder, i, const_shape)
return llvm.BuildGEP(builder, v, ctypes.byref(ii), 1, "addr")
_slice_types = {}
class Slice(_ItemAccessor):
# Wraps incoming np.array or ctypes array into a structure
# with standard shape/number-of-dimensions attributes that can be
# used from compiled function.
#
# The resulting structure supports getitem/setitem so that there's
# no need to address its `data` attribute.
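# For example (hypothetical element type): a parameter declared as
# Slice(Double, shape=(Any, 3)) would accept any contiguous float64
# NumPy array of shape (n, 3), exposing `shape` and `ndim` inside the
# compiled function.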
def __init__(self, element_type, shape=(Any,)):
self.element_type = element_type
self.shape = shape
# Prevent distinct slice LLVM types being allocated every single
# time one declares them. This is a problem in places like
# templates where only the data type is passed in and the slice
# type gets derived from it. Key types on their data type and shape.
k = (llvm.address_of(element_type.llvm_type), shape)
try:
self._struct = _slice_types[k]
except KeyError:
self._struct = _slice_types.setdefault(
k, Structure("Slice",
("data", Pointer(element_type)),
("shape", Array(Index, (len(shape),))))
)
def __repr__(self):
return "Slice({0}, shape={1})".format(self.element_type, repr(self.shape))
def __str__(self):
return "<Slice {0}>".format(shape_str(self.element_type, self.shape))
@property
def llvm_type(self):
return self._struct.llvm_type
@property
def c_type(self):
return self._struct.c_type
@property
def tag(self):
shape_tag = "".join("d{0}".format(d) for d in self.shape)
return "B{0}{1}".format(shape_tag, self.element_type.tag)
def convert(self, p):
pointer_type = ctypes.POINTER(self.element_type.c_type)
# FIXME conversions are unsafe, since they force-cast
# anything to pointer to element_type.
if np and isinstance(p, np.ndarray):
return self._struct.c_type(p.ctypes.data_as(pointer_type),
(Index.c_type * len(p.shape))(*p.shape))
shape = ctypes_shape(p)
conv_p = ctypes.cast(p, pointer_type)
return self._struct.c_type(conv_p, (Index.c_type * len(shape))(*shape))
def emit_getattr(self, builder, ref, attr):
if attr == "ndim":
return const_index(len(self.shape)), None
elif attr in ("shape", "data"):
v, t = self._struct.emit_getattr(builder, ref, attr)
set_tbaa(v, "n2o.{0}.{1}".format(self.tag, attr))
return v, t
else:
raise AttributeError(attr)
def emit_setattr(self, builder, ref, attr, v):
raise TypeError("Slice is immutable")
def _item_gep(self, builder, v, i):
if len(i) != len(self.shape):
raise TypeError("Index and slice shapes don't match ({0} != {1})"
.format(len(i), len(self.shape)))
# Get array shape from struct value
shape_value, shape_type = self.emit_getattr(builder, v, "shape")
data_value, data_type = self.emit_getattr(builder, v, "data")
def emit_dimension(i):
# Use direct constants, if possible; otherwise load from actual shape array.
if self.shape[i] == Any:
# Shape type is a reference to array, use the actual type
dim, _ = shape_type.value_type.emit_getitem(builder, shape_value, (const_index(i),))
else:
dim = const_index(self.shape[i])
return dim
# Build conversion from ND-index to flat memory offset
# FIXME currently assumes row-major memory alignment, first dimension can vary
const_shape = [emit_dimension(d) for d in range(1, len(self.shape))]
ii = flatten_index(builder, i, const_shape)
return llvm.BuildGEP(builder, data_value, ctypes.byref(ii), 1, "addr")
def flatten_index(builder, index, const_shape):
"""Converts N-dimensional index into 1-dimensional one.
index is of the form ``(i0, i1, ... iN)``, where each *i* is a ValueRef
holding an individual dimension index.
First dimension is considered to be variable. Given array shape
``(d0, d1, ... dN)``, *const_shape* contains ``(d1, d2, ... dN)``.
If array is 1-dimensional, *const_shape* is an empty tuple.
"""
mul_ = lambda x, y: llvm.BuildMul(builder, x, y, "v")
# out = 0
out = const_index(0)
for i in range(0, len(const_shape)):
# out += index[i-1] * reduce(mul, const_shape[i:], 1)
tmp = reduce(mul_, const_shape[i:], const_index(1))
rhs = llvm.BuildMul(builder, index[i], tmp, "v")
out = llvm.BuildAdd(builder, out, rhs, "v")
# return out + index[-1]
return llvm.BuildAdd(builder, out, index[-1], "v")
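# For example, for shape (d0, d1, d2) and index (i0, i1, i2), const_shape
# holds (d1, d2) and the flat offset computed above is:
#   i0*d1*d2 + i1*d2 + i2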
def ctypes_shape(x):
"""Infer shape of a ctypes array."""
try:
dim = x._length_
return (dim,) + ctypes_shape(x[0])
except AttributeError:
return ()
def shape_str(element_type, shape):
"""Return human-friendly description of array shape."""
dim_0 = "?" if shape[0] in (Any, None) else shape[0]
sub_shape = element_type if len(shape) == 1 else shape_str(element_type, shape[1:])
return "[{0} x {1}]".format(dim_0, sub_shape)
def set_tbaa(v, name):
root = llvm.MDNode__((llvm.ValueRef * 1)(llvm.MDString("n2o.tbaa", 8)), 1)
node = llvm.MDNode__((llvm.ValueRef * 2)(llvm.MDString(name, len(name)), root), 2)
llvm.SetNamedMetadata__(v, "tbaa", node)
|
|
# @HEADER
# ************************************************************************
#
# TriBITS: Tribal Build, Integrate, and Test System
# Copyright 2013 Sandia Corporation
#
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Corporation nor the names of the
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ************************************************************************
# @HEADER
try:
# Python 2
from urllib2 import urlopen
except ImportError:
# Python 3
from urllib.request import urlopen
import json
import datetime
import pprint
from FindGeneralScriptSupport import *
pp = pprint.PrettyPrinter()
# Validate a date format
def validateYYYYMMDD(dateText):
try:
return datetime.datetime.strptime(dateText, '%Y-%m-%d')
except ValueError:
raise ValueError("Incorrect data format for '"+dateText+"', should be YYYY-MM-DD")
# Construct the full query URL given the pieces
def getCDashIndexQueryUrl(cdashUrl, projectName, date, filterFields):
return cdashUrl+"/api/v1/index.php?project="+projectName+"&date="+date \
+ "&"+filterFields
# Given a CDash query URL, return the full Python CDash data-structure
def extractCDashApiQueryData(cdashApiQueryUrl):
response = urlopen(cdashApiQueryUrl)
return json.load(response)
# Collect CDash index.php build summary fields
def collectCDashIndexBuildSummaryFields(fullCDashIndexBuild):
summaryBuild = {
u('buildname') : fullCDashIndexBuild.get('buildname', 'missing_build_name'),
u('update') : \
fullCDashIndexBuild.get('update', {'errors':9999,'this_field_was_missing':1}),
u('configure') : \
fullCDashIndexBuild.get('configure', {'error':9999,'this_field_was_missing':1}),
u('compilation') : \
fullCDashIndexBuild.get('compilation', {'error':9999,'this_field_was_missing':1}),
u('test') : \
fullCDashIndexBuild.get('test', {'fail':9999, 'notrun':9999,'this_field_was_missing':1} ),
}
return summaryBuild
# Given the full Python CDash API builds data-structure returned from the
# CDash index.php page and query, return a reduced data-structure to be used
# for pass/fail examination.
#
# This function takes in the data-structure directly returned from:
#
# <cdash-url>/api/v1/index.php?project=<project>&date=<YYYY-MM-DD>&<filter-fields>
#
# The input full CDash API collapsed builds data-structure has the
# following structure and fields of interest:
#
# fullCDashIndexBuilds =
# {
# 'all_buildgroups': [ {'id':1, 'name':"Nightly"}, ...],
# 'buildgroups': [
# {
# 'builds":[
# {
# 'buildname':"???",
# 'update': {'errors':???, ...},
# 'configure':{'error': ???, ...},
# 'compilation':{'error': ???, ...},
# 'test': {'fail':???, 'notrun':???, 'pass':???, ...},
# ...
# },
# ...
# ]
# },
# ...
# ]
# ...
# }
#
# This function gets the data from *all* of the collapsed builds and returns
# the reduced data-structure:
#
# [
# {
# 'buildname':"???",
# 'update': {'errors':???, ...},
# 'configure':{'error': ???, ...},
# 'compilation':{'error': ???, ...},
# 'test': {'fail':???, 'notrun':???, 'pass':???, ...},
# ...
# },
# ...
# ]
#
# This collects *all* of the builds from all of the build groups, not just the
# 'Nightly' build group. Therefore, if you want to consider only one set of
# build groups, you need to add that to the CDash query URL
# (e.g. group='Nightly').
#
def getCDashIndexBuildsSummary(fullCDashIndexBuilds):
summaryCDashIndexBuilds = []
for buildgroup in fullCDashIndexBuilds["buildgroups"]:
for build in buildgroup["builds"]:
summaryBuild = collectCDashIndexBuildSummaryFields(build)
summaryCDashIndexBuilds.append(summaryBuild)
return summaryCDashIndexBuilds
# Return True if a CDash index build passes
def cdashIndexBuildPasses(cdashIndexBuild):
if cdashIndexBuild['update']['errors'] > 0:
return False
if cdashIndexBuild['configure']['error'] > 0:
return False
if cdashIndexBuild['compilation']['error'] > 0:
return False
if (cdashIndexBuild['test']['fail'] + cdashIndexBuild['test']['notrun']) > 0:
return False
return True
# Return whether a list of CDash builds all pass, along with an error string
# for the first build that fails.
def cdashIndexBuildsPass(summaryCDashIndexBuilds):
buildsPass = True
buildFailedMsg = ""
for build in summaryCDashIndexBuilds:
if not cdashIndexBuildPasses(build):
buildsPass = False
buildFailedMsg = "Error, the build " + sorted_dict_str(build) + " failed!"
break
return (buildsPass, buildFailedMsg)
# Extract the list of build names from a list of summary builds
def getCDashIndexBuildNames(summaryCDashIndexBuilds):
buildNames = []
for build in summaryCDashIndexBuilds:
buildNames.append(build['buildname'])
return buildNames
# Return whether all of the expected builds exist, and an error message if
# they don't.
def doAllExpectedBuildsExist(buildNames, expectedBuildNames):
allExpectedBuildsExist = True
errMsg = ""
for expectedBuildName in expectedBuildNames:
if findInSequence(buildNames, expectedBuildName) == -1:
allExpectedBuildsExist = False
errMsg = "Error, the expected build '"+expectedBuildName+"'" \
+" does not exist in the list of builds "+str(buildNames)
break
return (allExpectedBuildsExist, errMsg)
# Return if a list of summary CDash index.php builds pass and has all of the
# expected builds.
def cdashIndexBuildsPassAndExpectedExist(summaryCDashIndexBuilds,
expectedBuildNames \
):
cdashIndexBuildsPassAndExpectedExist_pass = True
errMsg = ""
# Check that all of the builds pass!
if cdashIndexBuildsPassAndExpectedExist_pass:
(buildsPass, buildFailedMsg) = cdashIndexBuildsPass(summaryCDashIndexBuilds)
if not buildsPass:
cdashIndexBuildsPassAndExpectedExist_pass = False
errMsg = buildFailedMsg
# Check that all of the expected builds are listed
if cdashIndexBuildsPassAndExpectedExist_pass:
buildNames = getCDashIndexBuildNames(summaryCDashIndexBuilds)
(allExpectedBuildsExist, errMsg) = \
doAllExpectedBuildsExist(buildNames, expectedBuildNames)
if not allExpectedBuildsExist:
cdashIndexBuildsPassAndExpectedExist_pass = False
return (cdashIndexBuildsPassAndExpectedExist_pass, errMsg)
# Determine if CDash index.php query builds all pass and has all expected
# builds.
def queryCDashAndDeterminePassFail(cdashUrl, projectName, date, filterFields,
expectedBuildNames, printCDashUrl=True,
extractCDashApiQueryData_in=extractCDashApiQueryData \
):
# Get the query data
cdashQueryUrl = getCDashIndexQueryUrl(cdashUrl, projectName, date, filterFields)
if printCDashUrl:
print("Getting data from:\n\n " + cdashQueryUrl )
fullCDashIndexBuilds = extractCDashApiQueryData_in(cdashQueryUrl)
summaryCDashIndexBuilds = getCDashIndexBuildsSummary(fullCDashIndexBuilds)
# Determine pass/fail
(cdashIndexBuildsPassAndExpectedExist_pass, errMsg) = \
cdashIndexBuildsPassAndExpectedExist(summaryCDashIndexBuilds, expectedBuildNames)
if not cdashIndexBuildsPassAndExpectedExist_pass:
return (False, errMsg)
return (True, "")
|
|
import warnings
import numpy as np
from scipy import linalg
from scipy import stats
from numpy.linalg import svd
class Model(object):
name = 'Model'
status_need_for_eval = 0
""" Base class for a model. Actual models should inherit from this class.
In this class the functions that should be implemented by each model are
defined.
Attributes
----------
name : str
Name of the model.
status : int
Indicates the status of the model:
-1 : Instance created. Not filled with values yet.
0 : Filled with values
1 : Filled with values and x0 set (optional level).
"""
def __init__(self):
self.status = -1
self.has_background = False
def initialize(self):
"""This function should be called with all needed values. To actually
fill all the models with values.
"""
self.status = 0
def evaluate(self):
"""Evaluates the model.
Actual implementation of this functions should return:
vec_g : Observable vector
vec_f : Solution vector
vec_f_reg : Vector used in the regularization
"""
if self.status < 0 and self.status_need_for_eval == 0:
raise RuntimeError("Model has to be intilized. "
"Run 'model.initialize' first!")
if self.status < 1 and self.status_need_for_eval == 1:
raise RuntimeError("Model has to be intilized and x0 has to be"
"set. Run 'model.initialize' and "
"'model.set_x0' first!")
def set_model_x0(self):
"""Some models need to be set up with a x0 for the model. For those .
models the class parameter 'status_need_for_eval' should be set to 1.
"""
if self.status < 0:
raise RuntimeError("Model has to be intilized, before setting x0. "
"Run 'model.initialize' first!")
self.status = 1
def generate_fit_x0(self):
"""The model should be able to return resonable starting values
for the fitter.
"""
if self.status < 0 and self.status_need_for_eval == 0:
raise RuntimeError("Model has to be intilized. "
"Run 'model.initialize' first!")
if self.status < 1 and self.status_need_for_eval == 1:
raise RuntimeError("Model has to be intilized and x0 has to be"
"set. Run 'model.initialize' and "
"'model.set_x0' first!")
def generate_fit_bounds(self):
"""The model should be able to return resonable bounds for the fitter.
"""
if self.status < 0 and self.status_need_for_eval == 0:
raise RuntimeError("Model has to be intilized. "
"Run 'model.initialize' first!")
if self.status < 1 and self.status_need_for_eval == 1:
raise RuntimeError("Model has to be intilized and x0 has to be"
"set. Run 'model.initialize' and "
"'model.set_x0' first!")
def add_background(self):
self.has_background = True
def remove_background(self):
"""Disables the background vector. A stored background vector is
not deleted.
"""
self.has_background = False
class LinearModel(Model):
name = 'LinearModel'
status_need_for_eval = 0
""" Basic Linear model:
g = A * f
Attributes
----------
name : str
Name of the model.
status : int
Indicates the status of the model:
-1 : Instance created. Not filled with values yet.
0 : Filled with values
dim_g :
Dimension of the histogrammed observable vector.
dim_f :
Dimension of the histogrammed truth vector.
range_obs : tuple (int, int)
Tuple containing the lowest and highest bin number used in
the digitized observable vector. For performance reasons it is
assumed that all numbers between min and max are used.
range_truth : tuple (int, int)
Tuple containing the lowest and highest bin number used in
the digitized truth vector. For performance reasons it is
assumed that all numbers between min and max are used.
A : numpy.array shape=(dim_g, dim_f)
Response matrix.
vec_b : numpy.array, shape=(dim_f)
Observable vector for the background.
has_background : boolean
Indicator whether self.vec_b should be added to the model evaluation.
"""
def __init__(self, random_state=None):
super(LinearModel, self).__init__()
self.range_obs = None
self.range_truth = None
self.A = None
self.dim_f = None
self.dim_g = None
self.vec_b = None
self.dim_fit_vector = None
self.x0_distributions = None
self.n_nuissance_parameters = 0
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
self.random_state = random_state
def initialize(self, digitized_obs, digitized_truth, sample_weight=None):
"""
"""
super(LinearModel, self).initialize()
self.range_obs = (min(digitized_obs), max(digitized_obs))
self.range_truth = (min(digitized_truth), max(digitized_truth))
self.dim_f = self.range_truth[1] - self.range_truth[0] + 1
self.dim_g = self.range_obs[1] - self.range_obs[0] + 1
binning_g, binning_f = self.__generate_binning__()
self.A = np.histogram2d(x=digitized_obs,
y=digitized_truth,
bins=(binning_g, binning_f),
weights=sample_weight)[0]
M_norm = np.diag(1 / np.sum(self.A, axis=0))
self.A = np.dot(self.A, M_norm)
self.dim_fit_vector = self.dim_f
self.x0_distributions = [('poisson', None, 1)] * self.dim_f
def evaluate(self, vec_fit):
"""Evaluating the model for a given vector f
Parameters
----------
vec_fit : numpy.array, shape=(dim_f,)
Vector f for which the model should be evaluated.
Returns
-------
vec_g : numpy.array, shape=(dim_g,)
Vector containing the number of events in observable space.
If background was added the returned vector is A * vec_f + vec_b.
vec_f : numpy.array, shape=(dim_f,)
Vector used to evaluate A * vec_f.
vec_f_reg : numpy.array, shape=(dim_f,)
Vector that should be passed to the regularization. For the
LinearModel it is identical to vec_f.
"""
super(LinearModel, self).evaluate()
vec_g = np.dot(self.A, vec_fit)
if self.has_background:
vec_g += self.vec_b
return vec_g, vec_fit, vec_fit
def generate_fit_x0(self, vec_g, vec_f_0, size=None):
"""Generates a default seed for the minimization.
The default seed vec_f_0 is a uniform distribution with
sum(vec_f_0) = sum(vec_g). If background is present the default seed
is: sum(vec_f_0) = sum(vec_g) - sum(self.vec_b).
Parameters
----------
vec_g : np.array, shape=(dim_g)
Observable vector which should be used to get the correct
normalization for vec_f_0.
Returns
-------
vec_f_0 : np.array, shape=(dim_f)
Seed vector of a minimization.
"""
super(LinearModel, self).generate_fit_x0()
n = self.dim_f
if vec_f_0 is None:
if self.has_background:
vec_f_0 = np.ones(n) * (np.sum(vec_g) - np.sum(self.vec_b)) / n
else:
vec_f_0 = np.ones(n) * np.sum(vec_g) / n
if size is None:
return vec_f_0
pos_x0 = np.ones((size, n), dtype=float)
x0_pointer = 0
for (sample_x0, _, n_parameters) in self.x0_distributions[:self.dim_f]:
if n_parameters == 1:
x0_slice = x0_pointer
else:
x0_slice = slice(x0_pointer, x0_pointer + n_parameters)
x0_i = vec_f_0[x0_slice]
if sample_x0 == 'poisson':
pos_x0_i = self.random_state.poisson(x0_i,
size=size)
else:
raise ValueError(
'Only "poisson" as name for x0 sample'
'dist is implemented')
pos_x0[:, x0_slice] = pos_x0_i
x0_pointer += 1
# wiggle on each point
wiggle = np.absolute(self.random_state.normal(size=pos_x0.shape))
pos_x0 += wiggle
return pos_x0
def generate_fit_bounds(self, vec_g):
"""Generates a bounds for a minimization.
The bounds are (0, sum(vec_g)) without background and
(0, sum(vec_g - self.vec_b)) with background. The bounds are for
each fit parameter/entry in f.
Parameters
----------
vec_g : np.array, shape=(dim_g)
Observable vector which should be used to get the correct
upper bound
Returns
-------
bounds : list, shape=(dim_f)
List of tuples with the bounds.
"""
super(LinearModel, self).generate_fit_bounds()
n = self.A.shape[1]
if self.has_background:
n_events = np.sum(vec_g) - np.sum(self.vec_b)
else:
n_events = np.sum(vec_g)
bounds = [(0, n_events)] * n
return bounds
def set_model_x0(self):
"""The LinearModel has no referenz model_x0.
"""
super(LinearModel, self).set_model_x0()
warnings.warn('\tx0 has no effect for {}'.format(self.name))
def evaluate_condition(self, normalize=True):
"""Returns an ordered array of the singular values of matrix A.
Parameters
----------
normalize : boolean (optional)
If True the singular values are returned relative to the
largest value.
Returns
-------
S_values : np.array, shape=(dim_f)
Ordered array of the singular values.
"""
if self.status < 0:
raise RuntimeError("Model has to be intilized. "
"Run 'model.initialize' first!")
U, S_values, V = linalg.svd(self.A)
if normalize:
S_values = S_values / S_values[0]
return S_values
def __generate_binning__(self):
if self.status < 0:
raise RuntimeError("Model has to be intilized. "
"Run 'model.initialize' first!")
binning_obs = np.linspace(self.range_obs[0],
self.range_obs[1] + 1,
self.dim_g + 1)
binning_truth = np.linspace(self.range_truth[0],
self.range_truth[1] + 1,
self.dim_f + 1)
return binning_obs, binning_truth
def generate_vectors(self,
digitized_obs=None,
digitized_truth=None,
obs_weights=None,
truth_weights=None):
"""Returns vec_g, vec_f for digitized values. Either f, g or both
can be provided to the function.
Parameters
----------
digitized_obs : np.intarray (optional)
Array with digitized values from the observable space
digitized_truth : np.intarray (optional)
Array with digitized values for the sought-after quantity.
Returns
-------
vec_g : None or np.array shape=(dim_g)
None if no digitized_obs was provided, otherwise the histogram
of digitized_obs.
vec_f : None or np.array shape=(dim_f)
None if no digitized_truth was provided, otherwise the histogram
of digitized_truth.
"""
binning_obs, binning_truth = self.__generate_binning__()
if digitized_obs is not None:
vec_g = np.histogram(digitized_obs,
bins=binning_obs,
weights=obs_weights)[0]
else:
vec_g = None
if digitized_truth is not None:
vec_f = np.histogram(digitized_truth,
bins=binning_truth,
weights=truth_weights)[0]
else:
vec_f = None
return vec_g, vec_f
def add_background(self, vec_b):
"""Adds a background vector to the model.
Parameters
----------
vec_b : numpy.array, shape=(dim_g)
Vector g which is added to the model evaluation.
"""
super(LinearModel, self).add_background()
self.vec_b = vec_b
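# A minimal usage sketch (toy data, already digitized into bins 0..2):
#
#   model = LinearModel()
#   model.initialize(digitized_obs=[0, 1, 1, 2, 2, 2],
#                    digitized_truth=[0, 1, 1, 2, 2, 2])
#   vec_g, vec_f, _ = model.evaluate(np.array([1., 2., 3.]))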
class PolynominalSytematic(object):
n_parameters = 1
def __init__(self,
name,
degree,
prior=None,
use_stat_error=True,
bounds=None):
self.name = name
self.degree = degree
self.use_stat_error = use_stat_error
if bounds is None:
self.bounds = lambda x: True
self._bounds = None
elif len(bounds) == 2:
scale = bounds[1] - bounds[0]
uniform_prior = stats.uniform(loc=bounds[0], scale=scale)
self.bounds = lambda x: uniform_prior.pdf(x) > 0
self._bounds = bounds
else:
raise ValueError('bounds can be None or array-type with length 2')
self.x = None
self.coeffs = None
if prior is None:
def prior_pdf(x):
return 1.
elif hasattr(prior, 'pdf'):
prior_pdf = lambda x: sum(prior.pdf(x))
elif callable(prior):
prior_pdf = lambda x: sum(prior(x))
else:
raise TypeError('The provided prior has to be None, '
'scipy.stats frozen rv or callable!')
self.prior = prior
self.prior_pdf = prior_pdf
self.baseline_value = None
def lnprob_prior(self, x):
if self.bounds(x):
pdf_val = self.prior_pdf(x)
if pdf_val > 0.:
return np.log(pdf_val)
else:
return -np.inf
else:
return -np.inf
def sample(self, size, sample_func_name=None):
if hasattr(self.prior, 'rvs'):
if self.bounds is None:
samples = self.prior.rvs(size)
else:
samples = np.zeros(size, dtype=float)
pointer = 0
while pointer < size:
r = self.prior.rvs()
if self.bounds(r):
samples[pointer] = r
pointer += 1
elif sample_func_name is not None:
f = getattr(self.prior, sample_func_name)
samples = f(size)
else:
raise TypeError(
'Provided prior has neither a function called \'rvs\' nor '
'\'sample_func_name\' was passed to the function!')
return samples
def add_data(self,
x,
baseline_idx,
digitized_obs,
sample_weights=None,
minlength_vec_g=0):
x = np.atleast_1d(x)
self.baseline_idx = baseline_idx
self.baseline_value = x[baseline_idx]
if len(digitized_obs) != len(x):
raise ValueError('digitized_obs has invalid shape! It needs to '
'be of shape (len(x), n_events)!')
if sample_weights is not None:
if len(sample_weights) != len(x):
raise ValueError(
'sample_weights has invalid shape! It needs to '
'be of shape (len(x), n_events)!')
else:
sample_weights = [None] * len(x)
vector_g = []
rel_uncert = []
mean_w = None
for y_i, w_i in zip(digitized_obs, sample_weights):
if w_i is not None:
if mean_w is None:
mean_w = np.mean(sample_weights[baseline_idx])
w_i = w_i / mean_w
vector_g.append(np.bincount(y_i,
weights=w_i,
minlength=minlength_vec_g))
rel_uncert.append(np.sqrt(np.bincount(y_i,
weights=w_i**2,
minlength=minlength_vec_g)))
del digitized_obs
del sample_weights
n_bins = np.unique([len(g) for g in vector_g])
if len(n_bins) > 1:
raise ValueError(
'digitized_obs has different numbers of populated bins! '
'Either use the same binning for all datasets or '
'set minlength_vec_g.')
else:
n_bins = n_bins[0]
vector_g = np.atleast_2d(vector_g).T
rel_uncert = np.atleast_2d(rel_uncert).T
rel_uncert /= vector_g
for i in range(len(x)):
if i == baseline_idx:
continue
else:
vector_g[:, i] /= vector_g[:, baseline_idx]
vector_g[:, baseline_idx] = 1.
self.coeffs = np.empty((len(vector_g), self.degree + 1), dtype=float)
for i, (y, uncert) in enumerate(zip(vector_g, rel_uncert)):
if self.use_stat_error:
c = np.polyfit(x, y, self.degree, w=1. / (uncert * y))
else:
c = np.polyfit(x, y, self.degree)
self.coeffs[i, :] = c
self.vector_g = vector_g
self.rel_uncert = rel_uncert
self.x = x
def plot(self, bin_i):
from matplotlib import pyplot as plt
if self.coeffs is None:
raise RuntimeError("No data added yet. Call 'add_data' first.")
fig, ax = plt.subplots()
x_lim = [min(self.x), max(self.x)]
x_lim[0] = x_lim[0] - (x_lim[1] - x_lim[0]) * 0.1
x_lim[1] = x_lim[1] + (x_lim[1] - x_lim[0]) * 0.1
if self._bounds is not None:
x_lim = self._bounds
ax.set_xlim(x_lim)
x_points = np.linspace(x_lim[0], x_lim[1], 100)
y_points = np.zeros_like(x_points)
for i in range(self.degree + 1)[::-1]:
coeff_pointer = self.coeffs.shape[1] - 1 - i
y_points += x_points**i * self.coeffs[bin_i, coeff_pointer]
ax.plot(x_points, y_points, '-', color='0.5')
yerr = self.rel_uncert[bin_i] * self.vector_g[bin_i]
y_min = np.min(self.vector_g[bin_i] - yerr)
y_max = np.max(self.vector_g[bin_i] + yerr)
offset = (y_max - y_min) * 0.05
ax.set_ylim(y_min - offset, y_max + offset)
ax.errorbar(np.array(self.x),
self.vector_g[bin_i],
yerr=yerr,
fmt='o',
color='b')
return fig, ax
def evaluate(self, baseline_digitized, x):
factors = self.get_bin_factors(x)
return factors[baseline_digitized]
def get_bin_factors(self, x):
if not self.bounds(x):
return None
factors = np.zeros(self.coeffs.shape[0], dtype=float)
for i in range(self.degree + 1)[::-1]:
coeff_pointer = self.coeffs.shape[1] - 1 - i
factors += x**i * self.coeffs[:, coeff_pointer]
return factors
def __call__(self, baseline_digitized, x):
return self.evaluate(baseline_digitized, x)
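# A minimal sketch of the intended workflow (names are illustrative):
#
#     syst = PolynominalSytematic('scale', degree=2)
#     # x holds the tested parameter values, one digitized dataset each
#     syst.add_data(x=[0.9, 1.0, 1.1],
#                   baseline_idx=1,
#                   digitized_obs=[g_low, g_base, g_high])
#     factors = syst.get_bin_factors(1.05)  # per-bin ratios w.r.t. baseline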
def plane_fit(points):
"""
https://stackoverflow.com/a/18968498
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
fit an d-dimensional plane to the points.
Return a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
points = np.reshape(points, (np.shape(points)[0], -1))
assert points.shape[0] <= points.shape[1], \
"There are only {} points in {} dimensions.".format(points.shape[1],
points.shape[0])
ctr = points.mean(axis=1)
x = points - ctr[:, np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
return ctr, svd(M)[0][:, -1]
def plane_fit_least_squares(points):
A = np.ones((points.shape[0], 3), dtype=float)
A[:, :2] = points[:, :2]
A = np.matrix(A)
b = np.matrix(points[:, 2]).T
fit = (A.T * A).I * A.T * b
errors = b - A * fit
return fit, errors
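# plane_fit_least_squares solves the normal equations of z = a*x + b*y + c:
# fit = (A^T A)^-1 A^T b with A = [x, y, 1] and b = z. Sanity check on an
# exact plane z = 2x + 3y + 1 (illustrative values):
#
#     pts = np.array([[0., 0., 1.], [1., 0., 3.],
#                     [0., 1., 4.], [1., 1., 6.]])
#     fit, errors = plane_fit_least_squares(pts)
#     # fit is approximately [[2.], [3.], [1.]] and errors are ~0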
class CircularSystematic(object):
n_parameters = 1
def __init__(self,
name,
prior=None,
bounds=None):
self.name = name
if bounds is None:
self.bounds = lambda x: True
self._bounds = None
elif len(bounds) == 2:
scale = bounds[1] - bounds[0]
uniform_prior = stats.uniform(loc=bounds[0], scale=scale)
self.bounds = lambda x: uniform_prior.pdf(x) > 0
self._bounds = bounds
else:
raise ValueError('bounds can be None or array-type with length 2')
self.x = None
self.coeffs = None
if prior is None:
def prior_pdf(x):
return 1.
elif hasattr(prior, 'pdf'):
prior_pdf = prior.pdf
elif callable(prior):
prior_pdf = prior
else:
raise TypeError('The provided prior has to be None, '
'scipy.stats frozen rv or callable!')
self.prior = prior
self.prior_pdf = prior_pdf
self.baseline_value = None
def lnprob_prior(self, x):
if self.bounds(x):
pdf_val = self.prior_pdf(x)
if pdf_val > 0.:
return np.log(pdf_val)
else:
return np.inf * -1
else:
return np.inf * -1
def sample(self, size, sample_func_name=None):
if hasattr(self.prior, 'rvs'):
if self._bounds is None:
samples = self.prior.rvs(size)
else:
samples = np.zeros(size, dtype=float)
pointer = 0
while pointer < size:
r = self.prior.rvs()
if self.bounds(r):
samples[pointer] = r
pointer += 1
elif sample_func_name is not None:
f = getattr(self.prior, sample_func_name)
samples = f(size)
else:
raise TypeError(
'Provided prior has neither a function called \'rvs\' nor '
'\'sample_func_name\' was passed to the function!')
return samples
def add_data(self,
baseline_idx,
digitized_obs,
sample_weights=None,
minlength_vec_g=0):
n_points = len(digitized_obs)
x = np.linspace(0., 360., n_points, endpoint=True)
self.baseline_idx = baseline_idx
self.baseline_value = x[baseline_idx]
if len(digitized_obs) != len(x):
raise ValueError('digitized_obs has invalid shape! It needs to '
'be of shape (len(x), n_events)!')
if sample_weights is not None:
if len(sample_weights) != len(x):
raise ValueError(
'sample_weights has invalid shape! It needs to '
'be of shape (len(x), n_events)!')
else:
sample_weights = [None] * len(x)
vector_g = []
rel_uncert = []
mean_w = None
for y_i, w_i in zip(digitized_obs, sample_weights):
if w_i is not None:
if mean_w is None:
mean_w = np.mean(sample_weights[baseline_idx])
w_i = w_i / mean_w
vector_g.append(np.bincount(y_i,
weights=w_i,
minlength=minlength_vec_g))
rel_uncert.append(np.sqrt(np.bincount(y_i,
weights=w_i**2,
minlength=minlength_vec_g)))
del digitized_obs
del sample_weights
n_bins = np.unique([len(g) for g in vector_g])
if len(n_bins) > 1:
raise ValueError(
'digitized_obs has different numbers of populated bins! '
'Either use the same binning for all datasets or '
'set minlength_vec_g.')
else:
n_bins = n_bins[0]
vector_g = np.atleast_2d(vector_g).T
rel_uncert = np.atleast_2d(rel_uncert).T
rel_uncert /= vector_g
for i in range(len(x)):
if i == baseline_idx:
continue
else:
vector_g[:, i] /= vector_g[:, baseline_idx]
vector_g[:, baseline_idx] = 1.
self.vector_g = vector_g
self.rel_uncert = rel_uncert
self.x = x
__x = np.zeros(len(x) + 1, dtype=float)
__x[:-1] = x
__x[-1] = __x[-2] + np.spacing(__x[-2])
self.__x = __x
self.distance = self.x[1] - self.x[0]
def plot(self, bin_i):
raise NotImplementedError
def evaluate(self, baseline_digitized, x):
factors = self.get_bin_factors(x)
return factors[baseline_digitized]
def get_bin_factors(self, x):
if not self.bounds(x):
return None
x = x % 360.
# indices of the two grid points enclosing x
neighbors = np.sort(np.argsort(np.absolute(self.__x - x))[:2])
idx_back = neighbors[0]
idx_front = neighbors[1]
if idx_front == len(self.__x) - 1:
idx_front = 0
# linear interpolation between the two neighboring grid points;
# factor is the fractional distance of x from the back grid point
factor = (x - self.x[idx_back]) / self.distance
contribution_back = (1. - factor) * self.vector_g[:, idx_back]
contribution_front = factor * self.vector_g[:, idx_front]
return contribution_back + contribution_front
def __call__(self, baseline_digitized, x):
return self.evaluate(baseline_digitized, x)
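# A minimal sketch (illustrative names): the datasets are assumed to sit on
# an equally spaced grid of angles, so only the baseline index is needed:
#
#     syst = CircularSystematic('azimuth')
#     syst.add_data(baseline_idx=0,
#                   digitized_obs=[g_0, g_90, g_180, g_270])
#     factors = syst.get_bin_factors(45.)  # interpolated per-bin ratios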
class PlaneSytematic(object):
n_parameters = 2
def __init__(self,
name,
prior=None,
bounds=None):
self.name = name
if bounds is None:
self.bounds = lambda x: True
self._bounds = None
elif len(bounds) == 2:
bounds_x = bounds[0]
bounds_y = bounds[1]
if bounds_x is None and bounds_y is not None:
uniform_prior = stats.uniform(
loc=bounds_y[0],
scale=bounds_y[1] - bounds_y[0])
self.bounds = lambda x: uniform_prior.pdf(x[1]) > 0
self._bounds = (None, bounds_y)
elif bounds_x is not None and bounds_y is None:
uniform_prior = stats.uniform(
loc=bounds_x[0],
scale=bounds_x[1] - bounds_x[0])
self.bounds = lambda x: uniform_prior.pdf(x[0]) > 0
self._bounds = (bounds_x, None)
elif bounds_x is not None and bounds_y is not None:
uniform_prior = stats.uniform(
loc=(bounds_x[0], bounds_y[0]),
scale=(bounds_x[1] - bounds_x[0],
bounds_y[1] - bounds_y[0]))
self.bounds = lambda x: all(uniform_prior.pdf(x) > 0)
self._bounds = bounds
else:
self.bounds = lambda x: True
self._bounds = None
else:
raise ValueError(
"'bounds' can be either None or a tuple/list of length 2 "
"containing None or the actual bounds for each dimension.")
self.points = None
self.coeffs = None
if prior is None:
def prior_pdf(x):
return 1.
elif hasattr(prior, 'pdf'):
prior_pdf = lambda x: sum(prior.pdf(x))
elif callable(prior):
prior_pdf = lambda x: sum(prior(x))
else:
raise TypeError('The provided prior has to be None, '
'scipy.stats frozen rv or callable!')
self.prior = prior
self.prior_pdf = prior_pdf
self.baseline_value = None
def lnprob_prior(self, x):
if self.bounds(x):
p_val = self.prior_pdf(x)
if p_val > 0.:
return np.log(p_val)
else:
return np.inf * -1
else:
return np.inf * -1
def sample(self, size, sample_func_name=None):
if hasattr(self.prior, 'rvs'):
if self._bounds is None:
samples = self.prior.rvs(size)
else:
samples = np.zeros((size, 2), dtype=float)
pointer = 0
while pointer < size:
r = self.prior.rvs(size=(1, 2))
if self.bounds(r):
samples[pointer, :] = r
pointer += 1
elif sample_func_name is not None:
f = getattr(self.prior, sample_func_name)
samples = f(size)
else:
raise TypeError(
'Provided prior has neither a function called \'rvs\' nor '
'\'sample_func_name\' was passed to the function!')
return samples
def add_data(self,
xy_coords,
baseline_idx,
digitized_obs,
sample_weights=None,
minlength_vec_g=0):
self.baseline_idx = baseline_idx
xy_coords = np.atleast_2d(xy_coords)
self.baseline_value = xy_coords[baseline_idx, :]
if len(digitized_obs) != len(xy_coords):
raise ValueError('digitized_obs has invalid shape! It needs to '
'be of shape (len(xy_coords), n_events)!')
if sample_weights is not None:
if len(sample_weights) != len(xy_coords):
raise ValueError(
'sample_weights has invalid shape! It needs to '
'be of shape (len(xy_coords), n_events)!')
else:
sample_weights = [None] * len(xy_coords)
vector_g = []
mean_w = None
for y_i, w_i in zip(digitized_obs, sample_weights):
if w_i is not None:
if mean_w is None:
mean_w = np.mean(sample_weights[baseline_idx])
w_i /= mean_w
vector_g.append(np.bincount(y_i,
weights=w_i,
minlength=minlength_vec_g))
n_bins = np.unique([len(g) for g in vector_g])
if len(n_bins) > 1:
raise ValueError(
'digitized_obs has different numbers of populated bins! '
'Either use the same binning for all datasets or '
'set minlength_vec_g.')
else:
n_bins = n_bins[0]
vector_g = np.atleast_2d(vector_g).T
for i in range(len(xy_coords)):
if i == baseline_idx:
continue
else:
vector_g[:, i] /= vector_g[:, baseline_idx]
vector_g[:, baseline_idx] = 1.
points = np.zeros((vector_g.shape[0],
xy_coords.shape[0],
xy_coords.shape[1] + 1), dtype=float)
points[:, :, :2] = xy_coords
points[:, :, 2] = vector_g
self.coeffs = np.empty((vector_g.shape[0], xy_coords.shape[1] + 1))
for i in range(vector_g.shape[0]):
fit_i, _ = plane_fit_least_squares(points[i, :, :])
self.coeffs[i, :] = fit_i.flatten()
self.points = points
def plot(self, bin_i):
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401
if self.coeffs is None:
raise RuntimeError("No data added yet. Call 'add_data' first.")
points = self.points[bin_i, :, :]
coeffs = self.coeffs[bin_i, :]
x_lim = [np.min(points[:, 0], axis=0), np.max(points[:, 0], axis=0)]
y_lim = [np.min(points[:, 1], axis=0), np.max(points[:, 1], axis=0)]
x_lim[0] = x_lim[0] - (x_lim[1] - x_lim[0]) * 0.1
x_lim[1] = x_lim[1] + (x_lim[1] - x_lim[0]) * 0.1
y_lim[0] = y_lim[0] - (y_lim[1] - y_lim[0]) * 0.1
y_lim[1] = y_lim[1] + (y_lim[1] - y_lim[0]) * 0.1
if self._bounds is not None:
x_lim_bounds = self._bounds[0]
if x_lim_bounds is not None:
x_lim = x_lim_bounds
y_lim_bounds = self._bounds[1]
if y_lim_bounds is not None:
y_lim = y_lim_bounds
fig = plt.figure()
ax = plt.subplot(111, projection='3d')
ax.set_xlim(x_lim[0], x_lim[1])
ax.set_ylim(y_lim[0], y_lim[1])
ax.set_xticks(np.unique(points[:, 0]))
ax.set_yticks(np.unique(points[:, 1]))
X, Y = np.meshgrid(np.arange(x_lim[0], x_lim[1], 0.02),
np.arange(y_lim[0], y_lim[1], 0.02))
Z = np.zeros(X.shape)
for r in range(X.shape[0]):
for c in range(X.shape[1]):
Z[r, c] = coeffs[0] * X[r, c] + coeffs[1] * Y[r, c] + coeffs[2]
idx = np.ones(len(points), dtype=bool)
idx[self.baseline_idx] = False
z_min, z_max = np.min(Z), np.max(Z)
diff = z_max - z_min
ax.set_zlim(z_min - diff * 0.05, z_max + diff * 0.05)
for i, (x, y, z) in enumerate(points):
if i == self.baseline_idx:
color = 'r'
else:
color = 'k'
ax.plot([x, x],
[y, y],
[ax.get_zlim()[0], z],
'-*',
color=color)
ax.plot_wireframe(X, Y, Z, color='C0')
return fig, ax
def evaluate(self, baseline_digitized, x):
factors = self.get_bin_factors(x)
return factors[baseline_digitized]
def get_bin_factors(self, x):
if not self.bounds(x):
return None
__x = np.ones(len(x) + 1, dtype=float)
__x[:len(x)] = x
return np.sum(self.coeffs * __x, axis=1)
def __call__(self, baseline_digitized, x):
return self.evaluate(baseline_digitized, x)
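# get_bin_factors evaluates the fitted plane per bin: with coefficients
# (a, b, c) the factor is a*x + b*y + c, computed as a dot product with
# the augmented vector (x, y, 1). Illustrative call:
#
#     syst = PlaneSytematic('hole_ice')
#     syst.add_data(xy_coords, baseline_idx=0, digitized_obs=datasets)
#     factors = syst.get_bin_factors([0.2, 1.1])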
class ArrayCacheTransformation(object):
def __init__(self, array):
self.array = array
def __call__(self, x):
return self.array[np.argmin(np.absolute(x - self.array))]
class FloatCacheTransformation(object):
def __init__(self, value, offset=0.):
self.value = value
self.offset = offset
def __call__(self, x):
a = (x - self.offset) / self.value
return np.floor((a + 0.5)) * self.value + self.offset
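# FloatCacheTransformation snaps a parameter onto a grid of spacing 'value'
# (shifted by 'offset') so that nearby values share a cache entry, e.g.:
#
#     trafo = FloatCacheTransformation(0.5)
#     trafo(1.3)   # -> 1.5 (nearest multiple of 0.5)
#     trafo(1.76)  # -> 2.0
#
# ArrayCacheTransformation instead maps x to the closest entry of a fixed
# array of allowed values.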
class LinearModelSystematics(LinearModel):
name = 'LinearModelSystematics'
status_need_for_eval = 0
def __init__(self,
generic_epsilon=None,
systematics=None,
cache_precision=None,
random_state=None):
super(LinearModelSystematics, self).__init__(random_state=random_state)
self.range_obs = None
self.range_truth = None
self.A = None
self.dim_f = None
self.dim_g = None
self.vec_b = None
if systematics is None:
systematics = []
if cache_precision is None:
cache_precision = []
self.systematics = systematics
if len(cache_precision) < len(self.systematics):
for i in range(len(self.systematics) - len(cache_precision)):
cache_precision.append(None)
elif len(cache_precision) > len(self.systematics):
raise ValueError('len(cache_precision) must not exceed '
'len(systematics)!')
self.cache_precision = cache_precision
if cache_precision is not None:
self.__cache = {}
cache_error = ValueError('cache_precision has to be either None, '
'float, (float, float) or a np.array!')
for i, (s, p) in enumerate(zip(systematics, cache_precision)):
if p is not None:
if isinstance(p, float):
self.cache_precision[i] = FloatCacheTransformation(p)
elif isinstance(p, list) or isinstance(p, tuple):
if len(p) == 2:
self.cache_precision[i] = FloatCacheTransformation(
value=p[0],
offset=p[1])
else:
raise cache_error
elif isinstance(p, np.ndarray):
self.cache_precision[i] = ArrayCacheTransformation(p)
else:
raise cache_error
self.__cache[s.name] = {}
self.n_nuissance_parameters = sum(s.n_parameters
for s in self.systematics)
self.dim_fit_vector = None
self.x0_distributions = None
if generic_epsilon is not None:
if isinstance(generic_epsilon, float):
if generic_epsilon <= 0.:
raise ValueError('generic_epsilon has to be > 0.')
else:
self.n_nuissance_parameters += 1
else:
raise ValueError('generic_epsilon has to be None or float > 0')
self.generic_epsilon = generic_epsilon
def initialize(self,
digitized_obs,
digitized_truth,
sample_weight=None):
super(LinearModel, self).initialize()
self.range_obs = (min(digitized_obs), max(digitized_obs))
self.range_truth = (min(digitized_truth), max(digitized_truth))
self.dim_f = self.range_truth[1] - self.range_truth[0] + 1
self.dim_g = self.range_obs[1] - self.range_obs[0] + 1
self.digitized_obs = digitized_obs
self.digitized_truth = digitized_truth
self.sample_weight = sample_weight
self._A_unnormed = self.__generate_matrix_A_unnormed()
self.A = np.dot(self._A_unnormed,
np.diag(1 / np.sum(self._A_unnormed, axis=0)))
self.dim_fit_vector = self.dim_f + self.n_nuissance_parameters
self.x0_distributions = [('poisson', None, 1)] * self.dim_f
self.x0_distributions += [(s.sample, s.lnprob_prior, s.n_parameters)
for s in self.systematics]
if self.generic_epsilon is not None:
s = stats.norm(loc=1., scale=self.generic_epsilon)
s.random_state = self.random_state
def lnprop_prior_generic_epsilon(x):
val = s.pdf(x)[0]
if val == 0.:
return -np.inf
else:
return np.log(val)
self.x0_distributions += [(s.rvs, lnprop_prior_generic_epsilon, 1)]
def __generate_matrix_A_unnormed(self, weight_factors=None):
if self.sample_weight is None:
weights = weight_factors
else:
if weight_factors is not None:
weights = self.sample_weight * weight_factors
else:
weights = self.sample_weight
binning_g, binning_f = self.__generate_binning__()
A_unnormed = np.histogram2d(x=self.digitized_obs,
y=self.digitized_truth,
bins=(binning_g, binning_f),
weights=weights)[0]
return A_unnormed
def evaluate_old(self, vec_fit):
vec_f = vec_fit[:self.dim_f]
nuissance_parameters = vec_fit[self.dim_f:]
A = self._A_unnormed.copy()
for s, x_s, c_t in zip(self.systematics,
nuissance_parameters,
self.cache_precision):
factor_matrix = self.__get_systematic_event_factors(s, x_s, c_t)
if factor_matrix is None:
return np.array([-1.]), np.array([-1.]), np.array([-1.])
A *= factor_matrix
M_norm = np.diag(1 / np.sum(A, axis=0))
A = np.dot(A, M_norm)
vec_g = np.dot(A, vec_f)
if self.has_background:
vec_g += self.vec_b
return vec_g, vec_fit, vec_fit
def evaluate(self, vec_fit):
vec_f = vec_fit[:self.dim_f]
nuissance_parameters = vec_fit[self.dim_f:]
A = self._A_unnormed.copy()
pointer = 0
for syst_i, c_t in zip(self.systematics, self.cache_precision):
s = slice(pointer, pointer + syst_i.n_parameters)
x_s = nuissance_parameters[s]
factor_vector = self.__get_systematic_factors(syst_i, x_s, c_t)
if factor_vector is None:
return np.array([-1.]), np.array([-1.]), np.array([-1.])
A *= factor_vector[:, np.newaxis]
pointer += syst_i.n_parameters
M_norm = np.diag(1 / np.sum(A, axis=0))
A = np.dot(A, M_norm)
if self.generic_epsilon is not None:
vec_f *= vec_fit[-1]
vec_g = np.dot(A, vec_f)
if self.has_background:
vec_g += self.vec_b
return vec_g, vec_fit, vec_fit
def __get_systematic_event_factors(self,
systematic,
x,
cache_transformation):
if cache_transformation is not None:
x = cache_transformation(x)
if x in self.__cache[systematic.name].keys():
return self.__cache[systematic.name][x]
weight_factors = systematic(baseline_digitized=self.digitized_obs,
x=x)
if weight_factors is None:
return None
A_syst = self.__generate_matrix_A_unnormed(
weight_factors=weight_factors)
A_syst[A_syst > 0] /= self._A_unnormed[A_syst > 0]
if cache_transformation is not None:
self.__cache[systematic.name][x] = A_syst
return A_syst
def __get_systematic_factors(self, systematic, x, cache_transformation):
if cache_transformation is not None:
x = cache_transformation(x)
if x in self.__cache[systematic.name].keys():
return self.__cache[systematic.name][x]
weight_factors = systematic.get_bin_factors(x=x)
if weight_factors is None:
return None
if cache_transformation is not None:
self.__cache[systematic.name][x] = weight_factors
return weight_factors
def generate_fit_x0(self, vec_g, vec_f_0=None, size=None):
vec_f_0_def_f = super(LinearModelSystematics, self).generate_fit_x0(
vec_g=vec_g,
vec_f_0=vec_f_0,
size=None)
vec_x_0_def = np.ones(self.dim_fit_vector, dtype=float)
vec_x_0_def[:self.dim_f] = vec_f_0_def_f
x0_pointer = self.dim_f
for syst_i in self.systematics:
s = slice(x0_pointer, x0_pointer + syst_i.n_parameters)
vec_x_0_def[s] = syst_i.baseline_value
x0_pointer += syst_i.n_parameters
if size is None:
return vec_x_0_def
pos_x0 = np.ones((size, self.dim_fit_vector), dtype=float)
vec_f_x0 = super(LinearModelSystematics, self).generate_fit_x0(
vec_g=vec_g,
vec_f_0=vec_f_0,
size=size)
pos_x0[:, :self.dim_f] = vec_f_x0
x0_pointer = self.dim_f
for sample_x0, _, n_parameters in self.x0_distributions[self.dim_f:]:
if n_parameters == 1:
x0_slice = x0_pointer
else:
x0_slice = slice(x0_pointer, x0_pointer + n_parameters)
x0_i = vec_x_0_def[x0_slice]
if sample_x0 is None:
pos_x0_i = x0_i
elif isinstance(sample_x0, basestring):
if sample_x0 == 'poisson':
pos_x0_i = self.random_state.poisson(x0_i,
size=size)
else:
raise ValueError(
'Only "poisson" as name for x0 sample'
'dist is implemented')
elif callable(sample_x0):
pos_x0_i = sample_x0(size=size)
pos_x0[:, x0_slice] = pos_x0_i
x0_pointer += n_parameters
if self.generic_epsilon is not None:
for i, factor in enumerate(pos_x0[:, -1]):
pos_x0[i, :self.dim_f] /= factor
return pos_x0
def generate_fit_bounds(self, vec_g):
bounds = super(LinearModelSystematics, self).generate_fit_bounds(vec_g)
for i, syst_i in enumerate(self.systematics):
bounds.append(syst_i.bounds)
return bounds
def evaluate_condition(self, nuissance_parameters=None, normalize=True):
"""Returns an ordered array of the singular values of matrix A.
Parameters
----------
normalize : boolean (optional)
If True the singular values are returned relative to the
largest value.
Returns
-------
S_values : np.array, shape=(dim_f)
Ordered array of the singular values.
"""
if nuissance_parameters is not None:
A = self._A_unnormed.copy()
for s, x_s, c_t in zip(self.systematics,
nuissance_parameters,
self.cache_precision):
factor_matrix = self.__get_systematic_event_factors(s, x_s, c_t)
if factor_matrix is None:
return -1., -1., -1.
A *= factor_matrix
M_norm = np.diag(1 / np.sum(A, axis=0))
A = np.dot(A, M_norm)
else:
A = self.A
if self.status < 0:
raise RuntimeError("Model has to be intilized. "
"Run 'model.initialize' first!")
U, S_values, V = linalg.svd(A)
if normalize:
S_values = S_values / S_values[0]
return S_values
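# The ratio of the largest to the smallest singular value is the condition
# number of A; a large value signals an ill-conditioned unfolding. Sketch:
#
#     S = model.evaluate_condition(normalize=False)
#     condition_number = S[0] / S[-1]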
class TestModelSystematics(LinearModelSystematics):
name = 'TestModelSystematics'
status_need_for_eval = 0
def __init__(self,
systematics=None,
cache_precision=None):
super(TestModelSystematics, self).__init__(
systematics=systematics,
cache_precision=cache_precision)
self.f_test = None
def initialize(self,
f_test,
digitized_obs,
digitized_truth,
sample_weight=None):
super(TestModelSystematics, self).initialize(
digitized_obs=digitized_obs,
digitized_truth=digitized_truth,
sample_weight=sample_weight)
if len(f_test) == self.dim_f:
self.f_test = f_test / np.sum(f_test)
else:
raise ValueError(
'\'f_test\' wrong length! Has {} needs {} '.format(
len(f_test),
self.dim_f))
self.dim_fit_vector = 1 + self.n_nuissance_parameters
def evaluate(self, vec_fit):
vec_fit_transformed = self.transform_fit_vector(vec_fit)
return super(TestModelSystematics, self).evaluate(
vec_fit=vec_fit_transformed)
def generate_fit_x0(self, vec_g):
factor = np.sum(vec_g) / np.sum(self.f_test)
vec_x_0 = np.ones(self.dim_fit_vector)
vec_x_0[0] = factor
for i, syst_i in enumerate(self.systematics):
vec_x_0[1 + i] = syst_i.x[syst_i.baseline_idx]
return vec_x_0
def generate_fit_bounds(self, vec_g, max_factor=3.):
n_events = np.sum(vec_g)
bounds = [(0., n_events * max_factor)]
for i, syst_i in enumerate(self.systematics):
bounds.append(syst_i.bounds)
return bounds
def transform_fit_vector(self, vec_fit):
vec_f = self.f_test * vec_fit[0]
vec_fit_transformed = np.zeros(
self.dim_f + self.n_nuissance_parameters, dtype=float)
vec_fit_transformed[:self.dim_f] = vec_f
# carry the nuisance parameters over unchanged
vec_fit_transformed[self.dim_f:] = vec_fit[1:]
return vec_fit_transformed
|
|
from flask_restful import abort, reqparse, Resource
from marshmallow import Schema, fields, ValidationError, pre_load
from flask import Flask, Blueprint, request, jsonify
from flask_cors import CORS, cross_origin
import psycopg2
import os
from os.path import join, dirname
import threading
from time import sleep
import math
import urlparse
# DB_DRIVER=postgresql
# DB_HOST=localhost
# DB_USER=patientplatypus
# DB_PASSWORD=Fvnjty0b
# DB_NAME=pictureswapper
class Inflate:
threads = []
def __init__(self, s):
self.s = s
def printtest(self):
print('inside the printtest for inflation')
def inflatemethod(self):
while True:
# conn = psycopg2.connect(database = os.environ.get('DB_NAME'), user = os.environ.get('DB_USER'), password = os.environ.get('DB_PASSWORD'))
urlparse.uses_netloc.append("postgres")
url = urlparse.urlparse(os.environ["DATABASE_URL"])
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
# conn = psycopg2.connect(database = 'pictureswapper', user = 'patientplatypus', password = 'Fvnjty0b')
cur = conn.cursor()
sql = 'SELECT * FROM logins'
cur.execute(sql)
conn.commit()
userdata = cur.fetchall()
userdataclean = userdata[0]
print('this is the value of userdataclean ', userdataclean)
print('this is the value of userdata ', userdata)
usermoney = []
totalmoney = 0
for x in range(0,len(userdata)):
userdict = {}
userdict['name'] = userdata[x][0]
userdict['money'] = userdata[x][2]
usermoney.append(userdict)
sortedusers = sorted(usermoney, key=lambda k: k['money'])
print('sortedusers before adding: ', sortedusers)
# give +1 to users ranked in the bottom 20% whose money is still below
# that of the user at the 20th percentile
for x in range(0, len(sortedusers)):
if 100 * x / len(sortedusers) <= 20 and sortedusers[x]['money'] < sortedusers[int(math.ceil(0.2 * len(sortedusers)))]['money']:
sortedusers[x]['money'] = sortedusers[x]['money'] + 1
print('sortedusers after adding: ', sortedusers)
# now send to database
for x in range(0, len(sortedusers)):
sql = 'UPDATE logins SET totalmoney = %s WHERE username = %s'
params = (sortedusers[x]['money'], sortedusers[x]['name'],)
cur.execute(sql, params)
conn.commit()
conn.close()
sleep(300000)
def timermethod(self):
t = threading.Thread(target=self.inflatemethod, args=())
t.start()
# totalmoney = totalmoney + userdata[x][2]
# for x in range(0, len(usermoney)):
# usermoney[x]['percentage'] = 100 * usermoney[x]['money'] / totalmoney
# print('***** values after first look *****')
# print('usermoney ', usermoney)
# print('totalmoney ', totalmoney)
#
# for x in range(0, len(usermoney)):
# if usermoney[x]['percentage'] <= 20:
# sql = 'UPDATE logins SET totalmoney = %s WHERE username = %s'
# newtotalmoney = usermoney[x]['money']+10
# params = (newtotalmoney, usermoney[x]['name'],)
# cur.execute(sql, params)
# conn.commit()
#
# sleep(5)
#
# sql = 'SELECT * FROM logins'
# cur.execute(sql)
# conn.commit()
# userdatanew = cur.fetchall()
# usermoneynew = []
# totalmoneynew = 0
# for x in range(0,len(userdatanew)):
# userdict = {}
# userdict['name'] = userdatanew[x][0]
# userdict['money'] = userdatanew[x][2]
# usermoneynew.append(userdict)
# totalmoneynew = totalmoneynew + userdatanew[x][2]
# for x in range(0, len(usermoneynew)):
# usermoneynew[x]['percentage'] = 100 * usermoneynew[x]['money'] / totalmoney
#
#
# print('***** values after add money *****')
# print('usermoneynew ', usermoneynew)
# print('totalmoneynew ', totalmoneynew)
#
# print('***** the total number of users *****')
# print('total number of users: ', len(userdata))
#
# conn.close()
#
# alist = [54,26,93,17,77,31,44,55,20]
# bubbleSort(alist)
# print(alist)
#
# class Typewriter(threading.Thread):
# def __init__(self, your_string):
# threading.Thread.__init__(self)
# self.my_string = your_string
#
# def run(self):
# for char in self.my_string:
# libtcod.console_print(0,3,3,char)
# time.sleep(50)
# import threading
#
# def worker():
# """thread worker function"""
# print 'Worker'
# return
#
# threads = []
# for i in range(5):
# t = threading.Thread(target=worker)
# threads.append(t)
# t.start()
#!/usr/bin/python
#
# import threading
# import time
#
# exitFlag = 0
#
# class myThread (threading.Thread):
# def __init__(self, threadID, name, counter):
# threading.Thread.__init__(self)
# self.threadID = threadID
# self.name = name
# self.counter = counter
# def run(self):
# print "Starting " + self.name
# print_time(self.name, self.counter, 5)
# print "Exiting " + self.name
#
# def print_time(threadName, counter, delay):
# while counter:
# if exitFlag:
# threadName.exit()
# time.sleep(delay)
# print "%s: %s" % (threadName, time.ctime(time.time()))
# counter -= 1
#
# # Create new threads
# thread1 = myThread(1, "Thread-1", 1)
# thread2 = myThread(2, "Thread-2", 2)
#
# # Start new Threads
# thread1.start()
# thread2.start()
#
# print "Exiting Main Thread"
# class Dog:
#
# def __init__(self, name):
# self.name = name
# self.tricks = [] # creates a new empty list for each dog
#
# def add_trick(self, trick):
# self.tricks.append(trick)
|
|
import shutil
import time
import unittest
from unittest import TestCase
from wopmars.tests.resource.model.FooBase import FooBase
from wopmars.tests.resource.wrapper.FooWrapper5 import FooWrapper5
from wopmars.tests.resource.wrapper.sprintFive.Add import Add
from wopmars.tests.resource.wrapper.sprintFive.Query import Query
from wopmars.SQLManager import SQLManager
from wopmars.models.TableInputOutputInformation import TableInputOutputInformation
from wopmars.models.FileInputOutputInformation import FileInputOutputInformation
from wopmars.models.TableModificationTime import TableModificationTime
from wopmars.models.Option import Option
from wopmars.models.TypeInputOrOutput import TypeInputOrOutput
from wopmars.ToolWrapperThread import ToolWrapperThread
from wopmars.utils.OptionManager import OptionManager
from wopmars.utils.PathManager import PathManager
from wopmars.utils.various import get_current_time
class TestToolWrapperThread(TestCase):
def setUp(self):
OptionManager.initial_test_setup() # Set tests arguments
SQLManager.instance().create_all() # Create database with tables
for i in range(10000):
SQLManager.instance().get_session().add(FooBase(name="foo " + str(i)))
SQLManager.instance().get_session().commit()
def test_run(self):
input_entry = TypeInputOrOutput(is_input=True)
output_entry = TypeInputOrOutput(is_input=False)
f1 = FileInputOutputInformation(file_key="input1", path="resource/input_files/input_file1.txt")
f1.relation_file_or_tableioinfo_to_typeio = input_entry
f2 = FileInputOutputInformation(file_key="output1", path="outdir/output_file1.txt")
f2.relation_file_or_tableioinfo_to_typeio = output_entry
t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
t1.set_table(FooBase)
t1.relation_file_or_tableioinfo_to_typeio = output_entry
timestamp_millis, timestamp_human = get_current_time()
modification_table_entry = TableModificationTime(mtime_epoch_millis=timestamp_millis, table_name=t1.table_name)
t1.modification = modification_table_entry
tw1 = FooWrapper5(rule_name="rule1")
tw1.relation_toolwrapper_to_fileioinfo.extend([f1, f2])
tw1.relation_toolwrapper_to_tableioinfo.append(t1)
f12 = FileInputOutputInformation(file_key="input1", path="resource/input_files/input_file1.txt")
f12.relation_file_or_tableioinfo_to_typeio = input_entry
f22 = FileInputOutputInformation(file_key="output1", path="outdir/output_file1.txt")
f22.relation_file_or_tableioinfo_to_typeio = output_entry
t12 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
t12.set_table(FooBase)
t12.relation_file_or_tableioinfo_to_typeio = output_entry
timestamp_millis, timestamp_human = get_current_time()
modification_table_entry = TableModificationTime(
mtime_epoch_millis=timestamp_millis, table_name=t12.table_name)
t12.modification = modification_table_entry
tw2 = FooWrapper5(rule_name="rule2")
tw2.relation_toolwrapper_to_fileioinfo.extend([f12, f22])
tw2.relation_toolwrapper_to_tableioinfo.append(t12)
f13 = FileInputOutputInformation(file_key="input1", path="resource/input_files/input_file1.txt")
f13.relation_file_or_tableioinfo_to_typeio = input_entry
f23 = FileInputOutputInformation(file_key="output1", path="outdir/output_file1.txt")
f23.relation_file_or_tableioinfo_to_typeio = output_entry
t13 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
t13.set_table(FooBase)
t13.relation_file_or_tableioinfo_to_typeio = output_entry
timestamp_millis, timestamp_human = get_current_time()
modification_table_entry = TableModificationTime(
mtime_epoch_millis=timestamp_millis, table_name=t13.table_name)
t13.modification = modification_table_entry
tw3 = FooWrapper5(rule_name="rule3")
tw3.relation_toolwrapper_to_fileioinfo.extend([f13, f23])
tw3.relation_toolwrapper_to_tableioinfo.append(t13)
tt1 = ToolWrapperThread(tw1)
tt2 = ToolWrapperThread(tw2)
tt3 = ToolWrapperThread(tw3)
tt1.start()
tt2.start()
tt3.start()
tt1.join()
tt2.join()
tt3.join()
self.assertEqual(len(SQLManager.instance().get_session().query(FooBase).filter(FooBase.name.like('Foowrapper5 - %')).all()), 3000)
def test_run_commit_vs_query(self):
# this test does not work with mysql and postgresql
if SQLManager.instance().engine.url.drivername not in ['mysql', 'postgresql']:
input_entry = TypeInputOrOutput(is_input=True)
output_entry = TypeInputOrOutput(is_input=False)
f1 = FileInputOutputInformation(file_key="input1", path="resource/input_files/input_file1.txt")
f1.relation_file_or_tableioinfo_to_typeio = input_entry
t1 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
t1.set_table(FooBase)
t1.relation_file_or_tableioinfo_to_typeio = output_entry
timestamp_millis, timestamp_human = get_current_time()
modification_table_entry = TableModificationTime(mtime_epoch_millis=timestamp_millis, table_name=t1.table_name)
t1.modification = modification_table_entry
o1 = Option(name="rows", value="1000")
tw1 = Add(rule_name="rule1")
tw1.relation_toolwrapper_to_fileioinfo.append(f1)
tw1.relation_toolwrapper_to_tableioinfo.append(t1)
tw1.relation_toolwrapper_to_option.append(o1)
f12 = FileInputOutputInformation(file_key="input1", path="resource/input_files/input_file1.txt")
f12.relation_file_or_tableioinfo_to_typeio = input_entry
t12 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
t12.set_table(FooBase)
t12.relation_file_or_tableioinfo_to_typeio = output_entry
timestamp_millis, timestamp_human = get_current_time()
modification_table_entry = TableModificationTime(mtime_epoch_millis=timestamp_millis,
table_name=t12.table_name)
t12.modification = modification_table_entry
o12 = Option(name="rows", value="1000")
tw12 = Add(rule_name="rule1")
tw12.relation_toolwrapper_to_fileioinfo.append(f12)
tw12.relation_toolwrapper_to_tableioinfo.append(t12)
tw12.relation_toolwrapper_to_option.append(o12)
f13 = FileInputOutputInformation(file_key="input1", path="resource/input_files/input_file1.txt")
f13.relation_file_or_tableioinfo_to_typeio = input_entry
t13 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
t13.set_table(FooBase)
t13.relation_file_or_tableioinfo_to_typeio = output_entry
timestamp_millis, timestamp_human = get_current_time()
modification_table_entry = TableModificationTime(mtime_epoch_millis=timestamp_millis,
table_name=t13.table_name)
t13.modification = modification_table_entry
o13 = Option(name="rows", value="1000")
tw13 = Add(rule_name="rule1")
tw13.relation_toolwrapper_to_fileioinfo.append(f13)
tw13.relation_toolwrapper_to_tableioinfo.append(t13)
tw13.relation_toolwrapper_to_option.append(o13)
tt1 = ToolWrapperThread(tw1)
tt2 = ToolWrapperThread(tw12)
tt3 = ToolWrapperThread(tw13)
t21 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
t21.set_table(FooBase)
t21.relation_file_or_tableioinfo_to_typeio = input_entry
tw21 = Query(rule_name="rule1")
tw21.relation_toolwrapper_to_tableioinfo.append(t21)
t22 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
t22.set_table(FooBase)
t22.relation_file_or_tableioinfo_to_typeio = input_entry
tw22 = Query(rule_name="rule1")
tw22.relation_toolwrapper_to_tableioinfo.append(t22)
t23 = TableInputOutputInformation(model_py_path="FooBase", table_key="FooBase", table_name="FooBase")
t23.set_table(FooBase)
t23.relation_file_or_tableioinfo_to_typeio = input_entry
tw23 = Query(rule_name="rule1")
tw23.relation_toolwrapper_to_tableioinfo.append(t23)
tt4 = ToolWrapperThread(tw21)
tt5 = ToolWrapperThread(tw22)
tt6 = ToolWrapperThread(tw23)
tt4.start()
tt1.start()
tt2.start()
tt3.start()
time.sleep(5)
tt5.start()
tt6.start()
tt1.join()
tt2.join()
tt3.join()
tt4.join()
tt5.join()
tt6.join()
def tearDown(self):
SQLManager.instance().get_session().close()
SQLManager.instance().drop_all()
shutil.rmtree("wopmars/tests/outdir", ignore_errors=True)
if __name__ == '__main__':
unittest.main()
|
|
"""
Tests of the neo.core.irregularlysampledsignal.IrregularySampledSignal class
"""
import unittest
import os
import pickle
import warnings
from copy import deepcopy
import numpy as np
import quantities as pq
from numpy.testing import assert_array_equal
from neo.core.dataobject import ArrayDict
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
try:
import scipy
except ImportError:
HAVE_SCIPY = False
else:
HAVE_SCIPY = True
from neo.core.irregularlysampledsignal import IrregularlySampledSignal
from neo.core import Segment, ChannelIndex
from neo.core.baseneo import MergeError
from neo.test.tools import (assert_arrays_almost_equal, assert_arrays_equal,
assert_neo_object_is_compliant, assert_same_sub_schema,
assert_same_attributes, assert_same_annotations,
assert_same_array_annotations)
from neo.test.generate_datasets import (get_fake_value, get_fake_values, fake_neo,
TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.annotations = {
str(x): TEST_ANNOTATIONS[x] for x in range(len(TEST_ANNOTATIONS))}
def test__get_fake_values(self):
self.annotations['seed'] = 0
times = get_fake_value('times', pq.Quantity, seed=0, dim=1)
signal = get_fake_value('signal', pq.Quantity, seed=1, dim=2)
name = get_fake_value('name', str, seed=2, obj=IrregularlySampledSignal)
description = get_fake_value('description', str, seed=3, obj='IrregularlySampledSignal')
file_origin = get_fake_value('file_origin', str)
arr_ann = get_fake_value('array_annotations', dict, seed=5,
obj=IrregularlySampledSignal, n=1)
attrs1 = {'name': name, 'description': description, 'file_origin': file_origin}
attrs2 = attrs1.copy()
attrs2.update(self.annotations)
attrs2['array_annotations'] = arr_ann
res11 = get_fake_values(IrregularlySampledSignal, annotate=False, seed=0)
res12 = get_fake_values('IrregularlySampledSignal', annotate=False, seed=0)
res21 = get_fake_values(IrregularlySampledSignal, annotate=True, seed=0)
res22 = get_fake_values('IrregularlySampledSignal', annotate=True, seed=0)
assert_array_equal(res11.pop('times'), times)
assert_array_equal(res12.pop('times'), times)
assert_array_equal(res21.pop('times'), times)
assert_array_equal(res22.pop('times'), times)
assert_array_equal(res11.pop('signal'), signal)
assert_array_equal(res12.pop('signal'), signal)
assert_array_equal(res21.pop('signal'), signal)
assert_array_equal(res22.pop('signal'), signal)
self.assertEqual(res11, attrs1)
self.assertEqual(res12, attrs1)
# Array annotations need to be compared separately
# because numpy arrays define equality differently
arr_ann_res21 = res21.pop('array_annotations')
arr_ann_attrs2 = attrs2.pop('array_annotations')
self.assertEqual(res21, attrs2)
assert_arrays_equal(arr_ann_res21['valid'], arr_ann_attrs2['valid'])
assert_arrays_equal(arr_ann_res21['number'], arr_ann_attrs2['number'])
arr_ann_res22 = res22.pop('array_annotations')
self.assertEqual(res22, attrs2)
assert_arrays_equal(arr_ann_res22['valid'], arr_ann_attrs2['valid'])
assert_arrays_equal(arr_ann_res22['number'], arr_ann_attrs2['number'])
def test__fake_neo__cascade(self):
self.annotations['seed'] = None
obj_type = IrregularlySampledSignal
cascade = True
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, IrregularlySampledSignal))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
def test__fake_neo__nocascade(self):
self.annotations['seed'] = None
obj_type = 'IrregularlySampledSignal'
cascade = False
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, IrregularlySampledSignal))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
class TestIrregularlySampledSignalConstruction(unittest.TestCase):
def test_IrregularlySampledSignal_creation_times_units_signal_units(self):
params = {'test2': 'y1', 'test3': True}
arr_ann = {'anno1': [23], 'anno2': ['A']}
sig = IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.ms, signal=[20., 40., 60.] * pq.mV,
name='test', description='tester', file_origin='test.file',
test1=1, array_annotations=arr_ann, **params)
sig.annotate(test1=1.1, test0=[1, 2])
assert_neo_object_is_compliant(sig)
assert_array_equal(sig.times, [1.1, 1.5, 1.7] * pq.ms)
assert_array_equal(np.asarray(sig).flatten(), np.array([20., 40., 60.]))
self.assertEqual(sig.units, pq.mV)
self.assertEqual(sig.name, 'test')
self.assertEqual(sig.description, 'tester')
self.assertEqual(sig.file_origin, 'test.file')
self.assertEqual(sig.annotations['test0'], [1, 2])
self.assertEqual(sig.annotations['test1'], 1.1)
self.assertEqual(sig.annotations['test2'], 'y1')
self.assertTrue(sig.annotations['test3'])
assert_arrays_equal(sig.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(sig.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(sig.array_annotations, ArrayDict)
def test_IrregularlySampledSignal_creation_units_arg(self):
params = {'test2': 'y1', 'test3': True}
arr_ann = {'anno1': [23], 'anno2': ['A']}
sig = IrregularlySampledSignal([1.1, 1.5, 1.7], signal=[20., 40., 60.], units=pq.V,
time_units=pq.s, name='test', description='tester',
file_origin='test.file', test1=1,
array_annotations=arr_ann, **params)
sig.annotate(test1=1.1, test0=[1, 2])
assert_neo_object_is_compliant(sig)
assert_array_equal(sig.times, [1.1, 1.5, 1.7] * pq.s)
assert_array_equal(np.asarray(sig).flatten(), np.array([20., 40., 60.]))
self.assertEqual(sig.units, pq.V)
self.assertEqual(sig.name, 'test')
self.assertEqual(sig.description, 'tester')
self.assertEqual(sig.file_origin, 'test.file')
self.assertEqual(sig.annotations['test0'], [1, 2])
self.assertEqual(sig.annotations['test1'], 1.1)
self.assertEqual(sig.annotations['test2'], 'y1')
self.assertTrue(sig.annotations['test3'])
assert_arrays_equal(sig.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(sig.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(sig.array_annotations, ArrayDict)
def test_IrregularlySampledSignal_creation_units_rescale(self):
params = {'test2': 'y1', 'test3': True}
arr_ann = {'anno1': [23], 'anno2': ['A']}
sig = IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.s, signal=[2., 4., 6.] * pq.V,
units=pq.mV, time_units=pq.ms, name='test',
description='tester', file_origin='test.file', test1=1,
array_annotations=arr_ann, **params)
sig.annotate(test1=1.1, test0=[1, 2])
assert_neo_object_is_compliant(sig)
assert_array_equal(sig.times, [1100, 1500, 1700] * pq.ms)
assert_array_equal(np.asarray(sig).flatten(), np.array([2000., 4000., 6000.]))
self.assertEqual(sig.units, pq.mV)
self.assertEqual(sig.name, 'test')
self.assertEqual(sig.description, 'tester')
self.assertEqual(sig.file_origin, 'test.file')
self.assertEqual(sig.annotations['test0'], [1, 2])
self.assertEqual(sig.annotations['test1'], 1.1)
self.assertEqual(sig.annotations['test2'], 'y1')
self.assertTrue(sig.annotations['test3'])
assert_arrays_equal(sig.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(sig.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(sig.array_annotations, ArrayDict)
def test_IrregularlySampledSignal_different_lens_ValueError(self):
times = [1.1, 1.5, 1.7] * pq.ms
signal = [20., 40., 60., 70.] * pq.mV
self.assertRaises(ValueError, IrregularlySampledSignal, times, signal)
def test_IrregularlySampledSignal_no_signal_units_ValueError(self):
times = [1.1, 1.5, 1.7] * pq.ms
signal = [20., 40., 60.]
self.assertRaises(ValueError, IrregularlySampledSignal, times, signal)
def test_IrregularlySampledSignal_no_time_units_ValueError(self):
times = [1.1, 1.5, 1.7]
signal = [20., 40., 60.] * pq.mV
self.assertRaises(ValueError, IrregularlySampledSignal, times, signal)
class TestIrregularlySampledSignalProperties(unittest.TestCase):
def setUp(self):
self.times = [np.arange(10.0) * pq.s, np.arange(-100.0, 100.0, 10.0) * pq.ms,
np.arange(100) * pq.ns]
self.data = [np.arange(10.0) * pq.nA, np.arange(-100.0, 100.0, 10.0) * pq.mV,
np.random.uniform(size=100) * pq.uV]
self.signals = [IrregularlySampledSignal(t, signal=D, testattr='test') for D, t in
zip(self.data, self.times)]
def test__compliant(self):
for signal in self.signals:
assert_neo_object_is_compliant(signal)
def test__t_start_getter(self):
for signal, times in zip(self.signals, self.times):
self.assertAlmostEqual(signal.t_start, times[0], delta=1e-15)
def test__t_stop_getter(self):
for signal, times in zip(self.signals, self.times):
self.assertAlmostEqual(signal.t_stop, times[-1], delta=1e-15)
def test__duration_getter(self):
for signal, times in zip(self.signals, self.times):
self.assertAlmostEqual(signal.duration, times[-1] - times[0], delta=1e-15)
def test__sampling_intervals_getter(self):
for signal, times in zip(self.signals, self.times):
assert_arrays_almost_equal(signal.sampling_intervals, np.diff(times), threshold=1e-15)
def test_IrregularlySampledSignal_repr(self):
sig = IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.s, signal=[2., 4., 6.] * pq.V,
name='test', description='tester', file_origin='test.file',
test1=1)
assert_neo_object_is_compliant(sig)
if np.__version__.split(".")[:2] > ['1', '13']:
# see https://github.com/numpy/numpy/blob/master/doc/release/1.14.0-notes.rst#many
# -changes-to-array-printing-disableable-with-the-new-legacy-printing-mode
targ = (
'<IrregularlySampledSignal(array([[2.],\n [4.],\n [6.]]) * V '
'' + 'at times [1.1 1.5 1.7] s)>')
else:
targ = (
'<IrregularlySampledSignal(array([[ 2.],\n [ 4.],\n [ 6.]]) '
'* V ' + 'at times [ 1.1 1.5 1.7] s)>')
res = repr(sig)
self.assertEqual(targ, res)
class TestIrregularlySampledSignalArrayMethods(unittest.TestCase):
def setUp(self):
self.data1 = np.arange(10.0)
self.data1quant = self.data1 * pq.mV
self.time1 = np.logspace(1, 5, 10)
self.time1quant = self.time1 * pq.ms
self.arr_ann = {'anno1': [23], 'anno2': ['A']}
self.signal1 = IrregularlySampledSignal(self.time1quant, signal=self.data1quant,
name='spam', description='eggs',
file_origin='testfile.txt', arg1='test',
array_annotations=self.arr_ann)
self.signal1.segment = Segment()
self.signal1.channel_index = ChannelIndex([0])
def test__compliant(self):
assert_neo_object_is_compliant(self.signal1)
self.assertEqual(self.signal1.name, 'spam')
self.assertEqual(self.signal1.description, 'eggs')
self.assertEqual(self.signal1.file_origin, 'testfile.txt')
self.assertEqual(self.signal1.annotations, {'arg1': 'test'})
assert_arrays_equal(self.signal1.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(self.signal1.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(self.signal1.array_annotations, ArrayDict)
def test__slice_should_return_IrregularlySampledSignal(self):
result = self.signal1[3:8]
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(result.size, 5)
self.assertEqual(result.t_start, self.time1quant[3])
self.assertEqual(result.t_stop, self.time1quant[7])
assert_array_equal(self.time1quant[3:8], result.times)
assert_array_equal(self.data1[3:8].reshape(-1, 1), result.magnitude)
# Test other attributes were copied over (in this case, defaults)
self.assertEqual(result.file_origin, self.signal1.file_origin)
self.assertEqual(result.name, self.signal1.name)
self.assertEqual(result.description, self.signal1.description)
self.assertEqual(result.annotations, self.signal1.annotations)
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test__getitem_should_return_single_quantity(self):
self.assertEqual(self.signal1[0], 0 * pq.mV)
self.assertEqual(self.signal1[9], 9 * pq.mV)
self.assertRaises(IndexError, self.signal1.__getitem__, 10)
def test__getitem_out_of_bounds_IndexError(self):
self.assertRaises(IndexError, self.signal1.__getitem__, 10)
def test_comparison_operators(self):
assert_array_equal(self.signal1 >= 5 * pq.mV, np.array(
[[False, False, False, False, False, True, True, True, True, True]]).T)
assert_array_equal(self.signal1 == 5 * pq.mV, np.array(
[[False, False, False, False, False, True, False, False, False, False]]).T)
assert_array_equal(self.signal1 == self.signal1, np.array(
[[True, True, True, True, True, True, True, True, True, True]]).T)
def test__comparison_as_indexing_single_trace(self):
self.assertEqual(self.signal1[self.signal1 == 5], [5 * pq.mV])
def test__comparison_as_indexing_multi_trace(self):
signal = IrregularlySampledSignal(self.time1quant, np.arange(20).reshape((-1, 2)) * pq.V)
assert_array_equal(signal[signal < 10],
np.array([[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]]).T * pq.V)
def test__indexing_keeps_order_across_channels(self):
# IrregularlySampledSignal with 10 traces, each having 5 samples
# (e.g. trace 0 = [0, 10, 20, 30, 40])
data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)])
mask = np.full((5, 10), fill_value=False, dtype=bool)
# selecting one entry per trace
mask[[0, 1, 0, 3, 0, 2, 4, 3, 1, 4], range(10)] = True
signal = IrregularlySampledSignal(np.arange(5) * pq.s, np.array(data) * pq.V)
assert_array_equal(signal[mask], np.array([[0, 11, 2, 33, 4, 25, 46, 37, 18, 49]]) * pq.V)
def test__indexing_keeps_order_across_time(self):
# IrregularlySampledSignal with 10 traces, each having 5 samples
# (e.g. trace 0 = [0, 10, 20, 30, 40])
data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)])
mask = np.full((5, 10), fill_value=False, dtype=bool)
# selecting two entries per trace
temporal_ids = [0, 1, 0, 3, 1, 2, 4, 2, 1, 4] + [4, 3, 2, 1, 0, 1, 2, 3, 2, 1]
mask[temporal_ids, list(range(10)) + list(range(10))] = True
signal = IrregularlySampledSignal(np.arange(5) * pq.s, np.array(data) * pq.V)
assert_array_equal(signal[mask], np.array([[0, 11, 2, 13, 4, 15, 26, 27, 18, 19],
[40, 31, 22, 33, 14, 25, 46, 37, 28,
49]]) * pq.V)
def test__comparison_with_inconsistent_units_should_raise_Exception(self):
self.assertRaises(ValueError, self.signal1.__gt__, 5 * pq.nA)
def test_simple_statistics(self):
targmean = self.signal1[:-1] * np.diff(self.time1quant).reshape(-1, 1)
targmean = targmean.sum() / (self.time1quant[-1] - self.time1quant[0])
self.assertEqual(self.signal1.max(), 9 * pq.mV)
self.assertEqual(self.signal1.min(), 0 * pq.mV)
self.assertEqual(self.signal1.mean(), targmean)
def test_mean_interpolation_NotImplementedError(self):
self.assertRaises(NotImplementedError, self.signal1.mean, True)
def test__rescale_same(self):
result = self.signal1.copy()
result = result.rescale(pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(result.units, 1 * pq.mV)
assert_array_equal(result.magnitude, self.data1.reshape(-1, 1))
assert_array_equal(result.times, self.time1quant)
assert_same_sub_schema(result, self.signal1)
self.assertIsInstance(result.channel_index, ChannelIndex)
self.assertIsInstance(result.segment, Segment)
self.assertIs(result.channel_index, self.signal1.channel_index)
self.assertIs(result.segment, self.signal1.segment)
def test__rescale_new(self):
result = self.signal1.copy()
result = result.rescale(pq.uV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(result.units, 1 * pq.uV)
assert_arrays_almost_equal(np.array(result), self.data1.reshape(-1, 1) * 1000., 1e-10)
assert_array_equal(result.times, self.time1quant)
self.assertIsInstance(result.channel_index, ChannelIndex)
self.assertIsInstance(result.segment, Segment)
self.assertIs(result.channel_index, self.signal1.channel_index)
self.assertIs(result.segment, self.signal1.segment)
def test__rescale_new_incompatible_ValueError(self):
self.assertRaises(ValueError, self.signal1.rescale, pq.nA)
def test_time_slice(self):
targdataquant = [[1.0], [2.0], [3.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[1:4] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = 15
t_stop = 250
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test__time_slice_deepcopy_annotations(self):
params1 = {'test0': 'y1', 'test1': ['deeptest'], 'test2': True}
self.signal1.annotate(**params1)
result = self.signal1.time_slice(None, None)
# Change annotations of original
params2 = {'test0': 'y2', 'test2': False}
self.signal1.annotate(**params2)
self.signal1.annotations['test1'][0] = 'shallowtest'
self.assertNotEqual(self.signal1.annotations['test0'], result.annotations['test0'])
self.assertNotEqual(self.signal1.annotations['test1'], result.annotations['test1'])
self.assertNotEqual(self.signal1.annotations['test2'], result.annotations['test2'])
# Change annotations of result
params3 = {'test0': 'y3'}
result.annotate(**params3)
result.annotations['test1'][0] = 'shallowtest2'
self.assertNotEqual(self.signal1.annotations['test0'], result.annotations['test0'])
self.assertNotEqual(self.signal1.annotations['test1'], result.annotations['test1'])
self.assertNotEqual(self.signal1.annotations['test2'], result.annotations['test2'])
def test__time_slice_deepcopy_array_annotations(self):
length = self.signal1.shape[-1]
params1 = {'test0': ['y{}'.format(i) for i in range(length)],
'test1': ['deeptest' for i in range(length)],
'test2': [(-1)**i > 0 for i in range(length)]}
self.signal1.array_annotate(**params1)
result = self.signal1.time_slice(None, None)
# Change annotations of original
params2 = {'test0': ['x{}'.format(i) for i in range(length)],
'test2': [(-1) ** (i + 1) > 0 for i in range(length)]}
self.signal1.array_annotate(**params2)
self.signal1.array_annotations['test1'][0] = 'shallowtest'
self.assertFalse(all(self.signal1.array_annotations['test0']
== result.array_annotations['test0']))
self.assertFalse(all(self.signal1.array_annotations['test1']
== result.array_annotations['test1']))
self.assertFalse(all(self.signal1.array_annotations['test2']
== result.array_annotations['test2']))
# Change annotations of result
params3 = {'test0': ['z{}'.format(i) for i in range(1, result.shape[-1]+1)]}
result.array_annotate(**params3)
result.array_annotations['test1'][0] = 'shallow2'
self.assertFalse(all(self.signal1.array_annotations['test0']
== result.array_annotations['test0']))
self.assertFalse(all(self.signal1.array_annotations['test1']
== result.array_annotations['test1']))
self.assertFalse(all(self.signal1.array_annotations['test2']
== result.array_annotations['test2']))
def test__time_slice_deepcopy_data(self):
result = self.signal1.time_slice(None, None)
# Change values of original array
self.signal1[2] = 7.3*self.signal1.units
self.assertFalse(all(self.signal1 == result))
# Change values of sliced array
result[3] = 9.5*result.units
self.assertFalse(all(self.signal1 == result))
    def test_time_slice_out_of_boundaries(self):
targdataquant = self.data1quant
targtimequant = self.time1quant
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = 0
t_stop = 2500000
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test_time_slice_empty(self):
targdataquant = [] * pq.mV
targtimequant = [] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = 15
t_stop = 250
result = targ_signal.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(result.array_annotations, {})
self.assertIsInstance(result.array_annotations, ArrayDict)
def test_time_slice_none_stop(self):
targdataquant = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0], [9.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[1:10] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = 15
t_stop = None
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test_time_slice_none_start(self):
targdataquant = [[0.0], [1.0], [2.0], [3.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[0:4] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = None
t_stop = 250
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test_time_slice_none_both(self):
targdataquant = [[0.0], [1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0],
[9.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[0:10] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = None
t_stop = None
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
    def test_time_slice_different_units(self):
targdataquant = [[1.0], [2.0], [3.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[1:4] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
        t_start = 0.015 * pq.s
        t_stop = 0.250 * pq.s
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test__time_slice_should_set_parents_to_None(self):
# When timeslicing, a deep copy is made,
# thus the reference to parent objects should be destroyed
result = self.signal1.time_slice(1 * pq.ms, 3 * pq.ms)
self.assertEqual(result.segment, None)
self.assertEqual(result.channel_index, None)
def test__deepcopy_should_set_parents_objects_to_None(self):
# Deepcopy should destroy references to parents
result = deepcopy(self.signal1)
self.assertEqual(result.segment, None)
self.assertEqual(result.channel_index, None)
def test__time_shift_same_attributes(self):
result = self.signal1.time_shift(1 * pq.ms)
assert_same_attributes(result, self.signal1, exclude=['times', 't_start', 't_stop'])
def test__time_shift_same_annotations(self):
result = self.signal1.time_shift(1 * pq.ms)
assert_same_annotations(result, self.signal1)
def test__time_shift_same_array_annotations(self):
result = self.signal1.time_shift(1 * pq.ms)
assert_same_array_annotations(result, self.signal1)
def test__time_shift_should_set_parents_to_None(self):
# When time-shifting, a deep copy is made,
# thus the reference to parent objects should be destroyed
result = self.signal1.time_shift(1 * pq.ms)
self.assertEqual(result.segment, None)
self.assertEqual(result.channel_index, None)
def test__time_shift_by_zero(self):
shifted = self.signal1.time_shift(0 * pq.ms)
assert_arrays_equal(shifted.times, self.signal1.times)
def test__time_shift_same_units(self):
shifted = self.signal1.time_shift(10 * pq.ms)
assert_arrays_equal(shifted.times, self.signal1.times + 10 * pq.ms)
def test__time_shift_different_units(self):
shifted = self.signal1.time_shift(1 * pq.s)
assert_arrays_equal(shifted.times, self.signal1.times + 1000 * pq.ms)
def test_as_array(self):
sig_as_arr = self.signal1.as_array()
self.assertIsInstance(sig_as_arr, np.ndarray)
assert_array_equal(self.data1, sig_as_arr.flat)
def test_as_quantity(self):
sig_as_q = self.signal1.as_quantity()
self.assertIsInstance(sig_as_q, pq.Quantity)
assert_array_equal(self.data1, sig_as_q.magnitude.flat)
def test__copy_should_preserve_parent_objects(self):
result = self.signal1.copy()
self.assertIs(result.segment, self.signal1.segment)
self.assertIs(result.channel_index, self.signal1.channel_index)
@unittest.skipUnless(HAVE_SCIPY, "requires Scipy")
def test_resample(self):
factors = [1, 2, 10]
for factor in factors:
result = self.signal1.resample(self.signal1.shape[0] * factor)
np.testing.assert_allclose(self.signal1.magnitude, result.magnitude[::factor],
rtol=1e-7, atol=0)
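            # Restating the check above: every `factor`-th sample of the
            # resampled signal matches the original to floating-point
            # tolerance.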
class TestIrregularlySampledSignalCombination(unittest.TestCase):
def setUp(self):
self.data1 = np.arange(10.0)
self.data1quant = self.data1 * pq.mV
self.time1 = np.logspace(1, 5, 10)
self.time1quant = self.time1 * pq.ms
self.arr_ann = {'anno1': [23], 'anno2': ['A']}
self.signal1 = IrregularlySampledSignal(self.time1quant, signal=self.data1quant,
name='spam', description='eggs',
file_origin='testfile.txt', arg1='test',
array_annotations=self.arr_ann)
def test__compliant(self):
assert_neo_object_is_compliant(self.signal1)
self.assertEqual(self.signal1.name, 'spam')
self.assertEqual(self.signal1.description, 'eggs')
self.assertEqual(self.signal1.file_origin, 'testfile.txt')
self.assertEqual(self.signal1.annotations, {'arg1': 'test'})
assert_arrays_equal(self.signal1.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(self.signal1.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(self.signal1.array_annotations, ArrayDict)
def test__add_const_quantity_should_preserve_data_complement(self):
result = self.signal1 + 0.065 * pq.V
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) + 65)
assert_array_equal(result.times, self.time1quant)
self.assertEqual(self.signal1[9], 9 * pq.mV)
self.assertEqual(result[9], 74 * pq.mV)
def test__add_two_consistent_signals_should_preserve_data_complement(self):
data2 = np.arange(10.0, 20.0)
data2quant = data2 * pq.mV
signal2 = IrregularlySampledSignal(self.time1quant, signal=data2quant)
assert_neo_object_is_compliant(signal2)
result = self.signal1 + signal2
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
targ = IrregularlySampledSignal(self.time1quant, signal=np.arange(10.0, 30.0, 2.0),
units="mV", name='spam', description='eggs',
file_origin='testfile.txt', arg1='test')
assert_neo_object_is_compliant(targ)
assert_array_equal(result, targ)
assert_array_equal(self.time1quant, targ.times)
assert_array_equal(result.times, targ.times)
assert_same_sub_schema(result, targ)
    def test__add_signals_with_inconsistent_times_ValueError(self):
signal2 = IrregularlySampledSignal(self.time1quant * 2., signal=np.arange(10.0),
units="mV")
assert_neo_object_is_compliant(signal2)
self.assertRaises(ValueError, self.signal1.__add__, signal2)
def test__add_signals_with_inconsistent_dimension_ValueError(self):
signal2 = np.arange(20).reshape(2, 10)
self.assertRaises(ValueError, self.signal1.__add__, signal2)
def test__subtract_const_should_preserve_data_complement(self):
result = self.signal1 - 65 * pq.mV
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(self.signal1[9], 9 * pq.mV)
self.assertEqual(result[9], -56 * pq.mV)
assert_array_equal(result.magnitude, (self.data1 - 65).reshape(-1, 1))
assert_array_equal(result.times, self.time1quant)
def test__subtract_from_const_should_return_signal(self):
result = 10 * pq.mV - self.signal1
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(self.signal1[9], 9 * pq.mV)
self.assertEqual(result[9], 1 * pq.mV)
assert_array_equal(result.magnitude, (10 - self.data1).reshape(-1, 1))
assert_array_equal(result.times, self.time1quant)
def test__mult_signal_by_const_float_should_preserve_data_complement(self):
result = self.signal1 * 2.
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(self.signal1[9], 9 * pq.mV)
self.assertEqual(result[9], 18 * pq.mV)
assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) * 2)
assert_array_equal(result.times, self.time1quant)
def test__mult_signal_by_const_array_should_preserve_data_complement(self):
result = self.signal1 * np.array(2.)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(self.signal1[9], 9 * pq.mV)
self.assertEqual(result[9], 18 * pq.mV)
assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) * 2)
assert_array_equal(result.times, self.time1quant)
def test__divide_signal_by_const_should_preserve_data_complement(self):
result = self.signal1 / 0.5
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(self.signal1[9], 9 * pq.mV)
self.assertEqual(result[9], 18 * pq.mV)
assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) / 0.5)
assert_array_equal(result.times, self.time1quant)
@unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
def test__pretty(self):
res = pretty(self.signal1)
signal = self.signal1
targ = (("IrregularlySampledSignal with %d channels of length %d; units %s; datatype %s \n"
"" % (signal.shape[1], signal.shape[0], signal.units.dimensionality.unicode,
signal.dtype))
+ ("name: '{}'\ndescription: '{}'\n".format(signal.name, signal.description))
+ ("annotations: %s\n" % str(signal.annotations))
+ ("sample times: {}".format(signal.times[:10])))
self.assertEqual(res, targ)
def test__merge(self):
data1 = np.arange(1000.0, 1066.0).reshape((11, 6)) * pq.uV
data2 = np.arange(2.0, 2.033, 0.001).reshape((11, 3)) * pq.mV
times1 = np.arange(11.0) * pq.ms
times2 = np.arange(1.0, 12.0) * pq.ms
arr_ann1 = {'anno1': np.arange(6), 'anno2': ['a', 'b', 'c', 'd', 'e', 'f']}
arr_ann2 = {'anno1': np.arange(100, 103), 'anno3': []}
signal1 = IrregularlySampledSignal(times1, data1, name='signal1',
description='test signal', file_origin='testfile.txt',
array_annotations=arr_ann1)
signal2 = IrregularlySampledSignal(times1, data2, name='signal2',
description='test signal', file_origin='testfile.txt',
array_annotations=arr_ann2)
signal3 = IrregularlySampledSignal(times2, data2, name='signal3',
description='test signal', file_origin='testfile.txt')
with warnings.catch_warnings(record=True) as w:
merged12 = signal1.merge(signal2)
            self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, UserWarning)
self.assertSequenceEqual(str(w[0].message), "The following array annotations were "
"omitted, because they were only present"
" in one of the merged objects: "
"['anno2'] from the one that was merged "
"into and ['anno3'] from the one that "
"was merged into the other")
target_data12 = np.hstack([data1, data2.rescale(pq.uV)])
assert_neo_object_is_compliant(signal1)
assert_neo_object_is_compliant(signal2)
assert_neo_object_is_compliant(merged12)
self.assertAlmostEqual(merged12[5, 0], 1030.0 * pq.uV, 9)
self.assertAlmostEqual(merged12[5, 6], 2015.0 * pq.uV, 9)
self.assertEqual(merged12.name, 'merge(signal1, signal2)')
self.assertEqual(merged12.file_origin, 'testfile.txt')
assert_arrays_equal(merged12.array_annotations['anno1'],
np.array([0, 1, 2, 3, 4, 5, 100, 101, 102]))
self.assertIsInstance(merged12.array_annotations, ArrayDict)
assert_arrays_equal(merged12.magnitude, target_data12)
self.assertRaises(MergeError, signal1.merge, signal3)
class TestAnalogSignalFunctions(unittest.TestCase):
def test__pickle(self):
signal1 = IrregularlySampledSignal(np.arange(10.0) / 100 * pq.s, np.arange(10.0),
units="mV")
fobj = open('./pickle', 'wb')
pickle.dump(signal1, fobj)
fobj.close()
fobj = open('./pickle', 'rb')
try:
signal2 = pickle.load(fobj)
except ValueError:
signal2 = None
assert_array_equal(signal1, signal2)
fobj.close()
os.remove('./pickle')
class TestIrregularlySampledSignalEquality(unittest.TestCase):
def test__signals_with_different_times_should_be_not_equal(self):
signal1 = IrregularlySampledSignal(np.arange(10.0) / 100 * pq.s, np.arange(10.0),
units="mV")
signal2 = IrregularlySampledSignal(np.arange(10.0) / 100 * pq.ms, np.arange(10.0),
units="mV")
self.assertNotEqual(signal1, signal2)
if __name__ == "__main__":
unittest.main()
|
|
#!/usr/bin/python
import sys
import deproxy
import unittest
import threading
import logging
import socket
import argparse
import time
deproxy_port_base = 9999
deproxy_port_iter = None
def get_next_deproxy_port():
global deproxy_port_iter
if deproxy_port_iter is None:
def deproxy_port_iter_func():
for i in xrange(deproxy_port_base):
yield deproxy_port_base - i
deproxy_port_iter = deproxy_port_iter_func().next
return deproxy_port_iter()
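# Illustrative note (not part of the original suite): ports are handed out in
# descending order starting from the base, so each test case binds a fresh
# port and cannot collide with an earlier one, e.g.
#   get_next_deproxy_port()  # -> 9999
#   get_next_deproxy_port()  # -> 9998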
class TestDefaultHandler(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_default_handler(self):
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port)
self.assertEquals(int(mc.received_response.code), 200)
class TestEchoHandler(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_echo_handler(self):
headers = {'x-header': '12345'}
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port, headers=headers,
request_body='this is the body',
default_handler=deproxy.echo_handler)
self.assertEquals(int(mc.received_response.code), 200)
self.assertIn('x-header', mc.received_response.headers)
self.assertEquals(mc.received_response.headers['x-header'], '12345')
self.assertEquals(mc.received_response.body, 'this is the body')
class TestDelayHandler(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_delay_handler(self):
handler = deproxy.delay(3, deproxy.simple_handler)
t1 = time.time()
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port,
default_handler=handler)
t2 = time.time()
self.assertEquals(int(mc.received_response.code), 200)
self.assertGreaterEqual(t2 - t1, 3)
self.assertLessEqual(t2 - t1, 3.5)
class TestRoute(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_route(self):
handler = deproxy.route('http', 'httpbin.org', self.deproxy)
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port,
default_handler=handler)
self.assertEquals(int(mc.received_response.code), 200)
class TestCustomHandlers(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_custom_handler_function(self):
def custom_handler(request):
return deproxy.Response(code=606, message="Spoiler",
headers={"Header-Name": "Header-Value"},
body='Snape Kills Dumbledore')
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port,
default_handler=custom_handler)
self.assertEquals(int(mc.received_response.code), 606)
def handler_method(self, request):
return deproxy.Response(code=606, message="Spoiler",
headers={"Header-Name": "Header-Value"},
body='Snape Kills Dumbledore')
def test_custom_handler_method(self):
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port,
default_handler=self.handler_method)
self.assertEquals(int(mc.received_response.code), 606)
class TestEndpointDefaultHandler(unittest.TestCase):
def setUp(self):
self.port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
def test_endpoint_default_handler_function(self):
def custom_handler(request):
return deproxy.Response(code='601', message='Custom', headers={},
body=None)
self.deproxy.add_endpoint(port=self.port,
default_handler=custom_handler)
url = 'http://localhost:{0}/'.format(self.port)
mc = self.deproxy.make_request(url=url)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '601')
self.assertEqual(mc.received_response.code, '601')
def custom_handler_method(self, request):
return deproxy.Response(code='602', message='Custom', headers={},
body=None)
def test_endpoint_default_handler_method(self):
self.deproxy.add_endpoint(port=self.port,
default_handler=self.custom_handler_method)
url = 'http://localhost:{0}/'.format(self.port)
mc = self.deproxy.make_request(url=url)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '602')
self.assertEqual(mc.received_response.code, '602')
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
class TestDeproxyDefaultHandler(unittest.TestCase):
def setUp(self):
self.port = get_next_deproxy_port()
def test_deproxy_default_handler_function(self):
def custom_handler(request):
return deproxy.Response(code='603', message='Custom', headers={},
body=None)
self.deproxy = deproxy.Deproxy(default_handler=custom_handler)
self.deproxy.add_endpoint(port=self.port)
url = 'http://localhost:{0}/'.format(self.port)
mc = self.deproxy.make_request(url=url)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '603')
self.assertEqual(mc.received_response.code, '603')
def custom_handler_method(self, request):
return deproxy.Response(code='604', message='Custom', headers={},
body=None)
def test_deproxy_default_handler_method(self):
self.deproxy = deproxy.Deproxy(
default_handler=self.custom_handler_method)
self.deproxy.add_endpoint(port=self.port)
url = 'http://localhost:{0}/'.format(self.port)
mc = self.deproxy.make_request(url=url)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '604')
self.assertEqual(mc.received_response.code, '604')
def tearDown(self):
if hasattr(self, 'deproxy') and self.deproxy is not None:
self.deproxy.shutdown_all_endpoints()
class TestOrphanedHandlings(unittest.TestCase):
def setUp(self):
self.deproxy_port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(self.deproxy_port)
self.other_client = deproxy.Deproxy()
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def test_orphaned_handling(self):
delayed_handler = deproxy.delay(2, deproxy.simple_handler)
self.long_running_mc = None
class Helper:
mc = None
helper = Helper()
def other_thread():
mc = self.deproxy.make_request('http://localhost:%i/' %
self.deproxy_port,
default_handler=delayed_handler)
helper.mc = mc
t = threading.Thread(target=other_thread)
t.daemon = True
t.start()
self.other_client.make_request('http://localhost:%i/' %
self.deproxy_port)
t.join()
self.assertEqual(len(helper.mc.orphaned_handlings), 1)
class TestEndpointShutdown(unittest.TestCase):
def setUp(self):
self.deproxy_port1 = get_next_deproxy_port()
self.deproxy_port2 = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
def test_shutdown(self):
e1 = self.deproxy.add_endpoint(self.deproxy_port1)
e2 = self.deproxy.add_endpoint(self.deproxy_port2)
e1.shutdown()
try:
e3 = self.deproxy.add_endpoint(self.deproxy_port1)
except socket.error as e:
self.fail('Address already in use: %s' % e)
class TestShutdownAllEndpoints(unittest.TestCase):
def setUp(self):
self.deproxy_port1 = get_next_deproxy_port()
self.deproxy_port2 = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
def test_shutdown(self):
e1 = self.deproxy.add_endpoint(self.deproxy_port1)
e2 = self.deproxy.add_endpoint(self.deproxy_port2)
self.deproxy.shutdown_all_endpoints()
try:
e3 = self.deproxy.add_endpoint(self.deproxy_port1)
except socket.error as e:
            self.fail('add_endpoint threw an exception: %s' % e)
try:
e4 = self.deproxy.add_endpoint(self.deproxy_port2)
except socket.error as e:
            self.fail('add_endpoint threw an exception: %s' % e)
class TestAutomaticRequestHeaders(unittest.TestCase):
def setUp(self):
self.port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.endpoint = self.deproxy.add_endpoint(self.port)
self.url = 'http://localhost:{}/'.format(self.port)
def tearDown(self):
if self.deproxy is not None:
self.deproxy.shutdown_all_endpoints()
def test_not_specified(self):
mc = self.deproxy.make_request(url=self.url)
self.assertIn('Host', mc.sent_request.headers)
#self.assertIn('host', mc.sent_request.headers)
self.assertIn('Accept', mc.sent_request.headers)
self.assertIn('Accept-Encoding', mc.sent_request.headers)
self.assertIn('User-Agent', mc.sent_request.headers)
def test_explicit_on(self):
mc = self.deproxy.make_request(url=self.url, add_default_headers=True)
self.assertIn('Host', mc.sent_request.headers)
#self.assertIn('host', mc.sent_request.headers)
self.assertIn('Accept', mc.sent_request.headers)
self.assertIn('Accept-Encoding', mc.sent_request.headers)
self.assertIn('User-Agent', mc.sent_request.headers)
def test_explicit_off(self):
mc = self.deproxy.make_request(url=self.url, add_default_headers=False)
self.assertNotIn('Host', mc.sent_request.headers)
#self.assertNotIn('host', mc.sent_request.headers)
self.assertNotIn('Accept', mc.sent_request.headers)
self.assertNotIn('Accept-Encoding', mc.sent_request.headers)
self.assertNotIn('User-Agent', mc.sent_request.headers)
class TestDefaultResponseHeaders(unittest.TestCase):
@classmethod
def setUpClass(self):
self.port = get_next_deproxy_port()
self.deproxy = deproxy.Deproxy()
self.endpoint = self.deproxy.add_endpoint(self.port)
self.url = 'http://localhost:{}/'.format(self.port)
@classmethod
def tearDownClass(self):
if self.deproxy is not None:
self.deproxy.shutdown_all_endpoints()
def handler1(self, request):
return deproxy.Response(code=606, message="Spoiler",
headers={"Header-Name": "Header-Value"},
body='Snape Kills Dumbledore')
def handler2(self, request):
return (deproxy.Response(code=606, message="Spoiler",
headers={"Header-Name": "Header-Value"},
body='Snape Kills Dumbledore'), True)
def handler3(self, request):
return (deproxy.Response(code=606, message="Spoiler",
headers={"Header-Name": "Header-Value"},
body='Snape Kills Dumbledore'), False)
def test_not_specified(self):
mc = self.deproxy.make_request(url=self.url,
default_handler=self.handler1)
self.assertEqual(len(mc.handlings), 1)
self.assertIn('server', mc.received_response.headers)
self.assertIn('date', mc.received_response.headers)
self.assertIn('Server', mc.handlings[0].response.headers)
self.assertIn('Date', mc.handlings[0].response.headers)
def test_explicit_on(self):
mc = self.deproxy.make_request(url=self.url,
default_handler=self.handler2)
self.assertEqual(len(mc.handlings), 1)
self.assertIn('server', mc.received_response.headers)
self.assertIn('date', mc.received_response.headers)
self.assertIn('Server', mc.handlings[0].response.headers)
self.assertIn('Date', mc.handlings[0].response.headers)
def test_explicit_off(self):
mc = self.deproxy.make_request(url=self.url,
default_handler=self.handler3)
self.assertEqual(len(mc.handlings), 1)
self.assertNotIn('server', mc.received_response.headers)
self.assertNotIn('date', mc.received_response.headers)
self.assertNotIn('server', mc.handlings[0].response.headers)
self.assertNotIn('date', mc.handlings[0].response.headers)
self.assertNotIn('Server', mc.received_response.headers)
self.assertNotIn('Date', mc.received_response.headers)
self.assertNotIn('Server', mc.handlings[0].response.headers)
self.assertNotIn('Date', mc.handlings[0].response.headers)
class TestHeaderCollection(unittest.TestCase):
def setUp(self):
self.headers = deproxy.HeaderCollection()
def test_length(self):
self.assertEqual(len(self.headers), 0)
self.headers.add('Name', 'Value')
self.assertEqual(len(self.headers), 1)
def test_contains(self):
self.headers.add('Name', 'Value')
self.assertTrue('Name' in self.headers)
def test_contains_case(self):
self.headers.add('Name', 'Value')
self.assertTrue('name' in self.headers)
def test_assertIn_case(self):
self.headers.add('Name', 'Value')
self.assertIn('name', self.headers)
def test_find_all(self):
self.headers.add('A', 'qwerty')
self.headers.add('B', 'asdf')
self.headers.add('C', 'zxcv')
self.headers.add('A', 'uiop')
self.headers.add('A', 'jkl;')
result = [value for value in self.headers.find_all('A')]
self.assertEqual(result, ['qwerty', 'uiop', 'jkl;'])
def test_bracket_case(self):
self.headers.add('Name', 'Value')
try:
self.assertEqual(self.headers['name'], 'Value')
except:
self.fail()
def test_get(self):
self.headers.add('Name', 'Value')
self.assertIn('name', self.headers)
self.assertEqual(self.headers.get('Name'), 'Value')
self.assertEqual(self.headers.get('name'), 'Value')
self.assertIsNone(self.headers.get('asdf'))
self.assertEqual(self.headers.get('name', default='zxcv'), 'Value')
self.assertEqual(self.headers.get('asdf', default='zxcv'), 'zxcv')
class TestBodies(unittest.TestCase):
def setUp(self):
self.deproxy = deproxy.Deproxy()
self.port = get_next_deproxy_port()
self.deproxy.add_endpoint(self.port)
self.url = 'http://localhost:{0}/'.format(self.port)
def test_request_body(self):
body = """ This is the body
This is the next paragraph.
"""
mc = self.deproxy.make_request(url=self.url, method='POST',
request_body=body)
self.assertEqual(mc.sent_request.body, body)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].request.body, body)
def test_response_body(self):
body = """ This is another body
This is the next paragraph.
"""
def custom_handler(request):
return deproxy.Response(code=200, message='OK', headers=None,
body=body)
mc = self.deproxy.make_request(url=self.url,
default_handler=custom_handler)
self.assertEqual(mc.received_response.body, body)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.body, body)
@unittest.expectedFailure
def test_request_body_chunked(self):
self.fail()
def test_response_body_chunked(self):
chunked_body = "4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"
body = "Wikipedia"
def custom_handler(request):
return deproxy.Response(code=200, message='OK',
headers={'transfer-encoding': 'chunked'},
body=chunked_body)
mc = self.deproxy.make_request(url=self.url,
default_handler=custom_handler)
self.assertEqual(mc.received_response.body, body)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.body, chunked_body)
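    # The chunked body above encodes "Wikipedia" as two chunks ("Wiki" and
    # "pedia") followed by a zero-length terminating chunk, per the HTTP
    # chunked transfer coding; the client receives the decoded body while the
    # handler recorded the raw chunked form.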
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
class TestSendingHeaders(unittest.TestCase):
def setUp(self):
self.deproxy = deproxy.Deproxy()
self.port = get_next_deproxy_port()
self.deproxy.add_endpoint(self.port)
self.url = 'http://localhost:{0}/'.format(self.port)
def test_send_duplicate_request_headers(self):
headers = deproxy.HeaderCollection()
headers.add('Name', 'Value1')
headers.add('Name', 'Value2')
mc = self.deproxy.make_request(url=self.url, headers=headers)
self.assertEqual(len(mc.handlings), 1)
values = [value for value in
mc.handlings[0].request.headers.find_all('Name')]
self.assertEqual(values, ['Value1', 'Value2'])
def test_send_duplicate_response_headers(self):
def custom_handler(request):
headers = deproxy.HeaderCollection()
headers.add('Name', 'Value1')
headers.add('Name', 'Value2')
return deproxy.Response(code=200, message='OK', headers=headers,
body=None)
mc = self.deproxy.make_request(url=self.url,
default_handler=custom_handler)
self.assertEqual(len(mc.handlings), 1)
values = [value for value in
mc.received_response.headers.find_all('Name')]
self.assertEqual(values, ['Value1', 'Value2'])
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
class TestPerEndpointHandlers(unittest.TestCase):
def setUp(self):
self.deproxy = deproxy.Deproxy()
self.endpoint1 = self.deproxy.add_endpoint(
name='test-endpoint-1',
port=get_next_deproxy_port())
self.endpoint2 = self.deproxy.add_endpoint(
name='test-endpoint-2',
port=get_next_deproxy_port())
def custom_handler1(request):
return deproxy.Response(code='605', message='Custom', headers={},
body=None)
def custom_handler2(request):
return deproxy.Response(code='606', message='Spoiler', headers={},
body=None)
self.custom_handler1 = custom_handler1
self.custom_handler2 = custom_handler2
self.url1 = 'http://localhost:{0}/'.format(self.endpoint1.port)
self.url2 = 'http://localhost:{0}/'.format(self.endpoint2.port)
def test_no_handlers(self):
mc = self.deproxy.make_request(url=self.url1)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '200')
self.assertEqual(mc.received_response.code, '200')
mc = self.deproxy.make_request(url=self.url2)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '200')
self.assertEqual(mc.received_response.code, '200')
def test_empty_handlers(self):
mc = self.deproxy.make_request(url=self.url1, handlers={})
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '200')
self.assertEqual(mc.received_response.code, '200')
mc = self.deproxy.make_request(url=self.url2, handlers={})
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '200')
self.assertEqual(mc.received_response.code, '200')
def test_both_handlers(self):
handlers = {self.endpoint1: self.custom_handler1,
self.endpoint2: self.custom_handler2}
mc = self.deproxy.make_request(url=self.url1, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '605')
self.assertEqual(mc.received_response.code, '605')
mc = self.deproxy.make_request(url=self.url2, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '606')
self.assertEqual(mc.received_response.code, '606')
def test_one_handler(self):
handlers = {self.endpoint1: self.custom_handler1}
mc = self.deproxy.make_request(url=self.url1, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '605')
self.assertEqual(mc.received_response.code, '605')
mc = self.deproxy.make_request(url=self.url2, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '200')
self.assertEqual(mc.received_response.code, '200')
def test_handlers_by_name(self):
handlers = {'test-endpoint-1': self.custom_handler1,
'test-endpoint-2': self.custom_handler2}
mc = self.deproxy.make_request(url=self.url1, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '605')
self.assertEqual(mc.received_response.code, '605')
mc = self.deproxy.make_request(url=self.url2, handlers=handlers)
self.assertEqual(len(mc.handlings), 1)
self.assertEqual(mc.handlings[0].response.code, '606')
self.assertEqual(mc.received_response.code, '606')
def tearDown(self):
self.deproxy.shutdown_all_endpoints()
def run():
parser = argparse.ArgumentParser()
parser.add_argument('--port-base', help='The base port number to use when '
'assigning ports to tests. Each test case uses the '
'next lower port number than the test case before. '
'The default is 9999.', default=9999, type=int)
parser.add_argument('--print-log', action='store_true',
help='Print the log.')
args = parser.parse_args()
if args.print_log:
logging.basicConfig(level=logging.DEBUG,
format=('%(asctime)s %(levelname)s:%(name)s:'
'%(funcName)s:'
'%(filename)s(%(lineno)d):'
'%(threadName)s(%(thread)d):%(message)s'))
global deproxy_port_base
deproxy_port_base = args.port_base
unittest.main(argv=[''])
if __name__ == '__main__':
run()
|
|
#copyright ReportLab Europe Limited. 2000-2012
#see license.txt for license details
__version__=''' $Id: pdfencrypt.py 3959 2012-09-27 14:39:39Z robin $ '''
"""helpers for pdf encryption/decryption"""
import string, sys, os
try:
from hashlib import md5
except ImportError:
from md5 import md5
from reportlab.lib.utils import getStringIO
import tempfile
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase import pdfutils
from reportlab.platypus.flowables import Flowable
#AR debug hooks - leaving in for now
CLOBBERID = 0 # set a constant Doc ID to allow comparison with other software like iText
CLOBBERPERMISSIONS = 0
DEBUG = 0 # print stuff to trace calculations
# permission bits
reserved1 = 1 # bit 1 must be 0
reserved2 = 1<<1 # bit 2 must be 0
printable = 1<<2
modifiable = 1<<3
copypastable = 1<<4
annotatable = 1<<5
# others [7..32] are reserved, must be 1
higherbits = 0
for i in range(6,31):
higherbits = higherbits | (1<<i)
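# Worked example (derived from the definitions above, not in the original
# source): with all four user flags enabled, permissionBits() returns
#   printable | modifiable | copypastable | annotatable | higherbits
#   == 0x3C | 0x7FFFFFC0 == 0x7FFFFFFC,
# which prepare() then stores as the signed 32-bit value
#   P = 0x7FFFFFFC - 2**31 == -4.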
# no encryption
class StandardEncryption:
prepared = 0
def __init__(self, userPassword, ownerPassword=None, canPrint=1, canModify=1, canCopy=1, canAnnotate=1, strength=40):
'''
This class defines the encryption properties to be used while creating a pdf document.
        Once instantiated, a StandardEncryption object can be applied to a Canvas or a BaseDocTemplate.
The userPassword parameter sets the user password on the encrypted pdf.
The ownerPassword parameter sets the owner password on the encrypted pdf.
        The boolean flags canPrint, canModify, canCopy, canAnnotate determine whether a user can
perform the corresponding actions on the pdf when only a user password has been supplied.
If the user supplies the owner password while opening the pdf, all actions can be performed regardless
of the flags.
Note that the security provided by these encryption settings (and even more so for the flags) is very weak.
'''
self.ownerPassword = ownerPassword
self.userPassword = userPassword
if strength == 40:
self.revision = 2
elif strength == 128:
self.revision = 3
self.canPrint = canPrint
self.canModify = canModify
self.canCopy = canCopy
self.canAnnotate = canAnnotate
self.O = self.U = self.P = self.key = None
def setAllPermissions(self, value):
self.canPrint = \
self.canModify = \
self.canCopy = \
self.canAnnotate = value
def permissionBits(self):
p = 0
if self.canPrint: p = p | printable
if self.canModify: p = p | modifiable
if self.canCopy: p = p | copypastable
if self.canAnnotate: p = p | annotatable
p = p | higherbits
return p
def encode(self, t):
"encode a string, stream, text"
if not self.prepared:
raise ValueError, "encryption not prepared!"
if self.objnum is None:
raise ValueError, "not registered in PDF object"
return encodePDF(self.key, self.objnum, self.version, t, revision=self.revision)
def prepare(self, document, overrideID=None):
# get ready to do encryption
if DEBUG: print 'StandardEncryption.prepare(...) - revision %d' % self.revision
if self.prepared:
raise ValueError, "encryption already prepared!"
# get the unescaped string value of the document id (first array element).
# we allow one to be passed in instead to permit reproducible tests
# of our algorithm, but in real life overrideID will always be None
if overrideID:
internalID = overrideID
else:
externalID = document.ID() # initialize it...
internalID = document.signature.digest()
#AR debugging
if CLOBBERID:
internalID = "xxxxxxxxxxxxxxxx"
if DEBUG:
print 'userPassword = %s' % self.userPassword
print 'ownerPassword = %s' % self.ownerPassword
print 'internalID = %s' % internalID
self.P = int(self.permissionBits() - 2**31L)
if CLOBBERPERMISSIONS: self.P = -44 # AR hack
if DEBUG:
print "self.P = %s" % repr(self.P)
self.O = computeO(self.userPassword, self.ownerPassword, self.revision)
if DEBUG:
print "self.O (as hex) = %s" % hexText(self.O)
#print "\nself.O", self.O, repr(self.O)
self.key = encryptionkey(self.userPassword, self.O, self.P, internalID, revision=self.revision)
if DEBUG:
print "self.key (hex) = %s" % hexText(self.key)
self.U = computeU(self.key, revision=self.revision, documentId=internalID)
if DEBUG:
print "self.U (as hex) = %s" % hexText(self.U)
self.objnum = self.version = None
self.prepared = 1
def register(self, objnum, version):
# enter a new direct object
if not self.prepared:
raise ValueError, "encryption not prepared!"
self.objnum = objnum
self.version = version
def info(self):
# the representation of self in file if any (should be None or PDFDict)
if not self.prepared:
raise ValueError, "encryption not prepared!"
return StandardEncryptionDictionary(O=self.O, U=self.U, P=self.P, revision=self.revision)
class StandardEncryptionDictionary:
__RefOnly__ = 1
__PDFObject__ = True
def __init__(self, O, U, P, revision):
self.O, self.U, self.P = O,U,P
self.revision = revision
def format(self, document):
# use a dummy document to bypass encryption
from reportlab.pdfbase.pdfdoc import DummyDoc, PDFDictionary, PDFString, PDFName
dummy = DummyDoc()
dict = {"Filter": PDFName("Standard"),
"O": hexText(self.O), #PDFString(self.O),
"U": hexText(self.U), #PDFString(self.U),
"P": self.P}
if self.revision == 3:
dict['Length'] = 128
dict['R'] = 3
dict['V'] = 2
else:
dict['R'] = 2
dict['V'] = 1
pdfdict = PDFDictionary(dict)
return pdfdict.format(dummy)
# from pdf spec
padding = """
28 BF 4E 5E 4E 75 8A 41 64 00 4E 56 FF FA 01 08
2E 2E 00 B6 D0 68 3E 80 2F 0C A9 FE 64 53 69 7A
"""
if hasattr(padding,'join'):
def xorKey(num,key):
"xor's each bytes of the key with the number, which is <256"
if num==0: return key
from operator import xor
return ''.join(map(chr,map(xor,len(key)*[num],map(ord,key))))
else:
def xorKey(num, key):
"xor's each bytes of the key with the number, which is <256"
from operator import xor
out = ''
for ch in key:
out = out + chr(xor(num, ord(ch)))
return out
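# Example (illustrative, not from the original source): XORing with zero is a
# no-op, otherwise every byte of the key is flipped against num, e.g.
#   xorKey(1, '\x00\x01') == '\x01\x00'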
def hexchar(x):
return chr(string.atoi(x, 16))
def hexText(text):
"a legitimate way to show strings in PDF"
out = ''
for char in text:
out = out + '%02X' % ord(char)
return '<' + out + '>'
def unHexText(hexText):
assert hexText[0] == '<', 'bad hex text'
assert hexText[-1] == '>', 'bad hex text'
hexText = hexText[1:-1]
out = ''
    for i in range(len(hexText) // 2):
        slice = hexText[i*2: i*2+2]
        char = chr(int(slice, 16))
out = out + char
return out
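# Example (illustrative): each byte becomes two uppercase hex digits inside
# angle brackets, matching the PDF hex-string syntax, so the two helpers are
# inverses of each other:
#   hexText('AB') == '<4142>'
#   unHexText('<4142>') == 'AB'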
PadString = string.join(map(hexchar, string.split(string.strip(padding))), "")
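# PadString is the fixed 32-byte padding sequence defined by the PDF spec;
# encryptionkey() and computeO() below append it to passwords and then
# truncate to 32 bytes, so short passwords are padded and long ones clipped.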
def encryptionkey(password, OwnerKey, Permissions, FileId1, revision=2):
# FileId1 is first string of the fileid array
# add padding string
#AR force same as iText example
#Permissions = -1836 #int(Permissions - 2**31)
password = password + PadString
# truncate to 32 bytes
password = password[:32]
# translate permissions to string, low order byte first
p = Permissions# + 2**32L
permissionsString = ""
for i in range(4):
byte = (p & 0xff) # seems to match what iText does
p = p>>8
permissionsString = permissionsString + chr(byte % 256)
hash = md5(password)
hash.update(OwnerKey)
hash.update(permissionsString)
hash.update(FileId1)
md5output = hash.digest()
if revision==2:
key = md5output[:5]
elif revision==3: #revision 3 algorithm - loop 50 times
for x in range(50):
md5output = md5(md5output).digest()
key = md5output[:16]
if DEBUG: print 'encryptionkey(%s,%s,%s,%s,%s)==>%s' % tuple(map(lambda x: hexText(str(x)),(password, OwnerKey, Permissions, FileId1, revision, key)))
return key
def computeO(userPassword, ownerPassword, revision):
from reportlab.lib.arciv import ArcIV
#print 'digest of hello is %s' % md5('hello').digest()
assert revision in (2,3), 'Unknown algorithm revision %s' % revision
if ownerPassword in (None, ''):
ownerPassword = userPassword
ownerPad = ownerPassword + PadString
ownerPad = ownerPad[0:32]
password = userPassword + PadString
userPad = password[:32]
digest = md5(ownerPad).digest()
if revision == 2:
O = ArcIV(digest[:5]).encode(userPad)
elif revision == 3:
for i in range(50):
digest = md5(digest).digest()
digest = digest[:16]
O = userPad
for i in range(20):
thisKey = xorKey(i, digest)
O = ArcIV(thisKey).encode(O)
if DEBUG: print 'computeO(%s,%s,%s)==>%s' % tuple(map(lambda x: hexText(str(x)),(userPassword, ownerPassword, revision,O)))
return O
def computeU(encryptionkey, encodestring=PadString,revision=2,documentId=None):
from reportlab.lib.arciv import ArcIV
if revision == 2:
result = ArcIV(encryptionkey).encode(encodestring)
elif revision == 3:
assert documentId is not None, "Revision 3 algorithm needs the document ID!"
h = md5(PadString)
h.update(documentId)
tmp = h.digest()
tmp = ArcIV(encryptionkey).encode(tmp)
for n in range(1,20):
thisKey = xorKey(n, encryptionkey)
tmp = ArcIV(thisKey).encode(tmp)
while len(tmp) < 32:
tmp = tmp + '\000'
result = tmp
if DEBUG: print 'computeU(%s,%s,%s,%s)==>%s' % tuple(map(lambda x: hexText(str(x)),(encryptionkey, encodestring,revision,documentId,result)))
return result
def checkU(encryptionkey, U):
decoded = computeU(encryptionkey, U)
#print len(decoded), len(U), len(PadString)
if decoded!=PadString:
if len(decoded)!=len(PadString):
raise ValueError, "lengths don't match! (password failed)"
raise ValueError, "decode of U doesn't match fixed padstring (password failed)"
def encodePDF(key, objectNumber, generationNumber, string, revision=2):
"Encodes a string or stream"
#print 'encodePDF (%s, %d, %d, %s)' % (hexText(key), objectNumber, generationNumber, string)
# extend 3 bytes of the object Number, low byte first
newkey = key
n = objectNumber
for i in range(3):
newkey = newkey + chr(n & 0xff)
n = n>>8
# extend 2 bytes of the generationNumber
n = generationNumber
for i in range(2):
newkey = newkey + chr(n & 0xff)
n = n>>8
md5output = md5(newkey).digest()
if revision == 2:
key = md5output[:10]
elif revision == 3:
key = md5output #all 16 bytes
from reportlab.lib.arciv import ArcIV
encrypted = ArcIV(key).encode(string)
#print 'encrypted=', hexText(encrypted)
if DEBUG: print 'encodePDF(%s,%s,%s,%s,%s)==>%s' % tuple(map(lambda x: hexText(str(x)),(key, objectNumber, generationNumber, string, revision,encrypted)))
return encrypted
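# Summary of the scheme above (descriptive, not additional behaviour): the
# document key is extended with the low-order bytes of the object and
# generation numbers, hashed with MD5, and the (truncated) digest seeds the
# ArcIV (RC4-compatible) stream cipher that encrypts the string.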
######################################################################
#
# quick tests of algorithm, should be moved elsewhere
#
######################################################################
def test():
# do a 40 bit example known to work in Acrobat Reader 4.0
enc = StandardEncryption('userpass','ownerpass', strength=40)
enc.prepare(None, overrideID = 'xxxxxxxxxxxxxxxx')
expectedO = '<6A835A92E99DCEA39D51CF34FDBDA42162690D2BD5F8E08E3008F91FE5B8512E>'
expectedU = '<9997BDB61E7F288DAE6A8C4246A8F9CDCDBBC3D909D703CABA5D65A0CC6D4083>'
expectedKey = '<A3A68B5CB1>' # 5 byte key = 40 bits
assert hexText(enc.O) == expectedO, '40 bit unexpected O value %s' % hexText(enc.O)
assert hexText(enc.U) == expectedU, '40 bit unexpected U value %s' % hexText(enc.U)
assert hexText(enc.key) == expectedKey, '40 bit unexpected key value %s' % hexText(enc.key)
# now for 128 bit example
enc = StandardEncryption('userpass','ownerpass', strength=128)
enc.prepare(None, overrideID = 'xxxxxxxxxxxxxxxx')
expectedO = '<19BDBD240E0866B84C49AEEF7E2350045DB8BDAE96E039BF4E3F12DAC3427DB6>'
expectedU = '<564747DADFF35F5F2078A2CA1705B50800000000000000000000000000000000>'
expectedKey = '<DC1E019846B1EEABA0CDB8ED6D53B5C4>' # 16 byte key = 128 bits
assert hexText(enc.O) == expectedO, '128 bit unexpected O value %s' % hexText(enc.O)
assert hexText(enc.U) == expectedU, '128 bit unexpected U value %s' % hexText(enc.U)
assert hexText(enc.key) == expectedKey, '128 bit unexpected key value %s' % hexText(enc.key)
######################################################################
#
# These represent the higher level API functions
#
######################################################################
def encryptCanvas(canvas,
userPassword, ownerPassword=None,
canPrint=1, canModify=1, canCopy=1, canAnnotate=1,
strength=40):
"Applies encryption to the document being generated"
enc = StandardEncryption(userPassword, ownerPassword,
canPrint, canModify, canCopy, canAnnotate,
strength=strength)
canvas._doc.encrypt = enc
# Platypus stuff needs work, sadly. I wanted to do it without needing
# changes to the latest release.
class EncryptionFlowable(StandardEncryption, Flowable):
"""Drop this in your Platypus story and it will set up the encryption options.
If you do it multiple times, the last one before saving will win."""
def wrap(self, availWidth, availHeight):
return (0,0)
def draw(self):
encryptCanvas(self.canv,
self.userPassword,
self.ownerPassword,
self.canPrint,
self.canModify,
self.canCopy,
self.canAnnotate)
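# Minimal usage sketch (an assumption, not from the original source): drop the
# flowable anywhere into a Platypus story and the document produced by
# build() will be encrypted, e.g.
#   story.append(EncryptionFlowable(userPassword='secret', strength=40))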
## I am thinking about this one. Needs a change to reportlab to
## work.
def encryptDocTemplate(dt,
userPassword, ownerPassword=None,
canPrint=1, canModify=1, canCopy=1, canAnnotate=1,
strength=40):
"For use in Platypus. Call before build()."
raise Exception("Not implemented yet")
def encryptPdfInMemory(inputPDF,
userPassword, ownerPassword=None,
canPrint=1, canModify=1, canCopy=1, canAnnotate=1,
strength=40):
"""accepts a PDF file 'as a byte array in memory'; return encrypted one.
This is a high level convenience and does not touch the hard disk in any way.
If you are encrypting the same file over and over again, it's better to use
pageCatcher and cache the results."""
try:
from rlextra.pageCatcher.pageCatcher import storeFormsInMemory, restoreFormsInMemory
except ImportError:
raise ImportError('''reportlab.lib.pdfencrypt.encryptPdfInMemory failed because rlextra cannot be imported.
See http://developer.reportlab.com''')
(bboxInfo, pickledForms) = storeFormsInMemory(inputPDF, all=1, BBoxes=1)
names = bboxInfo.keys()
firstPageSize = bboxInfo['PageForms0'][2:]
#now make a new PDF document
buf = getStringIO()
canv = Canvas(buf, pagesize=firstPageSize)
# set a standard ID while debugging
if CLOBBERID:
canv._doc._ID = "[(xxxxxxxxxxxxxxxx)(xxxxxxxxxxxxxxxx)]"
encryptCanvas(canv,
userPassword, ownerPassword,
canPrint, canModify, canCopy, canAnnotate,
strength=strength)
formNames = restoreFormsInMemory(pickledForms, canv)
for formName in formNames:
#need to extract page size in future
canv.doForm(formName)
canv.showPage()
canv.save()
return buf.getvalue()
def encryptPdfOnDisk(inputFileName, outputFileName,
userPassword, ownerPassword=None,
canPrint=1, canModify=1, canCopy=1, canAnnotate=1,
strength=40):
"Creates encrypted file OUTPUTFILENAME. Returns size in bytes."
inputPDF = open(inputFileName, 'rb').read()
outputPDF = encryptPdfInMemory(inputPDF,
userPassword, ownerPassword,
canPrint, canModify, canCopy, canAnnotate,
strength=strength)
open(outputFileName, 'wb').write(outputPDF)
return len(outputPDF)
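# Usage sketch (illustrative; the file names are hypothetical):
#   nbytes = encryptPdfOnDisk('input.pdf', 'output.pdf',
#                             'userpass', 'ownerpass', strength=128)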
def scriptInterp():
sys_argv = sys.argv[:] # copy
usage = """PDFENCRYPT USAGE:
PdfEncrypt encrypts your PDF files.
Line mode usage:
% pdfencrypt.exe pdffile [-o ownerpassword] | [owner ownerpassword],
\t[-u userpassword] | [user userpassword],
\t[-p 1|0] | [printable 1|0],
\t[-m 1|0] | [modifiable 1|0],
\t[-c 1|0] | [copypastable 1|0],
\t[-a 1|0] | [annotatable 1|0],
\t[-s savefilename] | [savefile savefilename],
\t[-v 1|0] | [verbose 1|0],
\t[-e128], [encrypt128],
\t[-h] | [help]
-o or owner set the owner password.
-u or user set the user password.
-p or printable set the printable attribute (must be 1 or 0).
-m or modifiable sets the modifiable attribute (must be 1 or 0).
-c or copypastable sets the copypastable attribute (must be 1 or 0).
-a or annotatable sets the annotatable attribute (must be 1 or 0).
    -s or savefile sets the name for the output PDF file
        (this defaults to 'pdffile_encrypted.pdf').
    -v or verbose prints useful output to the screen.
'-e128' or 'encrypt128' allows you to use 128 bit encryption (in beta).
-h or help prints this message.
See PdfEncryptIntro.pdf for more information.
"""
known_modes = ['-o', 'owner',
'-u', 'user',
'-p', 'printable',
'-m', 'modifiable',
'-c', 'copypastable',
'-a', 'annotatable',
'-s', 'savefile',
'-v', 'verbose',
'-h', 'help',
'-e128', 'encrypt128']
OWNER = ''
USER = ''
PRINTABLE = 1
MODIFIABLE = 1
COPYPASTABLE = 1
ANNOTATABLE = 1
SAVEFILE = 'encrypted.pdf'
#try:
caller = sys_argv[0] # may be required later - eg if called by security.py
argv = list(sys_argv)[1:]
if len(argv)>0:
if argv[0] == '-h' or argv[0] == 'help':
print usage
return
if len(argv)<2:
raise ValueError("Must include a filename and one or more arguments!")
if argv[0] not in known_modes:
infile = argv[0]
argv = argv[1:]
if not os.path.isfile(infile):
raise ValueError("Can't open input file '%s'!" % infile)
else:
raise ValueError("First argument must be name of the PDF input file!")
# meaningful name at this stage
STRENGTH = 40
if 'encrypt128' in argv:
STRENGTH = 128
argv.remove('encrypt128')
if '-e128' in argv:
STRENGTH = 128
argv.remove('-e128')
if ('-v' in argv) or ('verbose' in argv):
if '-v' in argv:
pos = argv.index('-v')
arg = "-v"
elif 'verbose' in argv:
pos = argv.index('verbose')
arg = "verbose"
try:
verbose = int(argv[pos+1])
except:
verbose = 1
argv.remove(argv[pos+1])
argv.remove(arg)
else:
from reportlab.rl_config import verbose
#argument, valid license variable, invalid license variable, text for print
arglist = (('-o', 'OWNER', OWNER, 'Owner password'),
('owner', 'OWNER', OWNER, 'Owner password'),
('-u', 'USER', USER, 'User password'),
('user', 'USER', USER, 'User password'),
('-p', 'PRINTABLE', PRINTABLE, "'Printable'"),
('printable', 'PRINTABLE', PRINTABLE, "'Printable'"),
('-m', 'MODIFIABLE', MODIFIABLE, "'Modifiable'"),
('modifiable', 'MODIFIABLE', MODIFIABLE, "'Modifiable'"),
('-c', 'COPYPASTABLE', COPYPASTABLE, "'Copypastable'"),
('copypastable', 'COPYPASTABLE', COPYPASTABLE, "'Copypastable'"),
('-a', 'ANNOTATABLE', ANNOTATABLE, "'Annotatable'"),
('annotatable', 'ANNOTATABLE', ANNOTATABLE, "'Annotatable'"),
('-s', 'SAVEFILE', SAVEFILE, "Output file"),
('savefile', 'SAVEFILE', SAVEFILE, "Output file"),
)
binaryrequired = ('-p', 'printable', '-m', 'modifiable', 'copypastable', '-c', 'annotatable', '-a')
for thisarg in arglist:
if thisarg[0] in argv:
pos = argv.index(thisarg[0])
if thisarg[0] in binaryrequired:
#try:
if argv[pos+1] not in ('1', '0'):
raise "%s value must be either '1' or '0'!" % thisarg[1]
#except:
#raise "Unable to set %s." % thisarg[4]
try:
if argv[pos+1] not in known_modes:
if thisarg[0] in binaryrequired:
exec(thisarg[1] +' = int(argv[pos+1])')
else:
exec(thisarg[1] +' = argv[pos+1]')
if verbose:
print "%s set to: '%s'." % (thisarg[3], argv[pos+1])
argv.remove(argv[pos+1])
argv.remove(thisarg[0])
except (IndexError, ValueError):
    raise ValueError("Unable to set %s." % thisarg[3])
if verbose>4:
#useful if feeling paranoid and need to double check things at this point...
print "\ninfile:", infile
print "STRENGTH:", STRENGTH
print "SAVEFILE:", SAVEFILE
print "USER:", USER
print "OWNER:", OWNER
print "PRINTABLE:", PRINTABLE
print "MODIFIABLE:", MODIFIABLE
print "COPYPASTABLE:", COPYPASTABLE
print "ANNOTATABLE:", ANNOTATABLE
print "SAVEFILE:", SAVEFILE
print "VERBOSE:", verbose
if SAVEFILE == 'encrypted.pdf':
if infile[-4:].lower() == '.pdf':
tinfile = infile[:-4]
else:
tinfile = infile
SAVEFILE = tinfile+"_encrypted.pdf"
filesize = encryptPdfOnDisk(infile, SAVEFILE, USER, OWNER,
PRINTABLE, MODIFIABLE, COPYPASTABLE, ANNOTATABLE,
strength=STRENGTH)
if verbose:
print "wrote output file '%s'(%s bytes)\n owner password is '%s'\n user password is '%s'" % (SAVEFILE, filesize, OWNER, USER)
if len(argv)>0:
raise "\nUnrecognised arguments : %s\nknown arguments are:\n%s" % (str(argv)[1:-1], known_modes)
else:
print usage
def main():
from reportlab.rl_config import verbose
scriptInterp()
if __name__=="__main__": #NO RUNTESTS
a = filter(lambda x: x[:7]=='--debug',sys.argv)
if a:
sys.argv = filter(lambda x: x[:7]!='--debug',sys.argv)
DEBUG = len(a)
if '--test' in sys.argv: test()
else: main()
|
|
"""Module responsible for INI parsing."""
from collections import OrderedDict
import configparser
import os
import re
import time
from typing import Any, Dict, Callable, Iterable, IO, List, Tuple, Pattern
from neuralmonkey.config.builder import ClassSymbol, ObjectRef
from neuralmonkey.config.exceptions import ParseError
from neuralmonkey.logging import log
LINE_NUM = re.compile(r"^(.*) ([0-9]+)$")
INTEGER = re.compile(r"^-?[0-9]+$")
FLOAT = re.compile(r"^-?[0-9]*\.[0-9]*(e[+-]?[0-9]+)?$|^-?[0-9]+e[+-]?[0-9]+$")
LIST = re.compile(r"\[([^]]*)\]")
TUPLE = re.compile(r"\(([^)]+)\)")
STRING = re.compile(r'^"(.*)"$')
VAR_REF = re.compile(r"^\$([a-zA-Z][a-zA-Z0-9_]*)$")
OBJECT_REF = re.compile(
r"^<([a-zA-Z][a-zA-Z0-9_]*(\.[a-zA-Z][a-zA-Z0-9_]*)*)>$")
CLASS_NAME = re.compile(
r"^_*[a-zA-Z][a-zA-Z0-9_]*(\._*[a-zA-Z][a-zA-Z0-9_]*)+$")
CONSTANTS = {
"False": False,
"True": True,
"None": None
}
def get_first_match(pattern: Pattern, string: str) -> str:
"""Return the first matching substring.
Args:
pattern: The pattern to find.
string: The string to search.
Returns:
The first occurrence of the pattern in the string.
Raises:
ValueError if the string does not match the pattern.
"""
match = pattern.match(string)
if match is None:
raise ValueError("String '{}' does not match the pattern '{}'"
.format(string, pattern.pattern))
return match.group(1)
# This is a function because the _parse_* helpers it references
# are only defined later in the module.
def _keyval_parser_dict() -> Dict[Any, Callable]:
return {
INTEGER: lambda x, _: int(x),
FLOAT: lambda x, _: float(x),
STRING: _parse_string,
VAR_REF: lambda x, vars_dict: vars_dict[get_first_match(VAR_REF, x)],
CLASS_NAME: _parse_class_name,
OBJECT_REF: lambda x, _: ObjectRef(get_first_match(OBJECT_REF, x)),
LIST: _parse_list,
TUPLE: _parse_tuple
}
class VarsDict(OrderedDict, Dict[str, Any]):
def __missing__(self, key):
"""Try to fetch and parse the variable value from `os.environ`."""
if key in os.environ:
try:
value = _parse_value(os.environ[key], self)
except ParseError:
# If we cannot parse it, use it as a string.
value = os.environ[key]
log("Variable {}={!r} taken from the environment."
.format(key, value))
return value
raise ParseError("Undefined variable: {}".format(key))
def _split_on_commas(string: str) -> List[str]:
"""Split a bracketed string on commas.
The commas inside brackets are preserved.
"""
items = []
char_buffer = [] # type: List[str]
openings = [] # type: List[str]
for i, char in enumerate(string):
if char == "," and not openings:
if char_buffer:
items.append("".join(char_buffer))
char_buffer = []
continue
elif char == " " and not char_buffer:
continue
elif char in ("(", "["):
openings.append(char)
elif char == ")":
if openings.pop() != "(":
raise ParseError("Invalid bracket end ')', col {}.".format(i))
elif char == "]":
if openings.pop() != "[":
raise ParseError("Invalid bracket end ']', col {}.".format(i))
char_buffer.append(char)
if char_buffer:
items.append("".join(char_buffer))
return items
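# Illustrative behaviour (not part of the original module):
#
#     >>> _split_on_commas('1, [2, 3], (4, 5)')
#     ['1', '[2, 3]', '(4, 5)']
#
# Commas inside the bracketed items are preserved.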
def _parse_string(string: str, vars_dict: VarsDict) -> str:
return get_first_match(STRING, string).format_map(vars_dict)
def _parse_list(string: str, vars_dict: VarsDict) -> List[Any]:
"""Parse the string recursively as a list."""
matched_content = get_first_match(LIST, string)
if not matched_content:
return []
items = _split_on_commas(matched_content)
values = [_parse_value(val, vars_dict) for val in items]
return values
def _parse_tuple(string: str, vars_dict: VarsDict) -> Tuple[Any, ...]:
"""Parse the string recursively as a tuple."""
items = _split_on_commas(get_first_match(TUPLE, string))
values = [_parse_value(val, vars_dict) for val in items]
return tuple(values)
def _parse_class_name(string: str, vars_dict: VarsDict) -> ClassSymbol:
"""Parse the string as a module or class name."""
del vars_dict
return ClassSymbol(string)
def _parse_value(string: str, vars_dict: VarsDict) -> Any:
"""Parse the value recursively according to the Nerual Monkey grammar.
Arguments:
string: the string to be parsed
vars_dict: a dictionary of variables for substitution
"""
string = string.strip()
if string in CONSTANTS:
return CONSTANTS[string]
for matcher, parser in _keyval_parser_dict().items():
if matcher.match(string) is not None:
return parser(string, vars_dict)
raise ParseError("Cannot parse value: '{}'.".format(string))
def _parse_ini(config_file: Iterable[str],
filename: str = "") -> Dict[str, Any]:
"""Parse an INI file into a dictionary."""
# Append its 1-based number to every non-empty line so that parse errors
# can later be reported with a source location (recovered via LINE_NUM).
line_numbers = (line.strip() + " " + str(i + 1)
                if line.strip() else ""
                for i, line in enumerate(config_file))
config = configparser.ConfigParser()
config.read_file(line_numbers, source=filename)
new_config = OrderedDict() # type: Dict[str, Any]
for section in config.sections():
new_config[section] = OrderedDict()
for key in config[section]:
match = LINE_NUM.match(config[section][key])
assert match is not None
new_config[section][key] = match.group(2), match.group(1)
return new_config
def _apply_change(config_dict: Dict[str, Any], setting: str) -> None:
if "=" not in setting:
raise ParseError("Invalid setting '{}'".format(setting))
key, value = (s.strip() for s in setting.split("=", maxsplit=1))
if "." in key:
section, option = key.split(".", maxsplit=1)
else:
section = "main"
option = key
if section not in config_dict:
log("Creating new section '{}'".format(section))
config_dict[section] = OrderedDict()
config_dict[section][option] = -1, value # no line number
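# Illustrative overrides (hypothetical section and option names):
#
#     _apply_change(config, "decoder.dropout=0.5")  # [decoder] dropout
#     _apply_change(config, "epochs=10")            # [main] epochs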
def parse_file(config_file: Iterable[str],
changes: Iterable[str] = None
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Parse an INI file and creates all values."""
parsed_dicts = OrderedDict() # type: Dict[str, Any]
config = _parse_ini(config_file)
if changes is not None:
for change in changes:
_apply_change(config, change)
vars_dict = VarsDict()
vars_dict["TIME"] = time.strftime("%Y-%m-%d-%H-%M-%S")
def parse_section(section: str, output_dict: Dict[str, Any]):
for key, (lineno, value_string) in config[section].items():
try:
value = _parse_value(value_string, vars_dict)
except ParseError as exc:
exc.set_line(lineno)
raise
output_dict[key] = value
if "vars" in config:
parse_section("vars", vars_dict)
for section in config:
if section != "vars":
parsed_dicts[section] = OrderedDict()
parse_section(section, parsed_dicts[section])
# also return the unparsed config dict; need to remove line numbers
raw_config = OrderedDict([
(name, OrderedDict([(key, val) for key, (_, val) in section.items()]))
for name, section in config.items()])
return raw_config, parsed_dicts
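# A minimal configuration accepted by this parser (illustrative; the
# section and option names are made up):
#
#     [vars]
#     prefix="exp1"
#
#     [main]
#     name="{prefix}-run"
#     steps=1000
#     trainer=<trainer>
#
# "{prefix}" in name is substituted from [vars] by _parse_string, and
# <trainer> becomes an ObjectRef to a (hypothetical) [trainer] section.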
def write_file(config_dict: Dict[str, Any], config_file: IO[str]) -> None:
config = configparser.ConfigParser()
config.read_dict(config_dict)
config.write(config_file, space_around_delimiters=False)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
class TensorArrayCPUTest(tf.test.TestCase):
_use_gpu = False
def testTensorArrayWriteRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
def _testTensorArrayWritePack(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
if tf_dtype == tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.pack()
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
def testTensorArrayWritePack(self):
self._testTensorArrayWritePack(tf.float32)
self._testTensorArrayWritePack(tf.float64)
self._testTensorArrayWritePack(tf.int32)
self._testTensorArrayWritePack(tf.int64)
self._testTensorArrayWritePack(tf.complex64)
self._testTensorArrayWritePack(tf.string)
def _testTensorArrayWriteConcat(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
if tf_dtype == tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.concat()
self.assertAllEqual(
convert([[4.0, 5.0],
[104.0, 105.0],
[204.0, 205.0],
[6.0, 7.0],
[106.0, 107.0],
[8.0, 9.0]]), c0.eval())
def testTensorArrayWriteConcat(self):
self._testTensorArrayWriteConcat(tf.float32)
self._testTensorArrayWriteConcat(tf.float64)
self._testTensorArrayWriteConcat(tf.int32)
self._testTensorArrayWriteConcat(tf.int64)
self._testTensorArrayWriteConcat(tf.complex64)
self._testTensorArrayWriteConcat(tf.string)
def testTensorArrayUnpackWrongMajorSizeFails(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
r"Input value must have first dimension "
r"equal to the array size \(2 vs. 3\)"):
ta.unpack([1.0, 2.0]).flow.eval()
def testTensorArrayPackNotAllValuesAvailableFails(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not read from TensorArray index 1 "
"because it has not yet been written to."):
ta.write(0, [[4.0, 5.0]]).pack().eval()
def _testTensorArrayUnpackRead(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
if tf_dtype is tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
# Unpack a vector into scalars
w0 = ta.unpack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
# Unpack a matrix into vectors
w1 = ta.unpack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackRead(tf.float32)
self._testTensorArrayUnpackRead(tf.float64)
self._testTensorArrayUnpackRead(tf.int32)
self._testTensorArrayUnpackRead(tf.int64)
self._testTensorArrayUnpackRead(tf.complex64)
self._testTensorArrayUnpackRead(tf.string)
def _testTensorArraySplitRead(self, tf_dtype):
dtype = tf_dtype.as_numpy_dtype()
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
if tf_dtype == tf.string:
# In Python3, np.str is unicode, while we always want bytes
convert = lambda x: np.asarray(x).astype("|S")
else:
convert = lambda x: np.asarray(x).astype(dtype)
# Split an empty vector
lengths = tf.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
# Split a vector
lengths = tf.constant([2, 0, 1])
w0 = ta.split(
convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0, 2.0]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix
lengths = tf.constant([2, 0, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)
self.assertAllEqual(convert([]).reshape(0, 2), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
def testTensorArraySplitRead(self):
self._testTensorArraySplitRead(tf.float32)
self._testTensorArraySplitRead(tf.float64)
self._testTensorArraySplitRead(tf.int32)
self._testTensorArraySplitRead(tf.int64)
self._testTensorArraySplitRead(tf.complex64)
self._testTensorArraySplitRead(tf.string)
def testTensorGradArrayWriteRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
g_ta = ta.grad("grad")
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
def testTensorGradArrayDynamicWriteRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run([
r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
def testTensorGradAccessTwiceReceiveSameObject(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
with tf.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
[g_ta_0.handle, g_ta_1.handle, r1_0])
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
# Test writing the wrong datatype
with self.assertRaisesOpError(
"TensorArray dtype is float but Op is trying to write dtype string"):
ta.write(-1, "wrong_type_scalar").flow.eval()
# Test writing to a negative index
with self.assertRaisesOpError(
"Tried to write to index -1 but array is not "
"resizeable and size is: 3"):
ta.write(-1, 3.0).flow.eval()
# Test writing to too large an index
with self.assertRaisesOpError(
"Tried to write to index 3 but array is not "
"resizeable and size is: 3"):
ta.write(3, 3.0).flow.eval()
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype
r0_bad = gen_data_flow_ops._tensor_array_read(
handle=w0.handle, index=0, dtype=tf.int64, flow_in=w0.flow)
with self.assertRaisesOpError(
"TensorArray dtype is float but Op requested dtype int64."):
r0_bad.eval()
# Test reading from a different index than the one we wrote to
r1 = w0.read(1)
with self.assertRaisesOpError(
"Could not read from TensorArray index 1 because "
"it has not yet been written to."):
r1.eval()
# Test reading from a negative index
with self.assertRaisesOpError(
r"Tried to read from index -1 but array size is: 3"):
ta.read(-1).eval()
# Test reading from too large an index
with self.assertRaisesOpError(
"Tried to read from index 3 but array size is: 3"):
ta.read(3).eval()
def testTensorArrayWriteMultipleFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not write to TensorArray index 2 because "
"it has already been written to."):
ta.write(2, 3.0).write(2, 3.0).flow.eval()
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w1 = ta.write(0, 3.0)
w2 = w1.write(1, 4.0)
w3 = w2.write(2, [3.0])
with self.assertRaisesOpError(
"Concat saw a scalar shape at index 0 but requires at least vectors"):
w3.concat().eval()
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w1 = ta.write(0, [3.0])
w2 = w1.write(1, [4.0])
w3 = w2.write(2, [[3.0]])
with self.assertRaisesOpError(
r"TensorArray has inconsistent shapes. Index 0 has "
r"\(excepting dimension 0\) shape: \[\] but index 2 has \(excepting "
r"dimension 0\) shape: \[1\]"):
w3.concat().eval()
def testTensorArraySplitIncompatibleShapesFails(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
r"Expected lengths to be a vector, received shape: \[\]"):
lengths = tf.placeholder(tf.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
with self.assertRaisesOpError(
r"Expected sum of lengths to be equal to values.shape\[0\], "
r"but sum of lengths is 1 and value's shape is: \[3\]"):
ta.split([1.0, 2.0, 3.0], [1]).flow.eval()
with self.assertRaisesOpError(
r"Expected value to be at least a vector, but received shape: \[\]"):
ta.split(1.0, [1]).flow.eval()
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2)
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(2 vs. 1\), and the TensorArray is not marked as "
r"dynamically resizeable"):
ta.split([1.0], [1]).flow.eval()
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3)
ta_grad = ta.grad("grad")
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
# Assert that if multiple_writes_aggregate is not enabled,
# multiple writes raise an exception.
with self.assertRaisesOpError(
r"TensorArray foo: Could not write to TensorArray index 2 because "
r"it has already been written to."):
w1.flow.eval()
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
with self.assertRaisesOpError(
r"Could not aggregate to TensorArray index 1 because the "
r"existing shape is \[\] but the new input shape is \[1\]"):
wb1_grad.flow.eval()
def testTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in [tf.int32, tf.int64, tf.float32, tf.float64, tf.complex64]:
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
def testMultiTensorArray(self):
with self.test_session(use_gpu=self._use_gpu):
h1 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
self.assertAllClose(9.0, r.eval())
def testDuplicateTensorArrayFails(self):
with self.test_session(use_gpu=self._use_gpu) as session:
h1 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="foo")
c1 = h1.write(0, 4.0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=tf.float32, tensor_array_name="foo")
c2 = h2.write(0, 5.0)
with self.assertRaises(errors.AlreadyExistsError):
session.run([c1.flow, c2.flow])
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.as_dtype(dtype), tensor_array_name="foo", size=3)
c = lambda x: np.array(x, dtype=dtype)
value_0 = tf.constant(c([[4.0, 5.0]]))
value_1 = tf.constant(c(3.0))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = tf.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
grad_r0_r0_2 = tf.gradients(
ys=[r0, r0_2], xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
grad_just_r1 = tf.gradients(
ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
# Test combined gradients
grad = tf.gradients(
ys=[r0, r0_2, r1], xs=[value_0, value_1],
grad_ys=[c(-1.0), c(-2.0), c([[2.0, 3.0]])])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c(-3.0), grad_vals[0])
self.assertAllEqual(c([[2.0, 3.0]]), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
for dtype in (np.float32, np.float64, np.int32, np.int64, np.complex64):
self._testTensorArrayGradientWriteReadType(dtype)
def testTensorArrayGradientWritePackConcatAndRead(self):
with self.test_session(use_gpu=self._use_gpu) as sess:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2,
clear_after_read=False)
value_0 = tf.constant([-1.0, 1.0])
value_1 = tf.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.pack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat()
with tf.control_dependencies([p0, r0, s0]):
grad_r = tf.gradients(
ys=[p0, r0, s0], xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # pack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0]]) # concat gradient
grad_vals = sess.run(grad_r) # 2 + 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
def testTensorArrayReadTwice(self):
with self.test_session(use_gpu=self._use_gpu):
value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readonce = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2)
w_readonce = ta_readonce.unpack(value)
r0_readonce = w_readonce.read(0)
with tf.control_dependencies([r0_readonce]):
r1_readonce = w_readonce.read(0)
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"):
r1_readonce.eval()
ta_readtwice = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unpack(value)
r0_readtwice = w_readtwice.read(0)
with tf.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], r1_readtwice.eval())
def testTensorArrayGradientUnpackRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2,
clear_after_read=False)
value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unpack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = tf.gradients(
ys=[r0, r0_1, r1], xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientSplitConcat(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=2)
value = tf.constant([[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w = ta.split(value, [2, 1])
r = w.concat()
# Test combined gradients
grad = tf.gradients(
ys=[r], xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual(
[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]], grad_vals[0])
def testTensorArrayGradientDynamicUnpackRead(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unpack(value)
r0 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = tf.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
def testCloseTensorArray(self):
with self.test_session(use_gpu=self._use_gpu) as session:
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
c1 = ta.close()
session.run(c1)
def testSizeTensorArray(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, s.eval())
def testWriteCloseTensorArray(self):
with self.test_session(use_gpu=self._use_gpu):
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
w1.close().run() # Expected to run without problems
with self.assertRaisesOpError(
r"TensorArray foo has already been closed."):
with tf.control_dependencies([w1.close()]):
w1.write(2, 3.0).flow.eval()
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
np_dtype = dtype.as_numpy_dtype
with self.test_session(use_gpu=self._use_gpu) as session:
v0 = tf.identity(np.arange(3*5, dtype=np_dtype).reshape(3, 5))
var = tf.Variable(np.arange(100, 105, dtype=np_dtype))
state0 = tf.identity(np.array([1] * 5, dtype=np_dtype))
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo",
size=0 if dynamic_size else 3, dynamic_size=dynamic_size)
time_0 = tf.identity(0)
def body(time, ta_t, state):
sliced = tf.slice(v0, begin=tf.pack([time, 0]), size=[1, -1])
sliced = tf.squeeze(sliced)
out = sliced + var + state
state += sliced
ta_t = ta_t.write(time, out)
return (time+1, ta_t, state)
(unused_0, h_final, unused_2) = tf.while_loop(
cond=lambda time, unused_1, unused_2: time < 3,
body=body,
loop_vars=(time_0, ta, state0),
parallel_iterations=3)
vout = h_final.pack()
grad_val = -np.arange(3*5, dtype=np_dtype).reshape(3, 5)
v0_grad = tf.gradients([vout], [v0], [grad_val])[0]
state0_grad = tf.gradients([vout], [state0], [grad_val])[0]
var_grad = tf.gradients([vout], [var], [grad_val])[0]
tf.initialize_all_variables().run()
state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
session.run([state0, var, v0, vout, v0_grad, var_grad, state0_grad]))
just_v0_grad_t, = session.run([v0_grad])
# state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# vout = [ v0[0] + var + state[0] |
# v0[1] + var + state[1] |
# v0[2] + var + state[2] ]
# = [ v0[0] + var + state0 |
# v0[1] + var + state0 + v0[0] |
# v0[2] + var + state0 + v0[0] + v0[1] ]
#
# d(vout[0])/d(v0) = [1 | 0 | 0 ]
# d(vout[1])/d(v0) = [1 | 1 | 0 ]
# d(vout[2])/d(v0) = [1 | 1 | 1 ]
# d(vout)/d(var) = [1 | 1 | 1]
# d(vout)/d(state0) = [ 1 | 1 | 1 ]
state_per_time = np.array([
state0_t,
state0_t + v0_t[0, :],
state0_t + v0_t[0, :] + v0_t[1, :]])
# Compare forward prop
self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# Compare backward prop
expected_v0_grad_t = np.array([
grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
grad_val[1, :] + grad_val[2, :],
grad_val[2, :]])
self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
def testWhileLoopWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=False, dtype=tf.float32)
# TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=tf.int64)
def testWhileLoopDynamicWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=True, dtype=tf.float32)
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.test_session(use_gpu=self._use_gpu) as session:
a = tf.identity(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1)
b = tf.identity(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1 + 3*5)
ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (ta.read(0, name="read_a_0") + # a + b
ta.read(1, name="read_b_0"))
g0 = -(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1)
grad_a = tf.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = tf.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = session.run([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly
joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def _grad_source_for_name(self, name):
return tensor_array_grad._GetGradSource(tf.constant(0, name=name))
def testGetGradSource_Invalid(self):
with self.assertRaises(ValueError):
self._grad_source_for_name("")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo/bar")
def testGetGradSource_NoEnclosingScope(self):
self.assertEqual("gradients:0", self._grad_source_for_name("gradients"))
self.assertEqual("gradients_0:0", self._grad_source_for_name("gradients_0"))
self.assertEqual("gradients", self._grad_source_for_name("gradients/foo"))
self.assertEqual(
"gradients_0", self._grad_source_for_name("gradients_0/foo"))
self.assertEqual(
"gradients", self._grad_source_for_name("gradients/foo/bar"))
self.assertEqual(
"gradients_0", self._grad_source_for_name("gradients_0/foo/bar"))
def testGetGradSource_EnclosingScope(self):
self.assertEqual(
"foo/gradients:0", self._grad_source_for_name("foo/gradients"))
self.assertEqual(
"foo/gradients_0:0", self._grad_source_for_name("foo/gradients_0"))
self.assertEqual(
"foo/gradients", self._grad_source_for_name("foo/gradients/bar"))
self.assertEqual(
"foo/gradients_0", self._grad_source_for_name("foo/gradients_0/bar"))
self.assertEqual(
"foo/bar/gradients",
self._grad_source_for_name("foo/bar/gradients/baz"))
self.assertEqual(
"foo/bar/gradients_0",
self._grad_source_for_name("foo/bar/gradients_0/baz"))
def testGetGradSource_NestedUsesInnermost(self):
self.assertEqual(
"foo/gradients/bar/gradients_0",
self._grad_source_for_name("foo/gradients/bar/gradients_0/baz"))
def testWriteShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=True)
c0 = tf.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
c1 = tf.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
c2 = tf.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
def testUnpackShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo",
size=0, dynamic_size=True, infer_shape=True)
value = tf.constant([[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unpack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
c1 = tf.constant([4.0, 5.0])
w1 = w0.write(3, c1)
r1 = w1.read(0)
self.assertAllEqual(c1.get_shape(), r1.get_shape())
c2 = tf.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
def testSplitShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo",
size=0, dynamic_size=True, infer_shape=True)
value = tf.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
ta1 = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo1",
size=0, dynamic_size=True, infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def testWriteUnknownShape(self):
with self.test_session():
ta = tensor_array_ops.TensorArray(
dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=True)
c0 = tf.placeholder(tf.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
class TensorArrayGPUTest(TensorArrayCPUTest):
_use_gpu = True
if __name__ == "__main__":
tf.test.main()
|
|
# -*- coding: utf-8 -*-
"""The extraction CLI tool."""
from __future__ import unicode_literals
import os
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.resolver import context as dfvfs_context
# The following import makes sure the analyzers are registered.
from plaso import analyzers # pylint: disable=unused-import
# The following import makes sure the parsers are registered.
from plaso import parsers # pylint: disable=unused-import
from plaso.containers import artifacts
from plaso.cli import logger
from plaso.cli import storage_media_tool
from plaso.cli import tool_options
from plaso.cli import views
from plaso.cli.helpers import manager as helpers_manager
from plaso.engine import configurations
from plaso.engine import engine
from plaso.filters import parser_filter
from plaso.lib import definitions
from plaso.lib import errors
from plaso.parsers import manager as parsers_manager
from plaso.parsers import presets as parsers_presets
import pytz # pylint: disable=wrong-import-order
class ExtractionTool(
storage_media_tool.StorageMediaTool,
tool_options.HashersOptions,
tool_options.ProfilingOptions,
tool_options.StorageFileOptions):
"""Extraction CLI tool.
Attributes:
list_time_zones (bool): True if the time zones should be listed.
"""
# Approximately 250 MB of queued items per worker.
_DEFAULT_QUEUE_SIZE = 125000
_BYTES_IN_A_MIB = 1024 * 1024
_PRESETS_FILE_NAME = 'presets.yaml'
def __init__(self, input_reader=None, output_writer=None):
"""Initializes an CLI tool.
Args:
input_reader (Optional[InputReader]): input reader, where None indicates
that the stdin input reader should be used.
output_writer (Optional[OutputWriter]): output writer, where None
indicates that the stdout output writer should be used.
"""
super(ExtractionTool, self).__init__(
input_reader=input_reader, output_writer=output_writer)
self._artifacts_registry = None
self._buffer_size = 0
self._parser_filter_expression = None
self._preferred_time_zone = None
self._preferred_year = None
self._presets_file = None
self._presets_manager = parsers_presets.ParserPresetsManager()
self._process_archives = False
self._process_compressed_streams = True
self._process_memory_limit = None
self._queue_size = self._DEFAULT_QUEUE_SIZE
self._resolver_context = dfvfs_context.Context()
self._single_process_mode = False
self._storage_file_path = None
self._storage_format = definitions.STORAGE_FORMAT_SQLITE
self._task_storage_format = definitions.STORAGE_FORMAT_SQLITE
self._temporary_directory = None
self._text_prepend = None
self._yara_rules_string = None
self._worker_memory_limit = None
self._worker_timeout = None
self.list_time_zones = False
def _CreateProcessingConfiguration(self, knowledge_base):
"""Creates a processing configuration.
Args:
knowledge_base (KnowledgeBase): contains information from the source
data needed for parsing.
Returns:
ProcessingConfiguration: processing configuration.
Raises:
BadConfigOption: if presets in the parser filter expression could not
be expanded or if an invalid parser or plugin name is specified.
"""
parser_filter_expression = self._parser_filter_expression
if not parser_filter_expression:
operating_system_family = knowledge_base.GetValue('operating_system')
operating_system_product = knowledge_base.GetValue(
'operating_system_product')
operating_system_version = knowledge_base.GetValue(
'operating_system_version')
operating_system_artifact = artifacts.OperatingSystemArtifact(
family=operating_system_family, product=operating_system_product,
version=operating_system_version)
preset_definitions = self._presets_manager.GetPresetsByOperatingSystem(
operating_system_artifact)
if preset_definitions:
preset_names = [
preset_definition.name for preset_definition in preset_definitions]
filter_expression = ','.join(preset_names)
logger.info('Parser filter expression set to: {0:s}'.format(
filter_expression))
parser_filter_expression = filter_expression
parser_filter_helper = parser_filter.ParserFilterExpressionHelper()
try:
parser_filter_expression = parser_filter_helper.ExpandPresets(
self._presets_manager, parser_filter_expression)
except RuntimeError as exception:
raise errors.BadConfigOption((
'Unable to expand presets in parser filter expression with '
'error: {0!s}').format(exception))
_, invalid_parser_elements = (
parsers_manager.ParsersManager.CheckFilterExpression(
parser_filter_expression))
if invalid_parser_elements:
invalid_parser_names_string = ','.join(invalid_parser_elements)
raise errors.BadConfigOption(
'Unknown parser or plugin names in element(s): "{0:s}" of '
'parser filter expression: {1:s}'.format(
invalid_parser_names_string, parser_filter_expression))
# TODO: pass preferred_encoding.
configuration = configurations.ProcessingConfiguration()
configuration.artifact_filters = self._artifact_filters
configuration.credentials = self._credential_configurations
configuration.debug_output = self._debug_mode
configuration.extraction.hasher_file_size_limit = (
self._hasher_file_size_limit)
configuration.extraction.hasher_names_string = self._hasher_names_string
configuration.extraction.process_archives = self._process_archives
configuration.extraction.process_compressed_streams = (
self._process_compressed_streams)
configuration.extraction.yara_rules_string = self._yara_rules_string
configuration.filter_file = self._filter_file
configuration.log_filename = self._log_file
configuration.parser_filter_expression = parser_filter_expression
configuration.preferred_year = self._preferred_year
configuration.profiling.directory = self._profiling_directory
configuration.profiling.sample_rate = self._profiling_sample_rate
configuration.profiling.profilers = self._profilers
configuration.task_storage_format = self._task_storage_format
configuration.temporary_directory = self._temporary_directory
return configuration
def _ParsePerformanceOptions(self, options):
"""Parses the performance options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
self._buffer_size = getattr(options, 'buffer_size', 0)
if self._buffer_size:
# TODO: turn this into a generic function that supports more size
# suffixes both MB and MiB and also that does not allow m as a valid
# indicator for MiB since m represents milli not Mega.
try:
if self._buffer_size[-1].lower() == 'm':
self._buffer_size = int(self._buffer_size[:-1], 10)
self._buffer_size *= self._BYTES_IN_A_MIB
else:
self._buffer_size = int(self._buffer_size, 10)
except ValueError:
raise errors.BadConfigOption(
'Invalid buffer size: {0!s}.'.format(self._buffer_size))
self._queue_size = self.ParseNumericOption(options, 'queue_size')
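# Illustrative values: '--buffer_size 512m' yields 512 * 1024 * 1024 =
# 536870912 bytes, while a plain '--buffer_size 4096' is taken as bytes.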
def _ParseProcessingOptions(self, options):
"""Parses the processing options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
self._single_process_mode = getattr(options, 'single_process', False)
argument_helper_names = [
'process_resources', 'temporary_directory', 'vfs_backend', 'workers',
'zeromq']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
if self._vfs_back_end == 'fsext':
dfvfs_definitions.PREFERRED_EXT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_EXT)
elif self._vfs_back_end == 'fshfs':
dfvfs_definitions.PREFERRED_HFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_HFS)
elif self._vfs_back_end == 'fsntfs':
dfvfs_definitions.PREFERRED_NTFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_NTFS)
elif self._vfs_back_end == 'tsk':
dfvfs_definitions.PREFERRED_EXT_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
dfvfs_definitions.PREFERRED_HFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
dfvfs_definitions.PREFERRED_NTFS_BACK_END = (
dfvfs_definitions.TYPE_INDICATOR_TSK)
def _ParseTimeZoneOption(self, options):
"""Parses the time zone options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
time_zone_string = self.ParseStringOption(options, 'timezone')
if isinstance(time_zone_string, str):
if time_zone_string.lower() == 'list':
self.list_time_zones = True
elif time_zone_string:
try:
pytz.timezone(time_zone_string)
except pytz.UnknownTimeZoneError:
raise errors.BadConfigOption(
'Unknown time zone: {0:s}'.format(time_zone_string))
self._preferred_time_zone = time_zone_string
def _PreprocessSources(self, extraction_engine):
"""Preprocesses the sources.
Args:
extraction_engine (BaseEngine): extraction engine to preprocess
the sources.
"""
logger.debug('Starting preprocessing.')
try:
artifacts_registry = engine.BaseEngine.BuildArtifactsRegistry(
self._artifact_definitions_path, self._custom_artifacts_path)
extraction_engine.PreprocessSources(
artifacts_registry, self._source_path_specs,
resolver_context=self._resolver_context)
except IOError as exception:
logger.error('Unable to preprocess with error: {0!s}'.format(exception))
logger.debug('Preprocessing done.')
def _ReadParserPresetsFromFile(self):
"""Reads the parser presets from the presets.yaml file.
Raises:
BadConfigOption: if the parser presets file cannot be read.
"""
self._presets_file = os.path.join(
self._data_location, self._PRESETS_FILE_NAME)
if not os.path.isfile(self._presets_file):
raise errors.BadConfigOption(
'No such parser presets file: {0:s}.'.format(self._presets_file))
try:
self._presets_manager.ReadFromFile(self._presets_file)
except errors.MalformedPresetError as exception:
raise errors.BadConfigOption(
'Unable to read parser presets from file with error: {0!s}'.format(
exception))
def _SetExtractionParsersAndPlugins(self, configuration, session):
"""Sets the parsers and plugins before extraction.
Args:
configuration (ProcessingConfiguration): processing configuration.
session (Session): session.
"""
names_generator = parsers_manager.ParsersManager.GetParserAndPluginNames(
parser_filter_expression=configuration.parser_filter_expression)
session.enabled_parser_names = list(names_generator)
session.parser_filter_expression = configuration.parser_filter_expression
def _SetExtractionPreferredTimeZone(self, knowledge_base):
"""Sets the preferred time zone before extraction.
Args:
knowledge_base (KnowledgeBase): contains information from the source
data needed for parsing.
"""
# Note that session.preferred_time_zone defaults to UTC, while
# self._preferred_time_zone is None when not explicitly set.
if self._preferred_time_zone:
try:
knowledge_base.SetTimeZone(self._preferred_time_zone)
except ValueError:
# pylint: disable=protected-access
logger.warning(
'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(
self._preferred_time_zone, knowledge_base._time_zone.zone))
def AddPerformanceOptions(self, argument_group):
"""Adds the performance options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'--buffer_size', '--buffer-size', '--bs', dest='buffer_size',
action='store', default=0, help=(
'The buffer size for the output (defaults to 196MiB).'))
argument_group.add_argument(
'--queue_size', '--queue-size', dest='queue_size', action='store',
default=0, help=(
'The maximum number of queued items per worker '
'(defaults to {0:d})').format(self._DEFAULT_QUEUE_SIZE))
def AddProcessingOptions(self, argument_group):
"""Adds the processing options to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
argument_group.add_argument(
'--single_process', '--single-process', dest='single_process',
action='store_true', default=False, help=(
'Indicate that the tool should run in a single process.'))
argument_helper_names = [
'temporary_directory', 'vfs_backend', 'workers', 'zeromq']
if self._CanEnforceProcessMemoryLimit():
argument_helper_names.append('process_resources')
helpers_manager.ArgumentHelperManager.AddCommandLineArguments(
argument_group, names=argument_helper_names)
def AddTimeZoneOption(self, argument_group):
"""Adds the time zone option to the argument group.
Args:
argument_group (argparse._ArgumentGroup): argparse argument group.
"""
# Note the default here is None so we can determine if the time zone
# option was set.
argument_group.add_argument(
'-z', '--zone', '--timezone', dest='timezone', action='store',
metavar='TIME_ZONE', type=str, default=None, help=(
'preferred time zone of extracted date and time values that are '
'stored without a time zone indicator. The time zone is determined '
'based on the source data where possible otherwise it will default '
'to UTC. Use "list" to see a list of available time zones.'))
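# Illustrative usage: '--timezone "Europe/Amsterdam"' sets the preferred
# time zone; '--timezone list' merely lists the available time zones.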
def ListParsersAndPlugins(self):
"""Lists information about the available parsers and plugins."""
parsers_information = parsers_manager.ParsersManager.GetParsersInformation()
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Name', 'Description'],
title='Parsers')
for name, description in sorted(parsers_information):
table_view.AddRow([name, description])
table_view.Write(self._output_writer)
parser_names = parsers_manager.ParsersManager.GetNamesOfParsersWithPlugins()
for parser_name in parser_names:
plugins_information = (
parsers_manager.ParsersManager.GetParserPluginsInformation(
parser_filter_expression=parser_name))
table_title = 'Parser plugins: {0:s}'.format(parser_name)
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Name', 'Description'],
title=table_title)
for name, description in sorted(plugins_information):
table_view.AddRow([name, description])
table_view.Write(self._output_writer)
title = 'Parser presets'
if self._presets_file:
source_path = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
presets_file = self._presets_file
if presets_file.startswith(source_path):
presets_file = presets_file[len(source_path) + 1:]
title = '{0:s} ({1:s})'.format(title, presets_file)
presets_information = self._presets_manager.GetPresetsInformation()
table_view = views.ViewsFactory.GetTableView(
self._views_format_type, column_names=['Name', 'Parsers and plugins'],
title=title)
for name, description in sorted(presets_information):
table_view.AddRow([name, description])
table_view.Write(self._output_writer)
|
|
#############################################################################
## ipsec.py --- IPsec support for Scapy ##
## ##
## Copyright (C) 2014 6WIND ##
## ##
## This program is free software; you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License version 2 as ##
## published by the Free Software Foundation. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
## General Public License for more details. ##
#############################################################################
"""
IPsec layer
===========
Example of use:
>>> sa = SecurityAssociation(ESP, spi=0xdeadbeef, crypt_algo='AES-CBC',
... crypt_key='sixteenbytes key')
>>> p = IP(src='1.1.1.1', dst='2.2.2.2')
>>> p /= TCP(sport=45012, dport=80)
>>> p /= Raw('testdata')
>>> p = IP(str(p))
>>> p
<IP version=4L ihl=5L tos=0x0 len=48 id=1 flags= frag=0L ttl=64 proto=tcp chksum=0x74c2 src=1.1.1.1 dst=2.2.2.2 options=[] |<TCP sport=45012 dport=http seq=0 ack=0 dataofs=5L reserved=0L flags=S window=8192 chksum=0x1914 urgptr=0 options=[] |<Raw load='testdata' |>>>
>>>
>>> e = sa.encrypt(p)
>>> e
<IP version=4L ihl=5L tos=0x0 len=76 id=1 flags= frag=0L ttl=64 proto=esp chksum=0x747a src=1.1.1.1 dst=2.2.2.2 |<ESP spi=0xdeadbeef seq=1 data=b'\xf8\xdb\x1e\x83[T\xab\\\xd2\x1b\xed\xd1\xe5\xc8Y\xc2\xa5d\x92\xc1\x05\x17\xa6\x92\x831\xe6\xc1]\x9a\xd6K}W\x8bFfd\xa5B*+\xde\xc8\x89\xbf{\xa9' |>>
>>>
>>> d = sa.decrypt(e)
>>> d
<IP version=4L ihl=5L tos=0x0 len=48 id=1 flags= frag=0L ttl=64 proto=tcp chksum=0x74c2 src=1.1.1.1 dst=2.2.2.2 |<TCP sport=45012 dport=http seq=0 ack=0 dataofs=5L reserved=0L flags=S window=8192 chksum=0x1914 urgptr=0 options=[] |<Raw load='testdata' |>>>
>>>
>>> d == p
True
"""
from __future__ import absolute_import
from fractions import gcd
import os
import socket
import struct
from scapy.config import conf, crypto_validator
from scapy.data import IP_PROTOS
from scapy.error import log_loading
from scapy.fields import ByteEnumField, ByteField, IntField, PacketField, \
ShortField, StrField, XIntField, XStrField, XStrLenField
from scapy.packet import Packet, bind_layers, Raw
from scapy.layers.inet import IP, UDP
import scapy.modules.six as six
from scapy.modules.six.moves import range
from scapy.layers.inet6 import IPv6, IPv6ExtHdrHopByHop, IPv6ExtHdrDestOpt, \
IPv6ExtHdrRouting
#------------------------------------------------------------------------------
class AH(Packet):
"""
Authentication Header
See https://tools.ietf.org/rfc/rfc4302.txt
"""
name = 'AH'
def __get_icv_len(self):
"""
Compute the size of the ICV based on the payloadlen field.
Padding size is included as it can only be known from the authentication
algorithm provided by the Security Association.
"""
# payloadlen = length of AH in 32-bit words (4-byte units), minus "2"
# payloadlen = 3 32-bit word fixed fields + ICV + padding - 2
# ICV = (payloadlen + 2 - 3 - padding) in 32-bit words
return (self.payloadlen - 1) * 4
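# Worked example (illustrative): with HMAC-SHA1-96 the ICV is 12 bytes
# and needs no padding, so the AH totals 3 + 3 = 6 32-bit words,
# payloadlen is 6 - 2 = 4, and this returns (4 - 1) * 4 = 12 bytes.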
fields_desc = [
ByteEnumField('nh', None, IP_PROTOS),
ByteField('payloadlen', None),
ShortField('reserved', None),
XIntField('spi', 0x0),
IntField('seq', 0),
XStrLenField('icv', None, length_from=__get_icv_len),
# Padding len can only be known with the SecurityAssociation.auth_algo
XStrLenField('padding', None, length_from=lambda x: 0),
]
overload_fields = {
IP: {'proto': socket.IPPROTO_AH},
IPv6: {'nh': socket.IPPROTO_AH},
IPv6ExtHdrHopByHop: {'nh': socket.IPPROTO_AH},
IPv6ExtHdrDestOpt: {'nh': socket.IPPROTO_AH},
IPv6ExtHdrRouting: {'nh': socket.IPPROTO_AH},
}
bind_layers(IP, AH, proto=socket.IPPROTO_AH)
bind_layers(IPv6, AH, nh=socket.IPPROTO_AH)
bind_layers(AH, IP, nh=socket.IPPROTO_IP)
bind_layers(AH, IPv6, nh=socket.IPPROTO_IPV6)
#------------------------------------------------------------------------------
class ESP(Packet):
"""
Encapsulated Security Payload
See https://tools.ietf.org/rfc/rfc4303.txt
"""
name = 'ESP'
fields_desc = [
XIntField('spi', 0x0),
IntField('seq', 0),
XStrField('data', None),
]
overload_fields = {
IP: {'proto': socket.IPPROTO_ESP},
IPv6: {'nh': socket.IPPROTO_ESP},
IPv6ExtHdrHopByHop: {'nh': socket.IPPROTO_ESP},
IPv6ExtHdrDestOpt: {'nh': socket.IPPROTO_ESP},
IPv6ExtHdrRouting: {'nh': socket.IPPROTO_ESP},
}
bind_layers(IP, ESP, proto=socket.IPPROTO_ESP)
bind_layers(IPv6, ESP, nh=socket.IPPROTO_ESP)
bind_layers(UDP, ESP, dport=4500) # NAT-Traversal encapsulation
bind_layers(UDP, ESP, sport=4500) # NAT-Traversal encapsulation
#------------------------------------------------------------------------------
class _ESPPlain(Packet):
"""
Internal class to represent unencrypted ESP packets.
"""
name = 'ESP'
fields_desc = [
XIntField('spi', 0x0),
IntField('seq', 0),
StrField('iv', ''),
PacketField('data', '', Raw),
StrField('padding', ''),
ByteField('padlen', 0),
ByteEnumField('nh', 0, IP_PROTOS),
StrField('icv', ''),
]
def data_for_encryption(self):
return str(self.data) + self.padding + chr(self.padlen) + chr(self.nh)
#------------------------------------------------------------------------------
if conf.crypto_valid:
from cryptography.exceptions import InvalidTag
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import interfaces
from cryptography.hazmat.primitives.ciphers import (
Cipher,
algorithms,
modes,
)
else:
log_loading.info("Can't import python-cryptography v1.7+. "
"Disabled IPsec encryption/authentication.")
InvalidTag = default_backend = interfaces = None
Cipher = algorithms = modes = None
#------------------------------------------------------------------------------
def _lcm(a, b):
"""
Least Common Multiple between 2 integers.
"""
if a == 0 or b == 0:
return 0
else:
# Floor division keeps the result an int under Python 3 as well.
return abs(a * b) // gcd(a, b)
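# Illustrative: _lcm(16, 4) == 16, so a 16-byte cipher block already
# satisfies the 4-byte ESP alignment enforced in CryptAlgo.pad() below.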
class CryptAlgo(object):
"""
IPsec encryption algorithm
"""
def __init__(self, name, cipher, mode, block_size=None, iv_size=None,
key_size=None, icv_size=None, salt_size=None, format_mode_iv=None):
"""
@param name: the name of this encryption algorithm
@param cipher: a Cipher module
@param mode: the mode used with the cipher module
@param block_size: the length a block for this algo. Defaults to the
`block_size` of the cipher.
@param iv_size: the length of the initialization vector of this algo.
Defaults to the `block_size` of the cipher.
@param key_size: an integer or list/tuple of integers. If specified,
force the secret keys length to one of the values.
Defaults to the `key_size` of the cipher.
@param icv_size: the length of the Integrity Check Value of this algo.
Used by Combined Mode Algorithms e.g. GCM
@param salt_size: the length of the salt to use as the IV prefix.
Usually used by Counter modes e.g. CTR
@param format_mode_iv: function to format the Initialization Vector
e.g. handle the salt value
Default is the random buffer from `generate_iv`
"""
self.name = name
self.cipher = cipher
self.mode = mode
self.icv_size = icv_size
if modes and self.mode is not None:
self.is_aead = issubclass(self.mode,
modes.ModeWithAuthenticationTag)
else:
self.is_aead = False
if block_size is not None:
self.block_size = block_size
elif cipher is not None:
self.block_size = cipher.block_size // 8
else:
self.block_size = 1
if iv_size is None:
self.iv_size = self.block_size
else:
self.iv_size = iv_size
if key_size is not None:
self.key_size = key_size
elif cipher is not None:
self.key_size = tuple(i // 8 for i in cipher.key_sizes)
else:
self.key_size = None
if salt_size is None:
self.salt_size = 0
else:
self.salt_size = salt_size
if format_mode_iv is None:
self._format_mode_iv = lambda iv, **kw: iv
else:
self._format_mode_iv = format_mode_iv
def check_key(self, key):
"""
Check that the key length is valid.
@param key: a byte string
"""
if self.key_size and not (len(key) == self.key_size or len(key) in self.key_size):
raise TypeError('invalid key size %s, must be %s' %
(len(key), self.key_size))
def generate_iv(self):
"""
Generate a random initialization vector.
"""
# XXX: Handle counter modes with real counters? RFCs allow the use of
# XXX: random bytes for counters, so it is not wrong to do it that way
return os.urandom(self.iv_size)
@crypto_validator
def new_cipher(self, key, mode_iv, digest=None):
"""
@param key: the secret key, a byte string
@param mode_iv: the initialization vector or nonce, a byte string.
Formatted by `format_mode_iv`.
@param digest: also known as tag or icv. A byte string containing the
digest of the encrypted data. Only use this during
decryption!
@return: an initialized cipher object for this algo
"""
if self.is_aead and digest is not None:
# With AEAD, the mode needs the digest during decryption.
return Cipher(
self.cipher(key),
self.mode(mode_iv, digest, len(digest)),
default_backend(),
)
else:
return Cipher(
self.cipher(key),
self.mode(mode_iv),
default_backend(),
)
def pad(self, esp):
"""
Add the correct amount of padding so that the data to encrypt is
exactly a multiple of the algorithm's block size.
Also, make sure that the total ESP packet length is a multiple of 4
bytes.
@param esp: an unencrypted _ESPPlain packet
@return: an unencrypted _ESPPlain packet with valid padding
"""
# 2 extra bytes for padlen and nh
data_len = len(esp.data) + 2
        # According to RFC 4303, section 2.4. Padding (for Encryption),
        # the size of the ESP payload must be a multiple of 32 bits
align = _lcm(self.block_size, 4)
# pad for block size
esp.padlen = -data_len % align
        # Still according to the RFC, the default padding *MUST* be an array
        # of bytes counting up from 1 to padlen
# TODO: Handle padding function according to the encryption algo
esp.padding = ''.join(chr(b) for b in range(1, esp.padlen + 1))
# If the following test fails, it means that this algo does not comply
# with the RFC
payload_len = len(esp.iv) + len(esp.data) + len(esp.padding) + 2
if payload_len % 4 != 0:
raise ValueError('The size of the ESP data is not aligned to 32 bits after padding.')
return esp
def encrypt(self, sa, esp, key):
"""
Encrypt an ESP packet
@param sa: the SecurityAssociation associated with the ESP packet.
@param esp: an unencrypted _ESPPlain packet with valid padding
@param key: the secret key used for encryption
@return: a valid ESP packet encrypted with this algorithm
"""
data = esp.data_for_encryption()
if self.cipher:
mode_iv = self._format_mode_iv(algo=self, sa=sa, iv=esp.iv)
cipher = self.new_cipher(key, mode_iv)
encryptor = cipher.encryptor()
if self.is_aead:
aad = struct.pack('!LL', esp.spi, esp.seq)
encryptor.authenticate_additional_data(aad)
data = encryptor.update(data) + encryptor.finalize()
data += encryptor.tag[:self.icv_size]
else:
data = encryptor.update(data) + encryptor.finalize()
return ESP(spi=esp.spi, seq=esp.seq, data=esp.iv + data)
def decrypt(self, sa, esp, key, icv_size=None):
"""
Decrypt an ESP packet
@param sa: the SecurityAssociation associated with the ESP packet.
@param esp: an encrypted ESP packet
@param key: the secret key used for encryption
@param icv_size: the length of the icv used for integrity check
@return: a valid ESP packet encrypted with this algorithm
@raise IPSecIntegrityError: if the integrity check fails with an AEAD
algorithm
"""
if icv_size is None:
icv_size = self.icv_size if self.is_aead else 0
iv = esp.data[:self.iv_size]
data = esp.data[self.iv_size:len(esp.data) - icv_size]
icv = esp.data[len(esp.data) - icv_size:]
if self.cipher:
mode_iv = self._format_mode_iv(sa=sa, iv=iv)
cipher = self.new_cipher(key, mode_iv, icv)
decryptor = cipher.decryptor()
if self.is_aead:
# Tag value check is done during the finalize method
decryptor.authenticate_additional_data(
struct.pack('!LL', esp.spi, esp.seq)
)
try:
data = decryptor.update(data) + decryptor.finalize()
except InvalidTag as err:
raise IPSecIntegrityError(err)
        # extract padlen and nh
        padlen = ord(data[-2])
        nh = ord(data[-1])
        # then use padlen to slice out the padding *before* truncating the
        # decrypted data, otherwise the padding bytes are lost
        padding = data[len(data) - padlen - 2: len(data) - 2]
        data = data[:len(data) - padlen - 2]
return _ESPPlain(spi=esp.spi,
seq=esp.seq,
iv=iv,
data=data,
padding=padding,
padlen=padlen,
nh=nh,
icv=icv)
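# Illustrative sketch (not in the original module): the padding length that
# CryptAlgo.pad computes for a given payload, mirroring the
# -(data_len + 2) % lcm(block_size, 4) rule above.
def _example_esp_padlen(data_len, block_size=16):
    align = _lcm(block_size, 4)
    return -(data_len + 2) % align  # the 2 covers the padlen and nh bytes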
#------------------------------------------------------------------------------
# The names of the encryption algorithms are the same as in scapy.contrib.ikev2
# see http://www.iana.org/assignments/ikev2-parameters/ikev2-parameters.xhtml
CRYPT_ALGOS = {
'NULL': CryptAlgo('NULL', cipher=None, mode=None, iv_size=0),
}
if algorithms:
CRYPT_ALGOS['AES-CBC'] = CryptAlgo('AES-CBC',
cipher=algorithms.AES,
mode=modes.CBC)
_aes_ctr_format_mode_iv = lambda sa, iv, **kw: sa.crypt_salt + iv + b'\x00\x00\x00\x01'
CRYPT_ALGOS['AES-CTR'] = CryptAlgo('AES-CTR',
cipher=algorithms.AES,
mode=modes.CTR,
iv_size=8,
salt_size=4,
format_mode_iv=_aes_ctr_format_mode_iv)
_salt_format_mode_iv = lambda sa, iv, **kw: sa.crypt_salt + iv
CRYPT_ALGOS['AES-GCM'] = CryptAlgo('AES-GCM',
cipher=algorithms.AES,
mode=modes.GCM,
salt_size=4,
iv_size=8,
icv_size=16,
format_mode_iv=_salt_format_mode_iv)
if hasattr(modes, 'CCM'):
CRYPT_ALGOS['AES-CCM'] = CryptAlgo('AES-CCM',
cipher=algorithms.AES,
mode=modes.CCM,
iv_size=8,
salt_size=3,
icv_size=16,
format_mode_iv=_salt_format_mode_iv)
# XXX: Flagged as weak by 'cryptography'. Kept for backward compatibility
CRYPT_ALGOS['Blowfish'] = CryptAlgo('Blowfish',
cipher=algorithms.Blowfish,
mode=modes.CBC)
# XXX: RFC7321 states that DES *MUST NOT* be implemented.
# XXX: Keep for backward compatibility?
    # Using a TripleDES cipher algorithm for DES is done by repeating the same
    # 64-bit key 3 times (cryptography does this when given a 64-bit key)
CRYPT_ALGOS['DES'] = CryptAlgo('DES',
cipher=algorithms.TripleDES,
mode=modes.CBC,
key_size=(8,))
CRYPT_ALGOS['3DES'] = CryptAlgo('3DES',
cipher=algorithms.TripleDES,
mode=modes.CBC)
CRYPT_ALGOS['CAST'] = CryptAlgo('CAST',
cipher=algorithms.CAST5,
mode=modes.CBC)
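# Usage sketch (illustrative, assumes python-cryptography is available so the
# dictionary holds more than 'NULL'): selecting an algorithm by its IKEv2 name
# and validating a key length.
def _example_crypt_algo_lookup(key=b'\x00' * 16):
    algo = CRYPT_ALGOS['AES-CBC']
    algo.check_key(key)        # raises TypeError on an invalid key length
    return algo.generate_iv()  # random IV of algo.iv_size bytes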
#------------------------------------------------------------------------------
if conf.crypto_valid:
from cryptography.hazmat.primitives.hmac import HMAC
from cryptography.hazmat.primitives.cmac import CMAC
from cryptography.hazmat.primitives import hashes
else:
# no error if cryptography is not available but authentication won't be supported
HMAC = CMAC = hashes = None
#------------------------------------------------------------------------------
class IPSecIntegrityError(Exception):
"""
    Error raised when the integrity check fails.
"""
pass
class AuthAlgo(object):
"""
IPsec integrity algorithm
"""
def __init__(self, name, mac, digestmod, icv_size, key_size=None):
"""
@param name: the name of this integrity algorithm
@param mac: a Message Authentication Code module
@param digestmod: a Hash or Cipher module
@param icv_size: the length of the integrity check value of this algo
@param key_size: an integer or list/tuple of integers. If specified,
force the secret keys length to one of the values.
Defaults to the `key_size` of the cipher.
"""
self.name = name
self.mac = mac
self.digestmod = digestmod
self.icv_size = icv_size
self.key_size = key_size
def check_key(self, key):
"""
Check that the key length is valid.
@param key: a byte string
"""
if self.key_size and len(key) not in self.key_size:
raise TypeError('invalid key size %s, must be one of %s' %
(len(key), self.key_size))
@crypto_validator
def new_mac(self, key):
"""
@param key: a byte string
@return: an initialized mac object for this algo
"""
if self.mac is CMAC:
return self.mac(self.digestmod(key), default_backend())
else:
return self.mac(key, self.digestmod(), default_backend())
def sign(self, pkt, key):
"""
Sign an IPsec (ESP or AH) packet with this algo.
@param pkt: a packet that contains a valid encrypted ESP or AH layer
@param key: the authentication key, a byte string
@return: the signed packet
"""
if not self.mac:
return pkt
mac = self.new_mac(key)
if pkt.haslayer(ESP):
mac.update(str(pkt[ESP]))
pkt[ESP].data += mac.finalize()[:self.icv_size]
elif pkt.haslayer(AH):
clone = zero_mutable_fields(pkt.copy(), sending=True)
mac.update(str(clone))
pkt[AH].icv = mac.finalize()[:self.icv_size]
return pkt
def verify(self, pkt, key):
"""
Check that the integrity check value (icv) of a packet is valid.
@param pkt: a packet that contains a valid encrypted ESP or AH layer
@param key: the authentication key, a byte string
@raise IPSecIntegrityError: if the integrity check fails
"""
if not self.mac or self.icv_size == 0:
return
mac = self.new_mac(key)
pkt_icv = 'not found'
computed_icv = 'not computed'
if isinstance(pkt, ESP):
pkt_icv = pkt.data[len(pkt.data) - self.icv_size:]
clone = pkt.copy()
clone.data = clone.data[:len(clone.data) - self.icv_size]
elif pkt.haslayer(AH):
if len(pkt[AH].icv) != self.icv_size:
# Fill padding since we know the actual icv_size
pkt[AH].padding = pkt[AH].icv[self.icv_size:]
pkt[AH].icv = pkt[AH].icv[:self.icv_size]
pkt_icv = pkt[AH].icv
clone = zero_mutable_fields(pkt.copy(), sending=False)
mac.update(str(clone))
computed_icv = mac.finalize()[:self.icv_size]
# XXX: Cannot use mac.verify because the ICV can be truncated
if pkt_icv != computed_icv:
raise IPSecIntegrityError('pkt_icv=%r, computed_icv=%r' %
(pkt_icv, computed_icv))
#------------------------------------------------------------------------------
# The names of the integrity algorithms are the same as in scapy.contrib.ikev2
# see http://www.iana.org/assignments/ikev2-parameters/ikev2-parameters.xhtml
AUTH_ALGOS = {
'NULL': AuthAlgo('NULL', mac=None, digestmod=None, icv_size=0),
}
if HMAC and hashes:
    # XXX: SHA1 is deprecated by NIST but still required by RFC7321
AUTH_ALGOS['HMAC-SHA1-96'] = AuthAlgo('HMAC-SHA1-96',
mac=HMAC,
digestmod=hashes.SHA1,
icv_size=12)
AUTH_ALGOS['SHA2-256-128'] = AuthAlgo('SHA2-256-128',
mac=HMAC,
digestmod=hashes.SHA256,
icv_size=16)
AUTH_ALGOS['SHA2-384-192'] = AuthAlgo('SHA2-384-192',
mac=HMAC,
digestmod=hashes.SHA384,
icv_size=24)
AUTH_ALGOS['SHA2-512-256'] = AuthAlgo('SHA2-512-256',
mac=HMAC,
digestmod=hashes.SHA512,
icv_size=32)
    # XXX: Flagged as deprecated by 'cryptography'. Kept for backward compat
AUTH_ALGOS['HMAC-MD5-96'] = AuthAlgo('HMAC-MD5-96',
mac=HMAC,
digestmod=hashes.MD5,
icv_size=12)
if CMAC and algorithms:
AUTH_ALGOS['AES-CMAC-96'] = AuthAlgo('AES-CMAC-96',
mac=CMAC,
digestmod=algorithms.AES,
icv_size=12,
key_size=(16,))
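# Illustrative sketch (assumes python-cryptography is available): AuthAlgo
# truncates the MAC output to icv_size, which is why verify() above compares
# truncated values instead of calling mac.verify().
def _example_truncated_icv(key=b'\x00' * 20, data=b'payload'):
    algo = AUTH_ALGOS['HMAC-SHA1-96']
    mac = algo.new_mac(key)
    mac.update(data)
    return mac.finalize()[:algo.icv_size]  # 12-byte truncated ICV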
#------------------------------------------------------------------------------
def split_for_transport(orig_pkt, transport_proto):
"""
Split an IP(v6) packet in the correct location to insert an ESP or AH
header.
@param orig_pkt: the packet to split. Must be an IP or IPv6 packet
@param transport_proto: the IPsec protocol number that will be inserted
at the split position.
@return: a tuple (header, nh, payload) where nh is the protocol number of
payload.
"""
# force resolution of default fields to avoid padding errors
header = orig_pkt.__class__(str(orig_pkt))
next_hdr = header.payload
nh = None
if header.version == 4:
nh = header.proto
header.proto = transport_proto
header.remove_payload()
del header.chksum
del header.len
return header, nh, next_hdr
else:
found_rt_hdr = False
prev = header
        # Since RFC 4302 is vague about where the ESP/AH headers should be
        # inserted in IPv6, this follows the Linux implementation.
while isinstance(next_hdr, (IPv6ExtHdrHopByHop, IPv6ExtHdrRouting, IPv6ExtHdrDestOpt)):
if isinstance(next_hdr, IPv6ExtHdrHopByHop):
pass
if isinstance(next_hdr, IPv6ExtHdrRouting):
found_rt_hdr = True
elif isinstance(next_hdr, IPv6ExtHdrDestOpt) and found_rt_hdr:
break
prev = next_hdr
next_hdr = next_hdr.payload
nh = prev.nh
prev.nh = transport_proto
prev.remove_payload()
del header.plen
return header, nh, next_hdr
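# Usage sketch (illustrative; addresses are documentation examples): splitting
# an IPv4 packet at the point where an AH header would be inserted.
def _example_split_for_transport():
    pkt = IP(dst='192.0.2.1') / UDP(sport=1234, dport=53)
    header, nh, payload = split_for_transport(pkt, socket.IPPROTO_AH)
    # header.proto is now IPPROTO_AH and nh holds the original protocol number
    return header, nh, payload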
#------------------------------------------------------------------------------
# see RFC 4302 - Appendix A. Mutability of IP Options/Extension Headers
IMMUTABLE_IPV4_OPTIONS = (
0, # End Of List
    1, # No Operation
2, # Security
5, # Extended Security
6, # Commercial Security
20, # Router Alert
21, # Sender Directed Multi-Destination Delivery
)
def zero_mutable_fields(pkt, sending=False):
"""
When using AH, all "mutable" fields must be "zeroed" before calculating
the ICV. See RFC 4302, Section 3.3.3.1. Handling Mutable Fields.
@param pkt: an IP(v6) packet containing an AH layer.
NOTE: The packet will be modified
@param sending: if true, ipv6 routing headers will not be reordered
"""
if pkt.haslayer(AH):
pkt[AH].icv = chr(0) * len(pkt[AH].icv)
else:
raise TypeError('no AH layer found')
if pkt.version == 4:
# the tos field has been replaced by DSCP and ECN
# Routers may rewrite the DS field as needed to provide a
# desired local or end-to-end service
pkt.tos = 0
# an intermediate router might set the DF bit, even if the source
# did not select it.
pkt.flags = 0
# changed en route as a normal course of processing by routers
pkt.ttl = 0
# will change if any of these other fields change
pkt.chksum = 0
immutable_opts = []
for opt in pkt.options:
if opt.option in IMMUTABLE_IPV4_OPTIONS:
immutable_opts.append(opt)
else:
immutable_opts.append(Raw(chr(0) * len(opt)))
pkt.options = immutable_opts
else:
# holds DSCP and ECN
pkt.tc = 0
# The flow label described in AHv1 was mutable, and in RFC 2460 [DH98]
# was potentially mutable. To retain compatibility with existing AH
# implementations, the flow label is not included in the ICV in AHv2.
pkt.fl = 0
# same as ttl
pkt.hlim = 0
next_hdr = pkt.payload
while isinstance(next_hdr, (IPv6ExtHdrHopByHop, IPv6ExtHdrRouting, IPv6ExtHdrDestOpt)):
if isinstance(next_hdr, (IPv6ExtHdrHopByHop, IPv6ExtHdrDestOpt)):
for opt in next_hdr.options:
if opt.otype & 0x20:
# option data can change en-route and must be zeroed
opt.optdata = chr(0) * opt.optlen
elif isinstance(next_hdr, IPv6ExtHdrRouting) and sending:
# The sender must order the field so that it appears as it
# will at the receiver, prior to performing the ICV computation.
next_hdr.segleft = 0
if next_hdr.addresses:
final = next_hdr.addresses.pop()
next_hdr.addresses.insert(0, pkt.dst)
pkt.dst = final
else:
break
next_hdr = next_hdr.payload
return pkt
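# Illustrative sketch: on IPv4, tos, flags, ttl, chksum and the mutable options
# are zeroed before computing the AH ICV, per RFC 4302 section 3.3.3.1.
def _example_zero_mutable():
    pkt = IP(dst='192.0.2.1', ttl=64, tos=0x10) / AH(icv=b'\x00' * 12) / UDP()
    clone = zero_mutable_fields(pkt.copy(), sending=True)
    return clone  # clone.ttl == 0, clone.tos == 0, clone.flags == 0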
#------------------------------------------------------------------------------
class SecurityAssociation(object):
"""
    This class is responsible for the "encryption" and "decryption" of IPsec packets.
"""
SUPPORTED_PROTOS = (IP, IPv6)
def __init__(self, proto, spi, seq_num=1, crypt_algo=None, crypt_key=None,
auth_algo=None, auth_key=None, tunnel_header=None, nat_t_header=None):
"""
@param proto: the IPsec proto to use (ESP or AH)
@param spi: the Security Parameters Index of this SA
@param seq_num: the initial value for the sequence number on encrypted
packets
@param crypt_algo: the encryption algorithm name (only used with ESP)
@param crypt_key: the encryption key (only used with ESP)
@param auth_algo: the integrity algorithm name
@param auth_key: the integrity key
        @param tunnel_header: an instance of an IP(v6) header that will be used
                              to encapsulate the encrypted packets.
@param nat_t_header: an instance of a UDP header that will be used
for NAT-Traversal.
"""
if proto not in (ESP, AH, ESP.name, AH.name):
raise ValueError("proto must be either ESP or AH")
if isinstance(proto, six.string_types):
self.proto = eval(proto)
else:
self.proto = proto
self.spi = spi
self.seq_num = seq_num
if crypt_algo:
if crypt_algo not in CRYPT_ALGOS:
raise TypeError('unsupported encryption algo %r, try %r' %
(crypt_algo, list(CRYPT_ALGOS.keys())))
self.crypt_algo = CRYPT_ALGOS[crypt_algo]
if crypt_key:
salt_size = self.crypt_algo.salt_size
self.crypt_key = crypt_key[:len(crypt_key) - salt_size]
self.crypt_salt = crypt_key[len(crypt_key) - salt_size:]
else:
self.crypt_key = None
self.crypt_salt = None
else:
self.crypt_algo = CRYPT_ALGOS['NULL']
self.crypt_key = None
if auth_algo:
if auth_algo not in AUTH_ALGOS:
raise TypeError('unsupported integrity algo %r, try %r' %
(auth_algo, list(AUTH_ALGOS.keys())))
self.auth_algo = AUTH_ALGOS[auth_algo]
self.auth_key = auth_key
else:
self.auth_algo = AUTH_ALGOS['NULL']
self.auth_key = None
if tunnel_header and not isinstance(tunnel_header, (IP, IPv6)):
raise TypeError('tunnel_header must be %s or %s' % (IP.name, IPv6.name))
self.tunnel_header = tunnel_header
if nat_t_header:
            if self.proto is not ESP:
raise TypeError('nat_t_header is only allowed with ESP')
if not isinstance(nat_t_header, UDP):
raise TypeError('nat_t_header must be %s' % UDP.name)
self.nat_t_header = nat_t_header
def check_spi(self, pkt):
if pkt.spi != self.spi:
raise TypeError('packet spi=0x%x does not match the SA spi=0x%x' %
(pkt.spi, self.spi))
def _encrypt_esp(self, pkt, seq_num=None, iv=None):
if iv is None:
iv = self.crypt_algo.generate_iv()
else:
if len(iv) != self.crypt_algo.iv_size:
raise TypeError('iv length must be %s' % self.crypt_algo.iv_size)
esp = _ESPPlain(spi=self.spi, seq=seq_num or self.seq_num, iv=iv)
if self.tunnel_header:
tunnel = self.tunnel_header.copy()
if tunnel.version == 4:
del tunnel.proto
del tunnel.len
del tunnel.chksum
else:
del tunnel.nh
del tunnel.plen
pkt = tunnel.__class__(str(tunnel / pkt))
ip_header, nh, payload = split_for_transport(pkt, socket.IPPROTO_ESP)
esp.data = payload
esp.nh = nh
esp = self.crypt_algo.pad(esp)
esp = self.crypt_algo.encrypt(self, esp, self.crypt_key)
self.auth_algo.sign(esp, self.auth_key)
if self.nat_t_header:
nat_t_header = self.nat_t_header.copy()
nat_t_header.chksum = 0
del nat_t_header.len
if ip_header.version == 4:
del ip_header.proto
else:
del ip_header.nh
ip_header /= nat_t_header
if ip_header.version == 4:
ip_header.len = len(ip_header) + len(esp)
del ip_header.chksum
ip_header = ip_header.__class__(str(ip_header))
else:
ip_header.plen = len(ip_header.payload) + len(esp)
# sequence number must always change, unless specified by the user
if seq_num is None:
self.seq_num += 1
return ip_header / esp
def _encrypt_ah(self, pkt, seq_num=None):
ah = AH(spi=self.spi, seq=seq_num or self.seq_num,
icv=chr(0) * self.auth_algo.icv_size)
if self.tunnel_header:
tunnel = self.tunnel_header.copy()
if tunnel.version == 4:
del tunnel.proto
del tunnel.len
del tunnel.chksum
else:
del tunnel.nh
del tunnel.plen
pkt = tunnel.__class__(str(tunnel / pkt))
ip_header, nh, payload = split_for_transport(pkt, socket.IPPROTO_AH)
ah.nh = nh
if ip_header.version == 6 and len(ah) % 8 != 0:
# For IPv6, the total length of the header must be a multiple of
# 8-octet units.
ah.padding = chr(0) * (-len(ah) % 8)
elif len(ah) % 4 != 0:
# For IPv4, the total length of the header must be a multiple of
# 4-octet units.
ah.padding = chr(0) * (-len(ah) % 4)
# RFC 4302 - Section 2.2. Payload Length
# This 8-bit field specifies the length of AH in 32-bit words (4-byte
# units), minus "2".
ah.payloadlen = len(ah) / 4 - 2
if ip_header.version == 4:
ip_header.len = len(ip_header) + len(ah) + len(payload)
del ip_header.chksum
ip_header = ip_header.__class__(str(ip_header))
else:
ip_header.plen = len(ip_header.payload) + len(ah) + len(payload)
signed_pkt = self.auth_algo.sign(ip_header / ah / payload, self.auth_key)
# sequence number must always change, unless specified by the user
if seq_num is None:
self.seq_num += 1
return signed_pkt
def encrypt(self, pkt, seq_num=None, iv=None):
"""
Encrypt (and encapsulate) an IP(v6) packet with ESP or AH according
to this SecurityAssociation.
@param pkt: the packet to encrypt
@param seq_num: if specified, use this sequence number instead of the
generated one
@param iv: if specified, use this initialization vector for
encryption instead of a random one.
@return: the encrypted/encapsulated packet
"""
if not isinstance(pkt, self.SUPPORTED_PROTOS):
raise TypeError('cannot encrypt %s, supported protos are %s'
% (pkt.__class__, self.SUPPORTED_PROTOS))
if self.proto is ESP:
return self._encrypt_esp(pkt, seq_num=seq_num, iv=iv)
else:
return self._encrypt_ah(pkt, seq_num=seq_num)
def _decrypt_esp(self, pkt, verify=True):
encrypted = pkt[ESP]
if verify:
self.check_spi(pkt)
self.auth_algo.verify(encrypted, self.auth_key)
esp = self.crypt_algo.decrypt(self, encrypted, self.crypt_key,
self.crypt_algo.icv_size or
self.auth_algo.icv_size)
if self.tunnel_header:
# drop the tunnel header and return the payload untouched
pkt.remove_payload()
if pkt.version == 4:
pkt.proto = esp.nh
else:
pkt.nh = esp.nh
cls = pkt.guess_payload_class(esp.data)
return cls(esp.data)
else:
ip_header = pkt
if ip_header.version == 4:
ip_header.proto = esp.nh
del ip_header.chksum
ip_header.remove_payload()
ip_header.len = len(ip_header) + len(esp.data)
# recompute checksum
ip_header = ip_header.__class__(str(ip_header))
else:
encrypted.underlayer.nh = esp.nh
encrypted.underlayer.remove_payload()
ip_header.plen = len(ip_header.payload) + len(esp.data)
cls = ip_header.guess_payload_class(esp.data)
# reassemble the ip_header with the ESP payload
return ip_header / cls(esp.data)
def _decrypt_ah(self, pkt, verify=True):
if verify:
self.check_spi(pkt)
self.auth_algo.verify(pkt, self.auth_key)
ah = pkt[AH]
payload = ah.payload
payload.remove_underlayer(None) # useless argument...
if self.tunnel_header:
return payload
else:
ip_header = pkt
if ip_header.version == 4:
ip_header.proto = ah.nh
del ip_header.chksum
ip_header.remove_payload()
ip_header.len = len(ip_header) + len(payload)
# recompute checksum
ip_header = ip_header.__class__(str(ip_header))
else:
ah.underlayer.nh = ah.nh
ah.underlayer.remove_payload()
ip_header.plen = len(ip_header.payload) + len(payload)
# reassemble the ip_header with the AH payload
return ip_header / payload
def decrypt(self, pkt, verify=True):
"""
Decrypt (and decapsulate) an IP(v6) packet containing ESP or AH.
@param pkt: the packet to decrypt
@param verify: if False, do not perform the integrity check
@return: the decrypted/decapsulated packet
@raise IPSecIntegrityError: if the integrity check fails
"""
if not isinstance(pkt, self.SUPPORTED_PROTOS):
raise TypeError('cannot decrypt %s, supported protos are %s'
% (pkt.__class__, self.SUPPORTED_PROTOS))
if self.proto is ESP and pkt.haslayer(ESP):
return self._decrypt_esp(pkt, verify=verify)
elif self.proto is AH and pkt.haslayer(AH):
return self._decrypt_ah(pkt, verify=verify)
else:
raise TypeError('%s has no %s layer' % (pkt, self.proto.name))
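# End-to-end sketch (illustrative; assumes python-cryptography is installed
# and uses documentation addresses): a transport-mode ESP round trip through
# one SecurityAssociation.
def _example_esp_roundtrip():
    sa = SecurityAssociation(ESP, spi=0xdeadbeef,
                             crypt_algo='AES-CBC', crypt_key=b'\x00' * 16,
                             auth_algo='HMAC-SHA1-96', auth_key=b'\x01' * 20)
    pkt = IP(src='192.0.2.1', dst='192.0.2.2') / UDP(sport=1234, dport=53)
    return sa.decrypt(sa.encrypt(pkt))  # recovers the original IP/UDP packet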
|
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_config import cfg
import webob
from nova.api.openstack.compute.legacy_v2.contrib import os_tenant_networks \
as networks
from nova.api.openstack.compute import tenant_networks \
as networks_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
NETWORKS = [
{
"id": 1,
"cidr": "10.20.105.0/24",
"label": "new net 1"
},
{
"id": 2,
"cidr": "10.20.105.0/24",
"label": "new net 2"
}
]
DEFAULT_NETWORK = {
"id": 3,
"cidr": "10.20.105.0/24",
"label": "default"
}
NETWORKS_WITH_DEFAULT_NET = copy.deepcopy(NETWORKS)
NETWORKS_WITH_DEFAULT_NET.append(DEFAULT_NETWORK)
DEFAULT_TENANT_ID = 1
def fake_network_api_get_all(context):
if (context.project_id == DEFAULT_TENANT_ID):
return NETWORKS_WITH_DEFAULT_NET
else:
return NETWORKS
class TenantNetworksTestV21(test.NoDBTestCase):
ctrlr = networks_v21.TenantNetworkController
validation_error = exception.ValidationError
def setUp(self):
super(TenantNetworksTestV21, self).setUp()
self.controller = self.ctrlr()
self.flags(enable_network_quota=True)
self.req = fakes.HTTPRequest.blank('')
self.original_value = CONF.use_neutron_default_nets
def tearDown(self):
super(TenantNetworksTestV21, self).tearDown()
CONF.set_override("use_neutron_default_nets", self.original_value)
def _fake_network_api_create(self, context, **kwargs):
self.assertEqual(context.project_id, kwargs['project_id'])
return NETWORKS
@mock.patch('nova.quota.QUOTAS.reserve')
@mock.patch('nova.quota.QUOTAS.rollback')
@mock.patch('nova.network.api.API.disassociate')
@mock.patch('nova.network.api.API.delete')
def _test_network_delete_exception(self, delete_ex, disassociate_ex, expex,
delete_mock, disassociate_mock,
rollback_mock, reserve_mock):
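        # NOTE: mock.patch decorators are applied bottom-up, so the mock
        # arguments above arrive in the reverse order of the decorator stack.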
ctxt = self.req.environ['nova.context']
reserve_mock.return_value = 'rv'
        if delete_ex:
            delete_mock.side_effect = delete_ex
if disassociate_ex:
disassociate_mock.side_effect = disassociate_ex
self.assertRaises(expex, self.controller.delete, self.req, 1)
disassociate_mock.assert_called_once_with(ctxt, 1)
if not disassociate_ex:
delete_mock.assert_called_once_with(ctxt, 1)
rollback_mock.assert_called_once_with(ctxt, 'rv')
reserve_mock.assert_called_once_with(ctxt, networks=-1)
def test_network_delete_exception_network_not_found(self):
ex = exception.NetworkNotFound(network_id=1)
expex = webob.exc.HTTPNotFound
self._test_network_delete_exception(None, ex, expex)
def test_network_delete_exception_policy_failed(self):
ex = exception.PolicyNotAuthorized(action='dummy')
expex = webob.exc.HTTPForbidden
self._test_network_delete_exception(ex, None, expex)
def test_network_delete_exception_network_in_use(self):
ex = exception.NetworkInUse(network_id=1)
expex = webob.exc.HTTPConflict
self._test_network_delete_exception(ex, None, expex)
@mock.patch('nova.quota.QUOTAS.reserve')
@mock.patch('nova.quota.QUOTAS.commit')
@mock.patch('nova.network.api.API.delete')
@mock.patch('nova.network.api.API.disassociate')
def test_network_delete(self, disassociate_mock, delete_mock, commit_mock,
reserve_mock):
ctxt = self.req.environ['nova.context']
reserve_mock.return_value = 'rv'
res = self.controller.delete(self.req, 1)
        # NOTE: on v2.1, the HTTP status code is set as the wsgi_code of the
        # API method instead of as status_int on a response object.
if isinstance(self.controller, networks_v21.TenantNetworkController):
status_int = self.controller.delete.wsgi_code
else:
status_int = res.status_int
self.assertEqual(202, status_int)
disassociate_mock.assert_called_once_with(ctxt, 1)
delete_mock.assert_called_once_with(ctxt, 1)
commit_mock.assert_called_once_with(ctxt, 'rv')
reserve_mock.assert_called_once_with(ctxt, networks=-1)
@mock.patch('nova.network.api.API.get')
def test_network_show(self, get_mock):
get_mock.return_value = NETWORKS[0]
res = self.controller.show(self.req, 1)
self.assertEqual(res['network'], NETWORKS[0])
@mock.patch('nova.network.api.API.get')
def test_network_show_not_found(self, get_mock):
ctxt = self.req.environ['nova.context']
get_mock.side_effect = exception.NetworkNotFound(network_id=1)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, 1)
get_mock.assert_called_once_with(ctxt, 1)
@mock.patch('nova.network.api.API.get_all')
def _test_network_index(self, get_all_mock, default_net=True):
CONF.set_override("use_neutron_default_nets", default_net)
get_all_mock.side_effect = fake_network_api_get_all
expected = NETWORKS
if default_net is True:
self.req.environ['nova.context'].project_id = DEFAULT_TENANT_ID
expected = NETWORKS_WITH_DEFAULT_NET
res = self.controller.index(self.req)
self.assertEqual(res['networks'], expected)
def test_network_index_with_default_net(self):
self._test_network_index()
def test_network_index_without_default_net(self):
self._test_network_index(default_net=False)
@mock.patch('nova.quota.QUOTAS.reserve')
@mock.patch('nova.quota.QUOTAS.commit')
@mock.patch('nova.network.api.API.create')
def test_network_create(self, create_mock, commit_mock, reserve_mock):
ctxt = self.req.environ['nova.context']
reserve_mock.return_value = 'rv'
create_mock.side_effect = self._fake_network_api_create
body = copy.deepcopy(NETWORKS[0])
del body['id']
body = {'network': body}
res = self.controller.create(self.req, body=body)
self.assertEqual(res['network'], NETWORKS[0])
commit_mock.assert_called_once_with(ctxt, 'rv')
reserve_mock.assert_called_once_with(ctxt, networks=1)
@mock.patch('nova.quota.QUOTAS.reserve')
def test_network_create_quota_error(self, reserve_mock):
ctxt = self.req.environ['nova.context']
reserve_mock.side_effect = exception.OverQuota(overs='fake')
body = {'network': {"cidr": "10.20.105.0/24",
"label": "new net 1"}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, self.req, body=body)
reserve_mock.assert_called_once_with(ctxt, networks=1)
@mock.patch('nova.quota.QUOTAS.reserve')
@mock.patch('nova.quota.QUOTAS.rollback')
@mock.patch('nova.network.api.API.create')
def _test_network_create_exception(self, ex, expex, create_mock,
rollback_mock, reserve_mock):
ctxt = self.req.environ['nova.context']
reserve_mock.return_value = 'rv'
create_mock.side_effect = ex
body = {'network': {"cidr": "10.20.105.0/24",
"label": "new net 1"}}
self.assertRaises(expex, self.controller.create, self.req, body=body)
reserve_mock.assert_called_once_with(ctxt, networks=1)
def test_network_create_exception_policy_failed(self):
ex = exception.PolicyNotAuthorized(action='dummy')
expex = webob.exc.HTTPForbidden
self._test_network_create_exception(ex, expex)
def test_network_create_exception_service_unavailable(self):
ex = Exception
expex = webob.exc.HTTPServiceUnavailable
self._test_network_create_exception(ex, expex)
def test_network_create_empty_body(self):
self.assertRaises(exception.ValidationError,
self.controller.create, self.req, body={})
def test_network_create_without_cidr(self):
body = {'network': {"label": "new net 1"}}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_network_create_bad_format_cidr(self):
body = {'network': {"cidr": "123",
"label": "new net 1"}}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_network_create_empty_network(self):
body = {'network': {}}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_network_create_without_label(self):
body = {'network': {"cidr": "10.20.105.0/24"}}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
class TenantNetworksTestV2(TenantNetworksTestV21):
ctrlr = networks.NetworkController
validation_error = webob.exc.HTTPBadRequest
def setUp(self):
super(TenantNetworksTestV2, self).setUp()
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
def test_network_create_empty_body(self):
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, self.req, {})
class TenantNetworksEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(TenantNetworksEnforcementV21, self).setUp()
self.controller = networks_v21.TenantNetworkController()
self.req = fakes.HTTPRequest.blank('')
def test_create_policy_failed(self):
rule_name = 'os_compute_api:os-tenant-networks'
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.create,
self.req, body={'network': {'label': 'test',
'cidr': '10.0.0.0/32'}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_index_policy_failed(self):
rule_name = 'os_compute_api:os-tenant-networks'
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index,
self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_policy_failed(self):
rule_name = 'os_compute_api:os-tenant-networks'
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete,
self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_show_policy_failed(self):
rule_name = 'os_compute_api:os-tenant-networks'
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show,
self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
|
|
#
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from novaclient import client as base_client
from novaclient import exceptions as nova_exceptions
import requests
from six.moves.urllib import parse as urlparse
from heat.tests import fakes
NOVA_API_VERSION = "2.1"
Client = base_client.Client(NOVA_API_VERSION).__class__
def fake_exception(status_code=404, message=None, details=None):
resp = mock.Mock()
resp.status_code = status_code
resp.headers = None
body = {'error': {'message': message, 'details': details}}
return nova_exceptions.from_response(resp, body, None)
class FakeClient(fakes.FakeClient, Client):
def __init__(self, *args, **kwargs):
super(FakeClient, self).__init__('username', 'password', 'project_id',
'auth_url', direct_use=False)
self.client = FakeHTTPClient(**kwargs)
class FakeHTTPClient(base_client.HTTPClient):
def __init__(self, **kwargs):
super(FakeHTTPClient, self).__init__('username', 'password',
'project_id', 'auth_url')
self.callstack = []
def _cs_request(self, url, method, **kwargs):
# Check that certain things are called correctly
if method in ['GET', 'DELETE']:
assert 'body' not in kwargs
elif method == 'PUT':
assert 'body' in kwargs
# Call the method
args = urlparse.parse_qsl(urlparse.urlparse(url)[4])
kwargs.update(args)
munged_url = url.rsplit('?', 1)[0]
munged_url = munged_url.strip('/').replace('/', '_').replace(
'.', '_').replace(' ', '_')
munged_url = munged_url.replace('-', '_')
callback = "%s_%s" % (method.lower(), munged_url)
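        # e.g. "GET /servers/1234" dispatches to self.get_servers_1234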
if not hasattr(self, callback):
raise AssertionError('Called unknown API method: %s %s, '
'expected fakes method name: %s' %
(method, url, callback))
# Note the call
self.callstack.append((method, url, kwargs.get('body')))
status, body = getattr(self, callback)(**kwargs)
response = requests.models.Response()
if isinstance(status, dict):
response.status_code = status.pop("status")
response.headers = status
else:
response.status_code = status
return response, body
#
# Servers
#
def get_servers_detail(self, **kw):
return (
200,
{"servers": [{"id": "1234",
"name": "sample-server",
"OS-EXT-SRV-ATTR:instance_name":
"sample-server",
"image": {"id": 2, "name": "sample image"},
"flavor": {"id": 1, "name": "256 MB Server"},
"hostId": "e4d909c290d0fb1ca068ffaddf22cbd0",
"status": "BUILD",
"progress": 60,
"addresses": {"public": [{"version": 4,
"addr": "1.2.3.4"},
{"version": 4,
"addr": "5.6.7.8"}],
"private": [{"version": 4,
"addr": "10.11.12.13"}]},
"accessIPv4": "",
"accessIPv6": "",
"metadata": {"Server Label": "Web Head 1",
"Image Version": "2.1"}},
{"id": "5678",
"name": "sample-server2",
"OS-EXT-AZ:availability_zone": "nova2",
"OS-EXT-SRV-ATTR:instance_name":
"sample-server2",
"image": {"id": 2, "name": "sample image"},
"flavor": {"id": 1, "name": "256 MB Server"},
"hostId": "9e107d9d372bb6826bd81d3542a419d6",
"status": "ACTIVE",
"accessIPv4": "192.0.2.0",
"accessIPv6": "::babe:4317:0A83",
"addresses": {"public": [{"version": 4,
"addr": "4.5.6.7",
"OS-EXT-IPS-MAC:mac_addr":
"fa:16:3e:8c:22:aa"},
{"version": 4,
"addr": "5.6.9.8",
"OS-EXT-IPS-MAC:mac_addr":
"fa:16:3e:8c:33:bb"}],
"private": [{"version": 4,
"addr": "10.13.12.13",
"OS-EXT-IPS-MAC:mac_addr":
"fa:16:3e:8c:44:cc"}]},
"metadata": {}},
{"id": "9101",
"name": "hard-reboot",
"OS-EXT-SRV-ATTR:instance_name":
"hard-reboot",
"image": {"id": 2, "name": "sample image"},
"flavor": {"id": 1, "name": "256 MB Server"},
"hostId": "9e44d8d435c43dd8d96bb63ed995605f",
"status": "HARD_REBOOT",
"accessIPv4": "",
"accessIPv6": "",
"addresses": {"public": [{"version": 4,
"addr": "172.17.1.2"},
{"version": 4,
"addr": "10.20.30.40"}],
"private": [{"version": 4,
"addr": "10.13.12.13"}]},
"metadata": {"Server Label": "DB 1"}},
{"id": "9102",
"name": "server-with-no-ip",
"OS-EXT-SRV-ATTR:instance_name":
"server-with-no-ip",
"image": {"id": 2, "name": "sample image"},
"flavor": {"id": 1, "name": "256 MB Server"},
"hostId": "c1365ba78c624df9b2ff446515a682f5",
"status": "ACTIVE",
"accessIPv4": "",
"accessIPv6": "",
"addresses": {"empty_net": []},
"metadata": {"Server Label": "DB 1"}},
{"id": "9999",
"name": "sample-server3",
"OS-EXT-SRV-ATTR:instance_name":
"sample-server3",
"OS-EXT-AZ:availability_zone": "nova3",
"image": {"id": 3, "name": "sample image"},
"flavor": {"id": 3, "name": "m1.large"},
"hostId": "9e107d9d372bb6826bd81d3542a419d6",
"status": "ACTIVE",
"accessIPv4": "",
"accessIPv6": "",
"addresses": {
"public": [{"version": 4, "addr": "4.5.6.7"},
{"version": 4, "addr": "5.6.9.8"}],
"private": [{"version": 4,
"addr": "10.13.12.13"}]},
"metadata": {"Server Label": "DB 1"},
"os-extended-volumes:volumes_attached":
[{"id":
"66359157-dace-43ab-a7ed-a7e7cd7be59d"}]},
{"id": 56789,
"name": "server-with-metadata",
"OS-EXT-SRV-ATTR:instance_name":
"sample-server2",
"image": {"id": 2, "name": "sample image"},
"flavor": {"id": 1, "name": "256 MB Server"},
"hostId": "9e107d9d372bb6826bd81d3542a419d6",
"status": "ACTIVE",
"accessIPv4": "192.0.2.0",
"accessIPv6": "::babe:4317:0A83",
"addresses": {"public": [{"version": 4,
"addr": "4.5.6.7"},
{"version": 4,
"addr": "5.6.9.8"}],
"private": [{"version": 4,
"addr": "10.13.12.13"}]},
"metadata": {'test': '123', 'this': 'that'}}]})
def get_servers_1234(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][0]}
return (200, r)
def get_servers_56789(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][5]}
return (200, r)
def get_servers_WikiServerOne(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][0]}
return (200, r)
def get_servers_WikiServerOne1(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][0]}
return (200, r)
def get_servers_WikiServerOne2(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][3]}
return (200, r)
def get_servers_5678(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][1]}
return (200, r)
def delete_servers_1234(self, **kw):
return (202, None)
def get_servers_9999(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][4]}
return (200, r)
def get_servers_9102(self, **kw):
r = {'server': self.get_servers_detail()[1]['servers'][3]}
return (200, r)
#
# Server actions
#
def post_servers_1234_action(self, body, **kw):
_body = None
resp = 202
assert len(body.keys()) == 1
action = next(iter(body))
if action == 'reboot':
assert list(body[action].keys()) == ['type']
assert body[action]['type'] in ['HARD', 'SOFT']
elif action == 'rebuild':
keys = list(body[action].keys())
if 'adminPass' in keys:
keys.remove('adminPass')
assert keys == ['imageRef']
_body = self.get_servers_1234()[1]
elif action == 'resize':
assert list(body[action].keys()) == ['flavorRef']
elif action == 'confirmResize':
assert body[action] is None
# This one method returns a different response code
return (204, None)
elif action in ['revertResize',
'migrate',
'rescue', 'unrescue',
'suspend', 'resume',
'lock', 'unlock',
]:
assert body[action] is None
elif action == 'addFixedIp':
assert list(body[action].keys()) == ['networkId']
elif action in ['removeFixedIp',
'addFloatingIp',
'removeFloatingIp',
]:
assert list(body[action].keys()) == ['address']
elif action == 'createImage':
assert set(body[action].keys()) == set(['name', 'metadata'])
resp = {"status": 202,
"location": "http://blah/images/456"}
elif action == 'changePassword':
assert list(body[action].keys()) == ['adminPass']
elif action == 'os-getConsoleOutput':
assert list(body[action].keys()) == ['length']
return (202, {'output': 'foo'})
elif action == 'os-getVNCConsole':
assert list(body[action].keys()) == ['type']
elif action == 'os-migrateLive':
assert set(body[action].keys()) == set(['host',
'block_migration',
'disk_over_commit'])
else:
raise AssertionError("Unexpected server action: %s" % action)
return (resp, _body)
def post_servers_5678_action(self, body, **kw):
_body = None
resp = 202
assert len(body.keys()) == 1
action = next(iter(body))
if action in ['addFloatingIp',
'removeFloatingIp',
]:
assert list(body[action].keys()) == ['address']
return (resp, _body)
#
# Flavors
#
def get_flavors(self, **kw):
return (200, {'flavors': [
{'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10,
'OS-FLV-EXT-DATA:ephemeral': 10},
{'id': 2, 'name': 'm1.small', 'ram': 512, 'disk': 20,
'OS-FLV-EXT-DATA:ephemeral': 20},
{'id': 3, 'name': 'm1.large', 'ram': 512, 'disk': 20,
'OS-FLV-EXT-DATA:ephemeral': 30}
]})
def get_flavors_256_MB_Server(self, **kw):
raise fake_exception()
def get_flavors_m1_small(self, **kw):
raise fake_exception()
def get_flavors_m1_large(self, **kw):
raise fake_exception()
def get_flavors_1(self, **kw):
return (200, {'flavor': {
'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10,
'OS-FLV-EXT-DATA:ephemeral': 10}})
def get_flavors_2(self, **kw):
return (200, {'flavor': {
'id': 2, 'name': 'm1.small', 'ram': 512, 'disk': 20,
'OS-FLV-EXT-DATA:ephemeral': 20}})
def get_flavors_3(self, **kw):
return (200, {'flavor': {
'id': 3, 'name': 'm1.large', 'ram': 512, 'disk': 20,
'OS-FLV-EXT-DATA:ephemeral': 30}})
#
# Floating ips
#
def get_os_floating_ips_1(self, **kw):
return (200, {'floating_ip': {'id': 1,
'fixed_ip': '10.0.0.1',
'ip': '11.0.0.1'}})
def post_os_floating_ips(self, body, **kw):
return (202, self.get_os_floating_ips_1()[1])
def delete_os_floating_ips_1(self, **kw):
return (204, None)
#
# Images
#
def get_images_detail(self, **kw):
return (200, {'images': [{'id': 1,
'name': 'CentOS 5.2',
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "ACTIVE",
"metadata": {"test_key": "test_value"},
"links": {}},
{"id": 743,
"name": "My Server Backup",
"serverId": 1234,
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "SAVING",
"progress": 80,
"links": {}},
{"id": 744,
"name": "F17-x86_64-gold",
"serverId": 9999,
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "SAVING",
"progress": 80,
"links": {}},
{"id": 745,
"name": "F17-x86_64-cfntools",
"serverId": 9998,
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "SAVING",
"progress": 80,
"links": {}},
{"id": 746,
"name": "F20-x86_64-cfntools",
"serverId": 9998,
"updated": "2010-10-10T12:00:00Z",
"created": "2010-08-10T12:00:00Z",
"status": "SAVING",
"progress": 80,
"links": {}}]})
def get_images_1(self, **kw):
return (200, {'image': self.get_images_detail()[1]['images'][0]})
get_images_456 = get_images_1
get_images_image_name = get_images_1
#
# Keypairs
#
def get_os_keypairs(self, *kw):
return (200, {"keypairs": [{'fingerprint': 'FAKE_KEYPAIR',
'name': 'test',
'public_key': 'foo'}]})
def get_os_keypairs_test(self, *kw):
return (200, {"keypair": {'fingerprint': 'FAKE_KEYPAIR',
'name': 'test',
'public_key': 'foo'}})
def get_os_keypairs_test2(self, *kw):
raise fake_exception()
def get_os_availability_zone(self, *kw):
return (200, {"availabilityZoneInfo": [{'zoneName': 'nova1'}]})
def get_os_networks(self, **kw):
return (200, {'networks':
[{'label': 'public',
'id': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'},
{'label': 'foo',
'id': '42'},
{'label': 'foo',
'id': '42'}]})
#
# Limits
#
def get_limits(self, *kw):
return (200, {'limits': {'absolute': {'maxServerMeta': 3,
'maxPersonalitySize': 10240,
'maxPersonality': 5}}})
|
|
import urllib
import sys
import os
import re
import mimetypes
import warnings
from copy import copy
from urlparse import urlparse, urlunparse, urlsplit
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import got_request_exception
from django.http import SimpleCookie, HttpRequest, QueryDict
from django.template import TemplateDoesNotExist
from django.test import signals
from django.utils.functional import curry
from django.utils.encoding import smart_str
from django.utils.http import urlencode
from django.utils.importlib import import_module
from django.utils.itercompat import is_iterable
from django.db import transaction, close_connection
from django.test.utils import ContextList
__all__ = ('Client', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile('.*; charset=([\w\d-]+);?')
class FakePayload(object):
"""
    A wrapper around StringIO that restricts what can be read, since data from
    the network can't be seeked and can't be read beyond its content length.
    This makes sure that views can't do anything under the test client that
    wouldn't work in Real Life.
"""
def __init__(self, content):
self.__content = StringIO(content)
self.__len = len(content)
def read(self, num_bytes=None):
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
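# Usage sketch (illustrative): FakePayload mimics wsgi.input semantics.
#   payload = FakePayload('abc')
#   payload.read(2)  # 'ab'
#   payload.read(1)  # 'c'
# A further read(1) would trip the assertion, like over-reading a socket.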
class ClientHandler(BaseHandler):
"""
    An HTTP Handler that can be used for testing purposes.
    Uses the WSGI interface to compose requests, but returns
    the raw HttpResponse object.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super(ClientHandler, self).__init__(*args, **kwargs)
def __call__(self, environ):
from django.conf import settings
from django.core import signals
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
self.load_middleware()
signals.request_started.send(sender=self.__class__)
try:
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
response = self.get_response(request)
finally:
signals.request_finished.disconnect(close_connection)
signals.request_finished.send(sender=self.__class__)
signals.request_finished.connect(close_connection)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Stores templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
store.setdefault('context', ContextList()).append(copy(context))
def encode_multipart(boundary, data):
"""
Encodes multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
is_file = lambda thing: hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, basestring) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % to_str(key),
'',
to_str(item)
])
else:
lines.extend([
'--' + boundary,
'Content-Disposition: form-data; name="%s"' % to_str(key),
'',
to_str(value)
])
lines.extend([
'--' + boundary + '--',
'',
])
return '\r\n'.join(lines)
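# Illustrative sketch (not part of the original module): for a single text
# field, encode_multipart(BOUNDARY, {'key': 'value'}) yields, CRLF-joined:
#   --BoUnDaRyStRiNg
#   Content-Disposition: form-data; name="key"
#   <blank line>
#   value
#   --BoUnDaRyStRiNg--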
def encode_file(boundary, key, file):
to_str = lambda s: smart_str(s, settings.DEFAULT_CHARSET)
content_type = mimetypes.guess_type(file.name)[0]
if content_type is None:
content_type = 'application/octet-stream'
return [
'--' + boundary,
'Content-Disposition: form-data; name="%s"; filename="%s"' \
% (to_str(key), to_str(os.path.basename(file.name))),
'Content-Type: %s' % content_type,
'',
file.read()
]
class RequestFactory(object):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = StringIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': '/',
'QUERY_STRING': '',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1,0),
'wsgi.url_scheme': 'http',
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _get_path(self, parsed):
# If there are parameters, add them
if parsed[3]:
return urllib.unquote(parsed[2] + ";" + parsed[3])
else:
return urllib.unquote(parsed[2])
def get(self, path, data={}, **extra):
"Construct a GET request"
parsed = urlparse(path)
r = {
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'GET',
'wsgi.input': FakePayload('')
}
r.update(extra)
return self.request(**r)
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
**extra):
"Construct a POST request."
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
post_data = smart_str(data, encoding=charset)
parsed = urlparse(path)
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': parsed[4],
'REQUEST_METHOD': 'POST',
'wsgi.input': FakePayload(post_data),
}
r.update(extra)
return self.request(**r)
def head(self, path, data={}, **extra):
"Construct a HEAD request."
parsed = urlparse(path)
r = {
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'HEAD',
'wsgi.input': FakePayload('')
}
r.update(extra)
return self.request(**r)
def options(self, path, data={}, **extra):
"Constrict an OPTIONS request"
parsed = urlparse(path)
r = {
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'OPTIONS',
'wsgi.input': FakePayload('')
}
r.update(extra)
return self.request(**r)
def put(self, path, data={}, content_type=MULTIPART_CONTENT,
**extra):
"Construct a PUT request."
if content_type is MULTIPART_CONTENT:
post_data = encode_multipart(BOUNDARY, data)
else:
post_data = data
# Make `data` into a querystring only if it's not already a string. If
# it is a string, we'll assume that the caller has already encoded it.
query_string = None
if not isinstance(data, basestring):
query_string = urlencode(data, doseq=True)
parsed = urlparse(path)
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': query_string or parsed[4],
'REQUEST_METHOD': 'PUT',
'wsgi.input': FakePayload(post_data),
}
r.update(extra)
return self.request(**r)
def delete(self, path, data={}, **extra):
"Construct a DELETE request."
parsed = urlparse(path)
r = {
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'DELETE',
'wsgi.input': FakePayload('')
}
r.update(extra)
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super(Client, self).__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""
Stores exceptions when they are generated by a view.
"""
self.exc_info = sys.exc_info()
def _session(self):
"""
Obtains the current session variables.
"""
if 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
if cookie:
return engine.SessionStore(cookie.value)
return {}
session = property(_session)
def request(self, **request):
"""
        The master request method. Composes the environment dictionary
        and passes it to the handler, returning the result of the handler.
Assumes defaults for the query environment, which can be overridden
using the arguments to the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signals.template_rendered.connect(on_template_render, dispatch_uid="template-render")
# Capture exceptions created by the handler.
got_request_exception.connect(self.store_exc_info, dispatch_uid="request-exception")
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist, e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
raise exc_info[1], None, exc_info[2]
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Provide a backwards-compatible (but pending deprecation) response.template
def _get_template(self):
warnings.warn("response.template is deprecated; use response.templates instead (which is always a list)",
DeprecationWarning, stacklevel=2)
if not self.templates:
return None
elif len(self.templates) == 1:
return self.templates[0]
return self.templates
response.__class__.template = property(_get_template)
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid="template-render")
got_request_exception.disconnect(dispatch_uid="request-exception")
def get(self, path, data={}, follow=False, **extra):
"""
Requests a response from the server using GET.
"""
response = super(Client, self).get(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Requests a response from the server using POST.
"""
response = super(Client, self).post(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using HEAD.
"""
response = super(Client, self).head(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data={}, follow=False, **extra):
"""
Request a response from the server using OPTIONS.
"""
response = super(Client, self).options(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data={}, content_type=MULTIPART_CONTENT,
follow=False, **extra):
"""
Send a resource to the server using PUT.
"""
response = super(Client, self).put(path, data=data, content_type=content_type, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data={}, follow=False, **extra):
"""
Send a DELETE request to the server.
"""
response = super(Client, self).delete(path, data=data, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Sets the Factory to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
user = authenticate(**credentials)
if user and user.is_active \
and 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
return True
else:
return False
def logout(self):
"""
Removes the authenticated user's cookies and session object.
Causes the authenticated user to be logged out.
"""
session = import_module(settings.SESSION_ENGINE).SessionStore()
session_cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if session_cookie:
session.delete(session_key=session_cookie.value)
self.cookies = SimpleCookie()
def _handle_redirects(self, response, **extra):
"Follows any redirects by requesting responses from the server using GET."
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
url = response['Location']
scheme, netloc, path, query, fragment = urlsplit(url)
redirect_chain = response.redirect_chain
redirect_chain.append((url, response.status_code))
if scheme:
extra['wsgi.url_scheme'] = scheme
# The test client doesn't handle external links,
# but since the situation is simulated in test_client,
# we fake things here by ignoring the netloc portion of the
# redirected URL.
response = self.get(path, QueryDict(query), follow=False, **extra)
response.redirect_chain = redirect_chain
# Prevent loops
if response.redirect_chain[-1] in response.redirect_chain[0:-1]:
break
return response
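# A minimal usage sketch (not part of Django itself): exercise the test client
# and follow redirects; '/some-view/' is a hypothetical URL in the test project.
def _demo_client_usage():
    client = Client()
    response = client.get('/some-view/', follow=True)
    # redirect_chain holds the (url, status_code) pairs collected by _handle_redirects
    print response.status_code, response.redirect_chain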
|
|
import process
import assign
import utils
import kinectData
import clusters
import numpy as np
def setupComparison(data1,data2):
inds = data1.feature_inds
print data2.feat_array[0,:]
data2.feat_array = data2.all_features[:,inds]
print data2.feat_array[0,:]
data2.feature_inds = inds
def getSeveralBases(data,curr_extrema,several=11):
def defineInitBasisLabels(data,curr_extrema,basis_dim, k):
curr_input = data.feat_array[curr_extrema[0]:curr_extrema[1],:]
labels, centers, U = clusters.spectralClustering(curr_input, similarity_method=data.similarity_method, k_nn=6, basis_dim=basis_dim, num_clusters=k)
labels = utils.orderStates(labels)
labels = [int(x) for x in labels]
labels = list(labels)
for ind,col in enumerate(U.T):
U[:,ind] = assign.stretchBasisCol(col)
return labels, U
labels,U = defineInitBasisLabels(data,curr_extrema,basis_dim=several,k=3)
for i in range(several):
assign.plotClassPoints(U[:,i],labels)
def combineTasks(data1,data2,starts,howmany):
task1 = process.Task(data_object=data1,curr_extrema=[starts[0],starts[1]],k=3)
task2 = process.Task(data_object=data2,curr_extrema=[starts[0],starts[1]],k=3)
task1init = [task1.path, task1.times]
task2init = [task2.path, task2.times]
assign.plotClassPoints(task1.history[0],task1.labels)
print 'task1:', task1init
print 'task2:', task2init
for i in range(howmany):
#task1.update(data1,[starts[i+1],starts[i+2]])
task1.update(data2,[starts[i+1],starts[i+2]])
task2.update(data1,[starts[i+1],starts[i+2]])
#task2.update(data2,[starts[i+1],starts[i+2]])
return task1,task2
def plotTaskBases(task):
start = 0
for h in task.history:
assign.plotClassPoints(h,task.labels[start:start+len(h)])
start += len(h)
def setup(task_id='p2-3'):
from main import begin
receiver,giver,starts,rtask,gtask = begin(task_id)
print 'here'
print giver.feat_array[0,:]
setupComparison(receiver,giver)
print giver.feat_array[0,:]
print 'there'
return receiver,giver,starts
def getSubspaces(receiver,giver,starts,dim=4):
urec = {}
ugiv = {}
for i in range(11):
task = process.Task(receiver,[starts[i],starts[i+1]],k=3,basis_dim=dim) #gets a basis of `dim` vectors for this task segment
urec[i] = task.subspace
task = process.Task(giver,[starts[i],starts[i+1]],k=3,basis_dim=dim)
ugiv[i] = task.subspace
return urec,ugiv
def compareBases(subspace,Unew):
z = subspace.projectOnMe(Unew,onlyshape=True) #get corrected shape of new task
error = []
for i,col in enumerate(z.T):
error.append(np.sum(np.abs(subspace.U[:,-2]-col)))
return z,error
def subspaceTimeWarp(subspace1,subspace2,col1,col2,constraint=0.05 ,window=10):
# reshape second subspace basis to fit length of first subspace
C = subspace1.projectOnMe(subspace2.U,onlyshape=True)
#define time series
q = subspace1.U[:,col1]
#print 'info: ', subspace2.U[0,-1], subspace1.U[0,-1], int(subspace2.U[0,-1]) == int(subspace1.U[0,-1])
if int(subspace2.U[0,-1]) != int(subspace1.U[0,-1]):
print 'made it'
C = -1*C
c = C[:,col2]
#print 'lengths: ', len(q),len(c)
c = utils.runningAvg(c,window)
q = utils.runningAvg(q,window)
path,cost = basisTimeWarp(q,c,constraint)
return q,c,path,cost
def basisTimeWarp(q,c,constraint=0.05 ,dist='squared'):
qlen,clen = len(q),len(c)
#print 'qlen,clen', qlen,clen
#define distance matrix D
D = np.zeros((qlen,clen))
for i in np.arange(qlen):
for j in np.arange(clen):
if dist=='squared':
D[i,j] = (q[i]-c[j])**2
else:
D[i,j] = np.abs(q[i]-c[j])
#dynamic programming
i = qlen-1
j = clen-1
path = [[i,j]] #final point will always happen
cost = 0
#define cost map
G = np.zeros((qlen,clen))
G[i,j] = D[i,j]
for k in np.arange(1,i+1): #can only go one direction if at end point for i or j
G[i-k,j] = np.sum(G[(i-k+1):,j]) + D[i-k,j]
for n in np.arange(1,j+1):
G[i,j-n] = np.sum(G[i,(j-n+1):]) + D[i,j-n]
#fill in the rest via G(i,j) = D(i,j) + min{G(i-1,j-1),G(i-1,j),G(i,j-1)}
for k in np.arange(1,i+1):
for n in np.arange(1,j+1):
G[i-k,j-n] = min(G[i-k+1,j-n],G[i-k+1,j-n+1],G[i-k,j-n+1]) + D[i-k,j-n]
#determine path and accumulated cost
#note: iterate while *either* index is positive; with "and" the i==0 / j==0
#boundary branches below are unreachable and the path would skip boundary cells
while i>0 or j>0:
if i==0:
j -= 1
elif j==0:
i -= 1
elif np.abs(i-j) > constraint*qlen: #i and j are too different
if i > j:
minG = min(G[i-1,j],G[i-1,j-1])
if G[i-1,j] == minG:
i -= 1
elif G[i-1,j-1] == minG:
i -= 1
j -= 1
elif i < j:
minG = min(G[i,j-1],G[i-1,j-1])
if G[i,j-1] == minG:
j -= 1
elif G[i-1,j-1] == minG:
i -= 1
j -= 1
else:
minG = min(G[i-1,j],G[i-1,j-1],G[i,j-1])
if G[i-1,j] == minG:
i -= 1
elif G[i,j-1] == minG:
j -= 1
else:
i = i-1
j = j-1
path.append([i,j])
#the loop ends by appending [0,0], so no extra end point is needed
for [x,y] in path:
cost = cost + D[x, y]
return path,cost
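# A minimal sketch (not part of the original module): warp a short series onto a
# time-stretched copy of itself; the accumulated cost should be near zero.
def _demoBasisTimeWarp():
    q = np.array([0.0, 1.0, 2.0, 3.0, 2.0, 1.0, 0.0])
    c = np.array([0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 0.0])
    # a large constraint effectively disables the |i-j| band restriction
    path, cost = basisTimeWarp(q, c, constraint=2.0)
    print 'cost:', cost
    print 'path:', path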
def plotit(q,c,path):
import matplotlib.pyplot as plt
plt.plot(q, 'bo-' ,label='q')
plt.plot(c, 'g^-', label = 'c')
plt.legend()
for [map_q, map_c] in path:
print map_q, q[map_q], ":", map_c, c[map_c]
plt.plot([map_q, map_c], [q[map_q], c[map_c]], 'r')
def getCosts(subspace1,subspace2,col1,constraint=0.75,window=3):
costs = []
for col2 in range(subspace2.U.shape[1]):
q,c,path,cost = subspaceTimeWarp(subspace1,subspace2,col1,col2,constraint,window)
print 'column: ', col2, 'cost: ', cost
costs.append(cost)
return costs
def printMinCosts(subspace1,subspace2_group,col1=2,constraint=0.75,window=3):
costs_list = []
for i in range(11):
costs = getCosts(subspace1,subspace2_group[i],col1,constraint,window)
costs_list.append(np.mean(costs[:-1]))
print '\n\nAvg Costs\n'
for item in costs_list:
print item
return costs_list
def compareFeatureCosts(data_obj1,data_obj2,starts1,starts2,feature,numtasks=10,constraint=0.05):
'''
Purpose:
creates a grid of task-to-task comparisons colored by the strength of the DTW costs. One half of the diagonal represents the inter-task-type comparisons (i.e. receiver task 0 vs receiver task 1), and the other half of the diagonal represents the contra-task-type comparisons (i.e. receiver task 0 vs giver task 1)
'''
#define all the task bases
feature_inds = data_obj1.feature_inds
data1_features = data_obj1.all_features[:,feature_inds]
data2_features = data_obj2.all_features[:,feature_inds] #its a dumb name, semi-ignore the name
relevant_feature = data1_features[:,feature]
irrelevant_feature = data2_features[:,feature]
R = {}
G = {}
for i in np.arange(numtasks):
ri = relevant_feature[starts1[i]:starts1[i+1]]
gi = irrelevant_feature[starts2[i]:starts2[i+1]]
R[i] = ri.reshape(len(ri),1)
G[i] = gi.reshape(len(gi),1)
costmap = np.zeros((numtasks,numtasks))
for i,r in R.iteritems():
for j,rc in R.iteritems():
if j>i:
path,cost = basisTimeWarp(r,rc,constraint=constraint)
costmap[i,j] = cost
for k,g in G.iteritems():
if k>=i:
path,cost = basisTimeWarp(r,g,constraint=constraint)
costmap[k,i] = cost
return costmap
def plotCostMap(data_obj1,data_obj2,starts1,starts2,endtype='median',numtasks=10,constraint=0.05,colormap='cool',threshold=True,plot=True):
import matplotlib.cm as cm
import matplotlib.pyplot as plt
my_cmap = cm.get_cmap(colormap)
num_features = 6
costmaps = ['']*num_features
for i in range(num_features):
costmaps[i] = compareFeatureCosts(data_obj1,data_obj2,starts1,starts2,i,numtasks,constraint)
print 'feature', i, 'complete'
if endtype == 'avg':
costmap = np.mean(costmaps,axis=0)
elif endtype == 'median':
costmap = np.median(costmaps,axis=0)
if threshold:
for i,c in enumerate(costmap):
for j,k in enumerate(c):
if k > 3.353:
costmap[i,j] = 12
elif k<=3.353:
costmap[i,j] = 4
x = np.arange(numtasks+1)
X,Y = np.meshgrid(x,x)
if plot:
plt.pcolor(X,Y,costmap,cmap=my_cmap)
cbar = plt.colorbar(ticks=[])
#plt.gca().invert_yaxis()
plt.title('Task Comparison Matrix - receiver vs. giver tasks')
plt.yticks(np.arange(numtasks)+0.5,range(numtasks))
plt.xticks(np.arange(numtasks)+0.5,range(numtasks))
plt.ylabel('Comparison Task Number')
plt.xlabel('Base Task Number')
print costmap
return costmap
def getTaskMetric(path,times,current_labels,position,frames_since_state_change,constraint=0.05):
estimated_path = createPathObject(path,times,position,frames_since_state_change)
estimated_path = np.array(estimated_path).reshape(len(estimated_path),1)
current_labels = np.array(current_labels).reshape(len(current_labels),1)
#print 'est shape: ', estimated_path.shape
#print 'curr shape:', current_labels.shape
path,cost= basisTimeWarp(estimated_path,current_labels,constraint=constraint)
return cost
def createPathObject(path,times,position,frames_since_state_change):
full = []
for i,p in enumerate(path):
if i < position:
full += [p]*times[i]
else:
full += [p]*min(times[i],frames_since_state_change)
break
return full
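# A minimal sketch (not part of the original module): expand a state path with
# per-state durations into a frame-level sequence, truncated at the current state.
def _demoCreatePathObject():
    full = createPathObject(path=[0, 1, 2], times=[3, 2, 4], position=2,
                            frames_since_state_change=1)
    print full  # -> [0, 0, 0, 1, 1, 2]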
def getAllCostmaps(txtnames,kinectDict,startsDict,numtasks=8):
all_costmaps = {}
for i in np.arange(len(txtnames)):
for j in np.arange(len(txtnames)):
if i == j:
continue
type1,type2 = txtnames[i],txtnames[j]
ij = str(i)+str(j)
all_costmaps[ij] = plotCostMap(kinectDict[type1],kinectDict[type2],startsDict[type1],startsDict[type2],numtasks=numtasks,threshold=False,plot=False)
return all_costmaps
def getMedianComparisons(costmap):
same_vals,diff_vals = [],[]
for i,row in enumerate(costmap):
for j,item in enumerate(row):
if j > i:
same_vals.append(item)
else:
diff_vals.append(item)
median_same = np.median(same_vals)
median_diff = np.median(diff_vals)
return median_same,median_diff
def getMedianComparisonsDict(all_costmaps):
comparison_dict = {}
for k,v in all_costmaps.iteritems():
same,diff = getMedianComparisons(v)
comparison_dict[k] = [same, diff]
return comparison_dict
def getGammaFitRV(data): # deprecated due to seaborn being awesome
import scipy.stats as stats
fit_alpha,fit_loc, fit_beta = stats.gamma.fit(data)
rv = stats.gamma(fit_alpha,fit_loc,fit_beta)
return rv
def plotGammaRVs(datas,labels):
import matplotlib.pyplot as plt
for i,data in enumerate(datas):
max_val = np.amax(data)
x = np.linspace(0,30,100)
rv = getGammaFitRV(data)
y = rv.pdf(x)*len(data)
plt.plot(x,y,'-',label=labels[i])
plt.legend()
plt.grid(True)
def histAndGammaPlot(datas,labels):
import seaborn as sns
import scipy.stats as stats
import matplotlib.pyplot as plt
fig = plt.figure(1)
ax = plt.subplot(111)
fit_intersect = [1.79,0.062]
sns.set_style(style='whitegrid',rc={"lines.linewidth": 0.5})
sns.set_context(context="paper")
sns.color_palette(palette="Paired")
sns.distplot(datas[0],kde=False,fit=stats.gamma,label='Same')
sns.distplot(datas[1],kde=False,fit=stats.gamma,label='Different')
sns.despine(trim=True)
plt.legend()
ax.annotate('1.79', xy=(fit_intersect[0], fit_intersect[1]), color='gray',xytext=(7, 0.65),arrowprops={'arrowstyle': '-|>','color':'gray'})
#ax.text(fit_intersect[0],fit_intersect[1]+0.03,fit_intersect[0],color='gray')
plt.title('Intertask Comparison by Median DTW Costs')
plt.xlabel('Dynamic Time Warping Cost')
plt.show()
def main():
pass
if __name__== '__main__': main()
|
|
from __future__ import unicode_literals
from django.test import TestCase
from django.utils import six
from rest_framework import serializers
from tests.models import (
ForeignKeySource, ForeignKeyTarget, ManyToManySource, ManyToManyTarget,
NullableForeignKeySource, NullableOneToOneSource,
NullableUUIDForeignKeySource, OneToOneTarget, UUIDForeignKeyTarget
)
# ManyToMany
class ManyToManyTargetSerializer(serializers.ModelSerializer):
class Meta:
model = ManyToManyTarget
fields = ('id', 'name', 'sources')
class ManyToManySourceSerializer(serializers.ModelSerializer):
class Meta:
model = ManyToManySource
fields = ('id', 'name', 'targets')
# ForeignKey
class ForeignKeyTargetSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeyTarget
fields = ('id', 'name', 'sources')
class ForeignKeySourceSerializer(serializers.ModelSerializer):
class Meta:
model = ForeignKeySource
fields = ('id', 'name', 'target')
# Nullable ForeignKey
class NullableForeignKeySourceSerializer(serializers.ModelSerializer):
class Meta:
model = NullableForeignKeySource
fields = ('id', 'name', 'target')
# Nullable UUIDForeignKey
class NullableUUIDForeignKeySourceSerializer(serializers.ModelSerializer):
target = serializers.PrimaryKeyRelatedField(
pk_field=serializers.UUIDField(),
queryset=UUIDForeignKeyTarget.objects.all(),
allow_null=True)
class Meta:
model = NullableUUIDForeignKeySource
fields = ('id', 'name', 'target')
# Nullable OneToOne
class NullableOneToOneTargetSerializer(serializers.ModelSerializer):
class Meta:
model = OneToOneTarget
fields = ('id', 'name', 'nullable_source')
# TODO: Add test that .data cannot be accessed prior to .is_valid
class PKManyToManyTests(TestCase):
def setUp(self):
for idx in range(1, 4):
target = ManyToManyTarget(name='target-%d' % idx)
target.save()
source = ManyToManySource(name='source-%d' % idx)
source.save()
for target in ManyToManyTarget.objects.all():
source.targets.add(target)
def test_many_to_many_retrieve(self):
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'targets': [1]},
{'id': 2, 'name': 'source-2', 'targets': [1, 2]},
{'id': 3, 'name': 'source-3', 'targets': [1, 2, 3]}
]
with self.assertNumQueries(4):
self.assertEqual(serializer.data, expected)
def test_many_to_many_retrieve_prefetch_related(self):
queryset = ManyToManySource.objects.all().prefetch_related('targets')
serializer = ManyToManySourceSerializer(queryset, many=True)
with self.assertNumQueries(2):
serializer.data
def test_reverse_many_to_many_retrieve(self):
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]},
{'id': 2, 'name': 'target-2', 'sources': [2, 3]},
{'id': 3, 'name': 'target-3', 'sources': [3]}
]
with self.assertNumQueries(4):
self.assertEqual(serializer.data, expected)
def test_many_to_many_update(self):
data = {'id': 1, 'name': 'source-1', 'targets': [1, 2, 3]}
instance = ManyToManySource.objects.get(pk=1)
serializer = ManyToManySourceSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure source 1 is updated, and everything else is as expected
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'targets': [1, 2, 3]},
{'id': 2, 'name': 'source-2', 'targets': [1, 2]},
{'id': 3, 'name': 'source-3', 'targets': [1, 2, 3]}
]
self.assertEqual(serializer.data, expected)
def test_reverse_many_to_many_update(self):
data = {'id': 1, 'name': 'target-1', 'sources': [1]}
instance = ManyToManyTarget.objects.get(pk=1)
serializer = ManyToManyTargetSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure target 1 is updated, and everything else is as expected
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1]},
{'id': 2, 'name': 'target-2', 'sources': [2, 3]},
{'id': 3, 'name': 'target-3', 'sources': [3]}
]
self.assertEqual(serializer.data, expected)
def test_many_to_many_create(self):
data = {'id': 4, 'name': 'source-4', 'targets': [1, 3]}
serializer = ManyToManySourceSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-4')
# Ensure source 4 is added, and everything else is as expected
queryset = ManyToManySource.objects.all()
serializer = ManyToManySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'targets': [1]},
{'id': 2, 'name': 'source-2', 'targets': [1, 2]},
{'id': 3, 'name': 'source-3', 'targets': [1, 2, 3]},
{'id': 4, 'name': 'source-4', 'targets': [1, 3]},
]
self.assertEqual(serializer.data, expected)
def test_many_to_many_unsaved(self):
source = ManyToManySource(name='source-unsaved')
serializer = ManyToManySourceSerializer(source)
expected = {'id': None, 'name': 'source-unsaved', 'targets': []}
# no query if source hasn't been created yet
with self.assertNumQueries(0):
self.assertEqual(serializer.data, expected)
def test_reverse_many_to_many_create(self):
data = {'id': 4, 'name': 'target-4', 'sources': [1, 3]}
serializer = ManyToManyTargetSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'target-4')
# Ensure target 4 is added, and everything else is as expected
queryset = ManyToManyTarget.objects.all()
serializer = ManyToManyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]},
{'id': 2, 'name': 'target-2', 'sources': [2, 3]},
{'id': 3, 'name': 'target-3', 'sources': [3]},
{'id': 4, 'name': 'target-4', 'sources': [1, 3]}
]
self.assertEqual(serializer.data, expected)
class PKForeignKeyTests(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
new_target = ForeignKeyTarget(name='target-2')
new_target.save()
for idx in range(1, 4):
source = ForeignKeySource(name='source-%d' % idx, target=target)
source.save()
def test_foreign_key_retrieve(self):
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': 1}
]
with self.assertNumQueries(1):
self.assertEqual(serializer.data, expected)
def test_reverse_foreign_key_retrieve(self):
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]},
{'id': 2, 'name': 'target-2', 'sources': []},
]
with self.assertNumQueries(3):
self.assertEqual(serializer.data, expected)
def test_reverse_foreign_key_retrieve_prefetch_related(self):
queryset = ForeignKeyTarget.objects.all().prefetch_related('sources')
serializer = ForeignKeyTargetSerializer(queryset, many=True)
with self.assertNumQueries(2):
serializer.data
def test_foreign_key_update(self):
data = {'id': 1, 'name': 'source-1', 'target': 2}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure source 1 is updated, and everything else is as expected
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 2},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': 1}
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update_incorrect_type(self):
data = {'id': 1, 'name': 'source-1', 'target': 'foo'}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data)
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, {'target': ['Incorrect type. Expected pk value, received %s.' % six.text_type.__name__]})
def test_reverse_foreign_key_update(self):
data = {'id': 2, 'name': 'target-2', 'sources': [1, 3]}
instance = ForeignKeyTarget.objects.get(pk=2)
serializer = ForeignKeyTargetSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
# We shouldn't have saved anything to the db yet since save
# hasn't been called.
queryset = ForeignKeyTarget.objects.all()
new_serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [1, 2, 3]},
{'id': 2, 'name': 'target-2', 'sources': []},
]
self.assertEqual(new_serializer.data, expected)
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure target 2 is updated, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [2]},
{'id': 2, 'name': 'target-2', 'sources': [1, 3]},
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_create(self):
data = {'id': 4, 'name': 'source-4', 'target': 2}
serializer = ForeignKeySourceSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-4')
# Ensure source 4 is added, and everything else is as expected
queryset = ForeignKeySource.objects.all()
serializer = ForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': 1},
{'id': 4, 'name': 'source-4', 'target': 2},
]
self.assertEqual(serializer.data, expected)
def test_reverse_foreign_key_create(self):
data = {'id': 3, 'name': 'target-3', 'sources': [1, 3]}
serializer = ForeignKeyTargetSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'target-3')
# Ensure target 3 is added, and everything else is as expected
queryset = ForeignKeyTarget.objects.all()
serializer = ForeignKeyTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'sources': [2]},
{'id': 2, 'name': 'target-2', 'sources': []},
{'id': 3, 'name': 'target-3', 'sources': [1, 3]},
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update_with_invalid_null(self):
data = {'id': 1, 'name': 'source-1', 'target': None}
instance = ForeignKeySource.objects.get(pk=1)
serializer = ForeignKeySourceSerializer(instance, data=data)
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors, {'target': ['This field may not be null.']})
def test_foreign_key_with_unsaved(self):
source = ForeignKeySource(name='source-unsaved')
expected = {'id': None, 'name': 'source-unsaved', 'target': None}
serializer = ForeignKeySourceSerializer(source)
# no query if source hasn't been created yet
with self.assertNumQueries(0):
self.assertEqual(serializer.data, expected)
def test_foreign_key_with_empty(self):
"""
Regression test for #1072
https://github.com/tomchristie/django-rest-framework/issues/1072
"""
serializer = NullableForeignKeySourceSerializer()
self.assertEqual(serializer.data['target'], None)
class PKNullableForeignKeyTests(TestCase):
def setUp(self):
target = ForeignKeyTarget(name='target-1')
target.save()
for idx in range(1, 4):
if idx == 3:
target = None
source = NullableForeignKeySource(name='source-%d' % idx, target=target)
source.save()
def test_foreign_key_retrieve_with_null(self):
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': None},
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_create_with_valid_null(self):
data = {'id': 4, 'name': 'source-4', 'target': None}
serializer = NullableForeignKeySourceSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, data)
self.assertEqual(obj.name, 'source-4')
# Ensure source 4 is created, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': None},
{'id': 4, 'name': 'source-4', 'target': None}
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_create_with_valid_emptystring(self):
"""
The empty string should be interpreted as null in the context
of relationships.
"""
data = {'id': 4, 'name': 'source-4', 'target': ''}
expected_data = {'id': 4, 'name': 'source-4', 'target': None}
serializer = NullableForeignKeySourceSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(serializer.data, expected_data)
self.assertEqual(obj.name, 'source-4')
# Ensure source 4 is created, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': 1},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': None},
{'id': 4, 'name': 'source-4', 'target': None}
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update_with_valid_null(self):
data = {'id': 1, 'name': 'source-1', 'target': None}
instance = NullableForeignKeySource.objects.get(pk=1)
serializer = NullableForeignKeySourceSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, data)
# Ensure source 1 is updated, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': None},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': None}
]
self.assertEqual(serializer.data, expected)
def test_foreign_key_update_with_valid_emptystring(self):
"""
The empty string should be interpreted as null in the context
of relationships.
"""
data = {'id': 1, 'name': 'source-1', 'target': ''}
expected_data = {'id': 1, 'name': 'source-1', 'target': None}
instance = NullableForeignKeySource.objects.get(pk=1)
serializer = NullableForeignKeySourceSerializer(instance, data=data)
self.assertTrue(serializer.is_valid())
serializer.save()
self.assertEqual(serializer.data, expected_data)
# Ensure source 1 is updated, and everything else is as expected
queryset = NullableForeignKeySource.objects.all()
serializer = NullableForeignKeySourceSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'source-1', 'target': None},
{'id': 2, 'name': 'source-2', 'target': 1},
{'id': 3, 'name': 'source-3', 'target': None}
]
self.assertEqual(serializer.data, expected)
def test_null_uuid_foreign_key_serializes_as_none(self):
source = NullableUUIDForeignKeySource(name='Source')
serializer = NullableUUIDForeignKeySourceSerializer(source)
data = serializer.data
self.assertEqual(data["target"], None)
def test_nullable_uuid_foreign_key_is_valid_when_none(self):
data = {"name": "Source", "target": None}
serializer = NullableUUIDForeignKeySourceSerializer(data=data)
self.assertTrue(serializer.is_valid(), serializer.errors)
class PKNullableOneToOneTests(TestCase):
def setUp(self):
target = OneToOneTarget(name='target-1')
target.save()
new_target = OneToOneTarget(name='target-2')
new_target.save()
source = NullableOneToOneSource(name='source-1', target=new_target)
source.save()
def test_reverse_foreign_key_retrieve_with_null(self):
queryset = OneToOneTarget.objects.all()
serializer = NullableOneToOneTargetSerializer(queryset, many=True)
expected = [
{'id': 1, 'name': 'target-1', 'nullable_source': None},
{'id': 2, 'name': 'target-2', 'nullable_source': 1},
]
self.assertEqual(serializer.data, expected)
|
|
# PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
This module contains functions for reading and writing files
for the Biometrics Evaluation Environment (BEE) including distance
matrices and sigsets.
@authors: David S. Bolme (CSU) and C.J. Carey (NIST)
see: <a href="http://www.bee-biometrics.org">http://www.bee-biometrics.org</a>
'''
import xml.etree.cElementTree as ET
import os.path
import struct
import binascii
import numpy as np
#import scipy as sp
import scipy.io as spio
import pyvision as pv
import pyvision.analysis.roc as roc
import gzip
BIOMETRIC_SIGNATURE = '{http://www.bee-biometrics.org/schemas/sigset/0.1}biometric-signature'
PRESENTATION = '{http://www.bee-biometrics.org/schemas/sigset/0.1}presentation'
COMPLEX_BIOMETRIC_SIGNATURE = '{http://www.bee-biometrics.org/schemas/sigset/0.1}complex-biometric-signature'
COMPLEX_PRESENTATION = '{http://www.bee-biometrics.org/schemas/sigset/0.1}complex-presentation'
COMPLEX_COMPONENT = '{http://www.bee-biometrics.org/schemas/sigset/0.1}presentation-component'
COMPLEX_DATA = '{http://www.bee-biometrics.org/schemas/sigset/0.1}data'
BEE_NONMATCH = 0x7f
BEE_MATCH = -1 #0xff
BEE_DONTCARE = 0x00
BEE_CODE_MAP = {
0x7f:"NONMATCH",
0xff:"MATCH",
-1:"MATCH",
0x00:"DONTCARE",
}
##
# Parse a BEE sigset.
def parseSigSet(filename):
'''
the format of a sigset is::
sigset = [
("subject_id", #biometric-signature
[ # multiple presentations
{'name':"recording_id", 'modality':"...", 'file-name':"...", 'file-format':"..."},
{'name':"recording_id", 'modality':"...", 'file-name':"...", 'file-format':"..."},
{'name':"recording_id", 'modality':"...", 'file-name':"...", 'file-format':"..."}
]
),
("subject_id",#biometric-signature
[ # multiple presentations
{'name':"recording_id", 'modality':"...", 'file-name':"...", 'file-format':"..."},
{'name':"recording_id", 'modality':"...", 'file-name':"...", 'file-format':"..."},
{'name':"recording_id", 'modality':"...", 'file-name':"...", 'file-format':"..."}
]
)
]
'''
if isinstance(filename,str) and filename.endswith('.gz'):
# assume the file is compressed
filename = gzip.open(filename,'rb')
sigset = ET.parse(filename)
result = []
# Parse standard biometric signatures without namespaces
for sig in sigset.findall('biometric-signature'):
name = sig.get('name')
signature = []
result.append( (name,signature) )
for pres in sig.findall('presentation'):
presentation = {}
for key in pres.keys():
presentation[key] = pres.get(key)
signature.append(presentation)
# Parse standard biometric signatures.
for sig in sigset.findall(BIOMETRIC_SIGNATURE):
name = sig.get('name')
signature = []
result.append( (name, signature ) )
for pres in sig.findall(PRESENTATION):
presentation = {}
for key in pres.keys():
presentation[key] = pres.get(key)
signature.append(presentation)
# Parse complex biometric signatures.
for sig in sigset.findall(COMPLEX_BIOMETRIC_SIGNATURE):
name = sig.get('name')
signature = []
result.append( (name, signature) )
for pres in sig.findall(COMPLEX_PRESENTATION):
presentation = {}
for key in pres.keys():
presentation[key] = pres.get(key)
for comp in pres.findall(COMPLEX_COMPONENT):
for data in comp.findall(COMPLEX_DATA):
for key in data.keys():
presentation[key] = data.get(key)
signature.append(presentation)
return result
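# A minimal sketch (not part of the original API): walk a parsed sigset as the
# docstring above describes it; 'sigset.xml' is a hypothetical filename.
def _demoParseSigSet():
    for subject_id, presentations in parseSigSet('sigset.xml'):
        for presentation in presentations:
            print subject_id, presentation.get('name'), presentation.get('file-name')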
def saveSigset(ss,filename):
'''
save a sigset to a file.
@param ss: a sigset structured list
@param filename: a file object or filename
'''
if isinstance(filename,str) and filename.endswith('.gz'):
# assume the file should be compressed
filename = gzip.open(filename,'wb')
xmlss = sigset2xml(ss)
xmlss.write(filename)
def sigset2xml(ss):
root = ET.Element("biometric-signature-set")
root.text="\n "
for signature in ss:
sig = ET.SubElement(root,"biometric-signature")
sig.set('name',signature[0])
sig.text="\n "
sig.tail="\n "
for presentation in signature[1]:
pres = ET.SubElement(sig,'presentation')
for key,value in presentation.iteritems():
pres.set(key,value)
pres.tail="\n "
tree = ET.ElementTree(root)
return tree
def sigset2array(ss):
result = []
for signature in ss:
sub_id = signature[0]
if len(signature[1]) != 1:
raise TypeError("This function only handles simple sigsets.")
#print signature[1][0]
mode = signature[1][0]['modality']
file_format = signature[1][0]['file-format']
rec_id = signature[1][0]['name']
filename = signature[1][0]['file-name']
result.append([sub_id,mode,file_format,rec_id,filename])
return result
def formatSigset(ss,n=None):
c = 0
for name,data in ss:
if c == n:
break
print "Name: %s"%name
for i in range(len(data)):
print " Presentation %d" %i
pres = data[i]
for key,value in pres.iteritems():
print " %-15s : %s"%(key,value)
c += 1
def fastROC(sorted_positives, sorted_negatives):
'''
Quickly computes an ROC-style curve from pre-sorted score arrays: for the i-th
positive score, pairs the fraction i/n_pos with the fraction of negatives that
fall below it (found via binary search). Returns an (n_pos, 2) array.
'''
positives = sorted_positives
negatives = sorted_negatives
n_pos = len(positives)
n_neg = len(negatives)
assert len(positives) < len(negatives)
#timer.mark("Starting search sorted")
indexes = np.searchsorted(negatives,positives)
#timer.mark("Search time")
#print "Searched:", len(indexes)
tp = (1.0/n_pos) * np.arange(n_pos)
fn = (1.0/n_neg) * indexes
#timer.mark("ROC computed")
curve = np.array([tp,fn]).transpose()
#print "Curve:",curve.shape
#print curve
return curve
class BEEDistanceMatrix:
def __init__(self, *args, **kwargs):
'''
Creates a BEE distance matrix
'''
if isinstance(args[0],str):
self.loadFile(*args,**kwargs)
elif isinstance(args[0],np.ndarray):
self.loadMatrix(*args,**kwargs)
else:
raise TypeError("Cannot create a BEEDistanceMatrix from an object of type: %s"%type(args[0]))
def loadFile(self,filename,sigset_dir=None):
'''
Loads a BEE matrix from a file.
'''
self.filename = filename
self.shortname = os.path.basename(filename)
# open the file for reading
f = open(filename,'rb')
#read the distance matrix header (first four lines of the file)
line = f.readline()
# Test line endings
if len(line) != 3 or line[-1] != "\x0a":
# Note: \x0a (LF) is the "official" line ending char.
# \x0d is also supported in the Java and C++ tools but it will cause a failure in this implementation.
# see IARPA BEST - Challenge Problem Specification and Executable Application Program Interface
# thanks to Todd Scruggs
raise ValueError("Unsupported line ending. Should be two characters followed by LF (0x0A).")
# Check Format
line = line.strip()
if line not in ['D2','S2','M2']:
raise ValueError('Unknown matrix Format "%s". Should be D2, S2, or M2.'%line)
self.is_distance = True
if line[0] == 'S':
self.is_distance = False
# read and process line 2 (target sigset)
line = f.readline().split()
self.target_filename = os.path.basename(line[0])
# read and process line 3 (query sigset)
line = f.readline().split()
self.query_filename = os.path.basename(line[0])
# read and process line 4 (MF n_queries n_targets magic_number)
line = f.readline().split()
assert line[0] in ['MF','MB']
file_type = line[0][1]
self.n_queries = int(line[1])
self.n_targets = int(line[2])
big_endian = struct.pack(">I",0x12345678)
little_endian = struct.pack("<I",0x12345678)
if line[3] != big_endian and line[3] != little_endian:
print "Warning unsupported magic number is BEE matrix: 0x%s"%binascii.hexlify(line[3])
self.magic_number = struct.unpack_from("=I",line[3])[0]
if self.magic_number == 0x12345678:
byteswap = False
elif self.magic_number == 0x78563412:
byteswap = True
else:
raise ValueError("Unknown magic number in similarity matrix.")
# Read the matrix data
if file_type=='F':
self.matrix = np.fromfile(f,dtype=np.float32)
elif file_type=='B':
self.matrix = np.fromfile(f,dtype=np.byte)
else:
raise TypeError("Unknown matrix file_type: %s"%file_type)
if file_type=='F' and byteswap:
self.matrix = self.matrix.byteswap()
assert self.matrix.shape[0] == self.n_targets*self.n_queries
self.matrix = self.matrix.reshape(self.n_queries,self.n_targets)
# Try to read the sigsets.
if sigset_dir == None:
sigset_dir = os.path.dirname(self.filename)
self.queries = None
try:
ss_name = os.path.join(sigset_dir,self.query_filename)
self.queries = parseSigSet(ss_name)
assert len(self.queries) == self.n_queries
except:
pass
#print "Warning: cound not read the query sigset for distance matrix %s"%self.shortname
#print " SigSet File:",ss_name
#print " Expected:",self.n_queries,"Read:",len(self.queries)
self.targets = None
try:
ss_name = os.path.join(sigset_dir,self.target_filename)
self.targets = parseSigSet(ss_name)
assert len(self.targets) == self.n_targets
except:
pass
#print "Warning: cound not read the target sigset for distance matrix %s"%self.shortname
#print " SigSet File:",ss_name
#print " Expected:",self.n_targets,"Read:",len(self.targets)
def loadMatrix(self, mat, query_filename, target_filename, sigset_dir=None, is_distance=True):
'''
Creates a bee matrix from a numpy array.
'''
self.shortname=None
#read the distance matrix header (first four lines of the file)
if mat.dtype != np.byte:
mat = mat.astype(np.float32)
# select distance or similarity
self.is_distance = is_distance
# read and process line 2 (target sigset)
self.target_filename = target_filename
# read and process line 3 (query sigset)
self.query_filename = query_filename
# read and process line 4 (MF n_queries n_targets magic_number)
self.n_queries = mat.shape[0]
self.n_targets = mat.shape[1]
self.magic_number = 0x12345678
# Read the matrix data
self.matrix = mat
# Try to read the sigsets.
self.queries = None
self.targets = None
if sigset_dir != None:
try:
ss_name = os.path.join(sigset_dir,self.query_filename)
self.queries = parseSigSet(ss_name)
assert len(self.queries) == self.n_queries
except:
print "Warning: cound not read the query sigset for distance matrix"
print " SigSet File:",ss_name
print " Expected:",self.n_queries,"Read:",len(self.queries)
try:
ss_name = os.path.join(sigset_dir,self.target_filename)
self.targets = parseSigSet(ss_name)
assert len(self.targets) == self.n_targets
except:
print "Warning: cound not read the target sigset for distance matrix"
print " SigSet File:",ss_name
print " Expected:",self.n_targets,"Read:",len(self.targets)
def cohort_norm(self):
for i in range(self.matrix.shape[0]):
a = self.matrix[i,:]
mn = a.mean()
sd = a.std()
self.matrix[i,:] = (self.matrix[i,:]-mn)/sd
def getMatchScores(self,mask=None):
#assert self.queries != None
#assert self.targets != None
matches = []
if self.queries != None and self.targets != None:
queries = np.array([ name for name,_ in self.queries ])
targets = np.array([ name for name,_ in self.targets ])
for i in range(self.matrix.shape[0]):
#print i, len(matches)
if mask != None:
matches.append(self.matrix[i,mask.matrix[i,:] == BEE_MATCH])
else:
query = queries[i]
matches.append(self.matrix[i,query==targets])
total = 0
for each in matches:
total += len(each)
scores = np.zeros(shape=(total),dtype=np.float32)
i = 0
for each in matches:
s = len(each)
scores[i:i+s] = each
i += s
return scores
def getMatchScoresBySubject(self,mask=None):
assert self.queries != None
assert self.targets != None
matches = {}
queries = np.array([ name for name,_ in self.queries ])
targets = np.array([ name for name,_ in self.targets ])
qnames = set(queries)
#tnames = set(targets)
for name in qnames:
rows = np.nonzero(name == queries)[0]
cols = np.nonzero(name == targets)[0]
tmp = self.matrix[rows][:,cols]
if mask != None:
m = mask.matrix[rows][:,cols] == BEE_MATCH
matches[name] = tmp.flatten()[m.flatten()]
else:
matches[name] = tmp.flatten()
if len(matches[name]) == 0:
del matches[name]
return matches
def getNonMatchScores(self,mask=None):
#assert self.queries != None
#assert self.targets != None
matches = []
if self.queries != None and self.targets != None:
queries = np.array([ name for name,_ in self.queries ])
targets = np.array([ name for name,_ in self.targets ])
for i in range(self.matrix.shape[0]):
if mask != None:
matches.append(self.matrix[i,mask.matrix[i,:] == BEE_NONMATCH])
else:
query = queries[i]
matches.append(self.matrix[i,query!=targets])
total = 0
for each in matches:
total += len(each)
scores = np.zeros(shape=(total),dtype=np.float32)
i = 0
for each in matches:
s = len(each)
scores[i:i+s] = each
i += s
return scores
def asFlatArray(self,mask=None):
'''query,target,score,type'''
r,c = self.matrix.shape
result = np.zeros((r*c,4),dtype=np.object)
for i in range(r):
for j in range(c):
result[c*i+j,0] = i
result[c*i+j,1] = j
result[c*i+j,2] = self.matrix[i,j]
if BEE_CODE_MAP.has_key(mask[i,j]):
result[c*i+j,3] = BEE_CODE_MAP[mask[i,j]]
else:
result[c*i+j,3] = "0x%02x"%mask[i,j]
return result
def printInfo(self):
print "BEEDistanceMatrix:",self.filename
print " is_distance :",self.is_distance
print " target_filename :",self.target_filename
print " query_filename :",self.query_filename
print " n_queries :",self.n_queries
print " n_targets :",self.n_targets
print " <total size> :",self.n_targets*self.n_queries
print " magic_number : %x"%self.magic_number
print " matrix.shape :",self.matrix.shape
def write(self,filename):
self.save(filename)
def save(self,filename):
'''
Writes the BEE distance matrix to file. WARNING: DOES NOT HANDLE MASK MATRICES CORRECTLY!
'''
if filename.endswith('.mtx'):
# save a BEE formated matrix
self.saveBeeFormat(filename)
elif filename.endswith('.mat'):
# save a matlab formated matrix
if self.is_distance:
matrix_name = 'dist_matrix'
else:
matrix_name = 'sim_matrix'
spio.savemat(filename, {matrix_name:self.matrix})
else:
raise NotImplementedError("Unsupported matrix format for filename %s"%filename)
def saveBeeFormat(self,filename):
#maybe check for overwrite? and add param for allowing overwrite
f = open(filename, "wb")
# write line 1 : file_type and version
file_type = 'D'
if self.matrix.dtype == np.byte:
file_type = 'M'
elif self.is_distance:
file_type = 'D'
else:
file_type = 'S'
f.write(file_type)
f.write("2\x0a")
# write lines 2 and 3 (target and query sigsets)
f.write(self.target_filename+"\x0a")
f.write(self.query_filename+"\x0a")
# write line 4 (MF n_queries n_targets magic_number)
magic_number = struct.pack('=I',0x12345678)
assert len(magic_number) == 4 # Bug fix: verify the magic number is really 4 bytes
if file_type == 'M':
f.write("MB %d %d %s\x0a" %(self.n_queries, self.n_targets, magic_number))
else:
f.write("MF %d %d %s\x0a" %(self.n_queries, self.n_targets, magic_number))
# write the data
f.write(self.matrix)
f.close()
def histogram(self,value_range=None,bins=100,normed=True,mask=None):
match_scores = self.getMatchScores(mask=mask)
nonmatch_scores = self.getNonMatchScores(mask=mask)
if value_range == None:
value_range = (self.matrix.min(),self.matrix.max())
match_counts,_ = np.histogram(match_scores,range=value_range,bins=bins,normed=normed)
nonmatch_counts,vals = np.histogram(nonmatch_scores,range=value_range,bins=bins,normed=normed)
hist = pv.Table()
for i in range(len(match_counts)):
hist[i,'min'] = vals[i]
hist[i,'center'] = 0.5*(vals[i]+vals[i+1])
hist[i,'max'] = vals[i+1]
hist[i,'match_count'] = match_counts[i]
hist[i,'nonmatch_count'] = nonmatch_counts[i]
return hist
def getROC(self,mask=None):
nonmatch = self.getNonMatchScores(mask=mask)
match = self.getMatchScores(mask=mask)
return roc.ROC(match,nonmatch,is_distance=self.is_distance)
def getRank1(self,mask=None):
rows,_ = self.matrix.shape
queries = np.array([ name for name,_ in self.queries ])
targets = np.array([ name for name,_ in self.targets ])
success = 0.0
count = 0.0
for i in range(rows):
row = self.matrix[i]
if self.is_distance:
j = row.argmin()
else:
j = row.argmax()
if queries[i] == targets[j]:
success += 1
count += 1
#print success, count, success/count
return success/count
def stats(self):
table = pv.Table()
table['Mean','Value'] = self.matrix.mean()
# not computed efficiently: table['Std','Value'] = self.matrix.flatten().std()
table['Min','Value'] = self.matrix.min()
table['Max','Value'] = self.matrix.max()
return table
def __str__(self):
'''
Returns a string describing the matrix.
'''
file_type = {True:"Distance",False:"Similarity"}[self.is_distance]
return "BEE[file=%s;type=%s]"%(self.shortname,file_type)
def __getitem__(self,index):
'''An accessor to quickly read matrix data'''
return self.matrix.__getitem__(index)
def shape(self):
'''@returns: the number of rows and columns.'''
return self.matrix.shape
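# A minimal sketch (not part of the original module): wrap a numpy score matrix
# and save it in BEE format; the sigset filenames here are hypothetical.
def _demoSaveMatrix():
    mat = np.random.rand(4, 3).astype(np.float32)
    bee = BEEDistanceMatrix(mat, 'query.xml', 'target.xml', is_distance=False)
    bee.save('scores.mtx')  # written with an 'S2' header since it is a similarity matrix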
def computeMaskMatrix(target_sigset,query_sigset,target_filename,query_filename,symmetric = True):
'''
Computes a mask matrix from two sigsets.
@param target_sigset: the target sigset to use.
@param query_sigset: the query sigset to use.
@param symmetric: if true and the sigsets are equal, assumes the matrix is symmetric and treats the lower-left triangle as DONT_CARE's.
@returns: a bee mask matrix.
'''
assert len(target_sigset) > 0
assert len(query_sigset) > 0
target_subid = np.array([each[0] for each in target_sigset])
query_subid = np.array([each[0] for each in query_sigset])
target_recid = np.array([each[1][0]['name'] for each in target_sigset])
query_recid = np.array([each[1][0]['name'] for each in query_sigset])
cols = target_subid.shape[0]
rows = query_subid.shape[0]
target_subid.shape = (1,cols)
query_subid.shape = (rows,1)
target_recid.shape = (1,cols)
query_recid.shape = (rows,1)
# Initialize matrix to non match
mat = np.zeros((rows,cols),dtype=np.byte)
mat[:,:] = pv.BEE_NONMATCH
# Set matches to match
matches = target_subid == query_subid
mat[matches] = pv.BEE_MATCH
# Set duplicates to don't care.
duplicates = target_recid == query_recid
mat[duplicates] = pv.BEE_DONTCARE
# Check for symmetric matrix
if symmetric and rows == cols:
ts = target_recid.flatten()
qs = query_recid.flatten()
if (ts == qs).sum() == rows:
# Exclude the lower triangle
r = np.arange(rows)
c = np.arange(cols)
r.shape = (rows,1)
c.shape = (1,cols)
tmp = r > c
mat[tmp] = pv.BEE_DONTCARE
return pv.BEEDistanceMatrix(mat, query_filename, target_filename)
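# A minimal sketch (not part of the original module): build a symmetric mask by
# using one sigset as both target and query; 'gallery.xml' is hypothetical.
def _demoComputeMaskMatrix():
    ss = parseSigSet('gallery.xml')
    mask = computeMaskMatrix(ss, ss, 'gallery.xml', 'gallery.xml', symmetric=True)
    print mask, mask.shape()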
|
|
#!/usr/bin/env python3
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'Simone Campagna'
import os
import sys
import argparse
import collections
from .helper import Helper
from ..manager import Manager
from ..errors import SessionConfigError
from ..utils.debug import set_quiet, set_verbose, set_debug, LOGGER
from ..utils.trace import set_trace, trace
from ..utils.install_data import set_home_dir, set_admin_user, set_version, get_version, get_zapper_profile
from ..utils.argparse_autocomplete import autocomplete_monkey_patch
from ..utils.strings import string_to_bool
_ZAPPER_COMPLETE_FUNCTION = string_to_bool(os.environ.get("ZAPPER_COMPLETE_FUNCTION", "False"))
_ZAPPER_QUIET_MODE = string_to_bool(os.environ.get("ZAPPER_QUIET_MODE", "False"))
def _set_global_flags(enable_complete_function, *, quiet, verbose, debug, trace):
if enable_complete_function:
# to avoid unwanted log output during completion
set_quiet()
#set_trace(False)
#set_verbose(False)
#set_debug(False)
else:
if quiet:
set_quiet()
else:
set_verbose(verbose)
set_debug(debug)
set_trace(trace)
def create_manager():
try:
manager = Manager()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
trace(True)
LOGGER.critical("{0}: {1}".format(exc_type.__name__, exc_value))
if not isinstance(exc_value, SessionConfigError):
LOGGER.critical("Unrecoverable error\n")
sys.exit(1)
_set_global_flags(
_ZAPPER_COMPLETE_FUNCTION or _ZAPPER_QUIET_MODE,
quiet=manager.get_config_key('quiet'),
verbose=manager.get_config_key('verbose'),
debug=manager.get_config_key('debug'),
trace=manager.get_config_key('trace'),
)
try:
manager.restore_session()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
trace(True)
LOGGER.critical("{0}: {1}".format(exc_type.__name__, exc_value))
if not isinstance(exc_value, SessionConfigError):
LOGGER.critical("Session is corrupted. Unset environment variable ZAPPER_SESSION and try again with a new session.")
sys.exit(1)
if manager.translation_name is None:
LOGGER.critical("Translation is not defined. Zapper environment is not complete. Try sourcing {!r}".format(get_zapper_profile()))
sys.exit(2)
return manager
def create_top_level_parser(manager):
class Formatter(argparse.RawTextHelpFormatter):
def __init__(self, prog, indent_increment=2, max_help_position=27, width=None):
super().__init__(prog, indent_increment, max_help_position, width)
#enable_completion_option = manager.translation_name == 'bash'
enable_completion_option = manager.translation_name == 'bash' and os.environ.get("ZAPPER_ENABLE_BASH_COMPLETION_OPTION", "").title() == "True"
helper = Helper(manager)
admin_mode = manager.is_admin()
package_options = collections.OrderedDict()
package_options['version_defaults'] = ('versions', [])
#Formatter = argparse.HelpFormatter
### Common parser
common_parser = argparse.ArgumentParser(
add_help=False,
formatter_class=Formatter)
common_parser.add_argument("--version", "-V",
action="version",
version=get_version(),
help="show zapper version")
common_parser.add_argument("--verbose", "-v",
action="store_true",
default=manager.get_config_key('verbose'),
help="set verbose on")
common_parser.add_argument("--debug", "-d",
action="store_true",
default=manager.get_config_key('debug'),
help="set debug on")
common_parser.add_argument("--quiet", "-q",
action="store_true",
default=manager.get_config_key('quiet'),
help="quiet mode")
common_parser.add_argument("--trace", "-t",
action="store_true",
default=manager.get_config_key('trace'),
help="show traceback on errors")
common_parser.add_argument("--dry-run", "-D",
dest="dry_run",
action="store_true",
default=False,
help="do not apply changes")
common_parser.add_argument("--force", "-f",
dest="force",
action="store_true",
default=False,
help="allow changes to read-only sessions")
common_parser.add_argument("--show-header",
dest='show_header',
action="store_true",
default=manager.get_config_key('show_header'),
help="show header for non-empty tables")
common_parser.add_argument("--hide-header",
dest='show_header',
action="store_false",
default=manager.get_config_key('show_header'),
help="do not show header for non-empty tables")
common_parser.add_argument("--show-header-if-empty",
dest='show_header_if_empty',
action="store_true",
default=manager.get_config_key('show_header_if_empty'),
help="show header for empty tables")
common_parser.add_argument("--hide-header-if-empty",
dest='show_header_if_empty',
action="store_false",
default=manager.get_config_key('show_header_if_empty'),
help="show header for empty tables")
common_parser.add_argument("--show-translation",
dest='show_translation',
action="store_true",
default=manager.get_config_key('show_translation'),
help="show translation")
package_format_parser = argparse.ArgumentParser(
add_help=False,
formatter_class=Formatter)
package_format_parser.add_argument("--package-format",
type=manager.PackageFormat,
default=None,
help="set the format for package info")
package_format_parser.add_argument("--package-sort-keys",
type=manager.PackageSortKeys,
default=None,
help="set the sorting keys for packages")
package_dir_format_parser = argparse.ArgumentParser(
add_help=False,
formatter_class=Formatter)
package_dir_format_parser.add_argument("--package-dir-format",
type=manager.PackageDirFormat,
default=None,
help="set the format for package dir info")
package_dir_format_parser.add_argument("--package-dir-sort-keys",
type=manager.PackageDirSortKeys,
default=None,
help="set the sorting keys for package directories")
session_format_parser = argparse.ArgumentParser(
add_help=False,
formatter_class=Formatter)
session_format_parser.add_argument("--session-format",
type=manager.SessionFormat,
default=None,
help="set the format for session info")
session_format_parser.add_argument("--session-sort-keys",
type=manager.SessionSortKeys,
default=None,
help="set the sorting keys for sessions")
### Top-level parser
top_level_parser = argparse.ArgumentParser(
parents = [common_parser],
formatter_class=Formatter,
description="""\
Change the current session""",
epilog = "")
top_level_parser.set_defaults(
package_format=None,
package_sort_keys=None,
package_dir_format=None,
package_dir_sort_keys=None,
session_format=None,
session_sort_keys=None,
)
### Subparsers
top_level_subparsers = top_level_parser.add_subparsers(
description="Commands to change the current session.")
### Help
parser_help = top_level_subparsers.add_parser("help",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="help")
parser_help.set_defaults(function=helper.show_topic)
parser_help.set_defaults(complete_function=helper.complete_help_topics)
parser_help.add_argument('topic',
nargs='?',
default='general',
choices=helper.get_topics(),
help="help topic")
### Bash completion
def generate_completion(parser, filename, manager):
if filename == '-':
filename = None
from zapper.utils.argparse_completion import complete
complete(
parser=top_level_parser,
filename=filename,
complete_function_name='complete_function',
complete_add_arguments_name='complete_add_arguments',
progname=os.path.basename(sys.argv[0]),
activate_complete_function="ZAPPER_COMPLETE_FUNCTION=true ",
skip_keys=['completion'],
)
if enable_completion_option:
parser_completion = top_level_subparsers.add_parser(
"completion",
parents=[common_parser],
formatter_class=Formatter,
help="generate completion for {}".format(manager.translation_name))
parser_completion.set_defaults(function=generate_completion, parser=top_level_parser, manager=manager)
parser_completion.add_argument(
"filename",
type=str,
default=os.path.join(manager.USER_RC_DIR, 'completion.{}'.format(manager.translation_name)),
nargs='?',
help="output filename")
### Package_options
parser_package_option = {}
package_option_subparsers = {}
for option, (parser_name, parser_aliases) in package_options.items():
parser_package_option[option] = top_level_subparsers.add_parser(parser_name,
aliases=parser_aliases,
parents=[common_parser],
formatter_class=Formatter,
help="packages' {0}".format(option))
package_option_subparsers[option] = parser_package_option[option].add_subparsers(
description="{0} subcommands.".format(option.title()))
### Config subparser
parser_config = top_level_subparsers.add_parser("config",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="default values for some command line options")
#parser_config.set_defaults(default_function=manager.show_current_config, keys=None)
config_subparsers = parser_config.add_subparsers(
description="Config subcommand.")
### Host subparser
parser_host = top_level_subparsers.add_parser("host",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="host configuration; only available for administrators")
host_subparsers = parser_host.add_subparsers(
description="Host subcommand.")
parser_host_package_option = {}
host_package_option_subparsers = {}
for option, (parser_name, parser_aliases) in package_options.items():
parser_host_package_option[option] = host_subparsers.add_parser(parser_name,
aliases=parser_aliases,
formatter_class=Formatter,
help="host default packages' {0}".format(option))
host_package_option_subparsers[option] = parser_host_package_option[option].add_subparsers(
description="Host {0} management.".format(option.title()))
parser_host_config = host_subparsers.add_parser("config",
aliases=[],
formatter_class=Formatter,
help="host default values")
#parser_host_config.set_defaults(default_function=manager.show_host_config, keys=None)
host_config_subparsers = parser_host_config.add_subparsers(
description="Host default values management.")
### User subparser
parser_user = top_level_subparsers.add_parser("user",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="user configuration")
user_subparsers = parser_user.add_subparsers(
description="User subcommand")
parser_user_package_option = {}
user_package_option_subparsers = {}
for option, (parser_name, parser_aliases) in package_options.items():
parser_user_package_option[option] = user_subparsers.add_parser(parser_name,
aliases=parser_aliases,
formatter_class=Formatter,
help="user default packages' {0}".format(option))
user_package_option_subparsers[option] = parser_user_package_option[option].add_subparsers(
description="User {0} management.".format(option.title()))
parser_user_config = user_subparsers.add_parser("config",
aliases=[],
formatter_class=Formatter,
help="user default values")
#parser_user_config.set_defaults(default_function=manager.show_user_config, keys=None)
user_config_subparsers = parser_user_config.add_subparsers(
description="User default values management.")
### Session subparser
parser_session = top_level_subparsers.add_parser("session",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="session management")
session_subparsers = parser_session.add_subparsers(
description="Session subcommand.")
parser_session_create = session_subparsers.add_parser("create",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="create a new session")
parser_session_create.set_defaults(function=manager.create_session)
parser_session_load = session_subparsers.add_parser("load",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="load an existing session")
parser_session_load.set_defaults(function=manager.load_session)
parser_session_delete = session_subparsers.add_parser("delete",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="delete existing sessions")
parser_session_delete.set_defaults(function=manager.delete_sessions)
parser_session_new = session_subparsers.add_parser("new",
parents=[common_parser],
formatter_class=Formatter,
help="create and load a new session")
parser_session_new.set_defaults(function=manager.new_session)
parser_session_available = session_subparsers.add_parser("avail",
aliases=[],
parents=[common_parser, session_format_parser],
formatter_class=Formatter,
help="list all available sessions")
parser_session_available.add_argument("--no-persistent", "-P",
dest="persistent",
action="store_true",
default=None,
help="list persistent sessions")
parser_session_available.add_argument("--no-temporary", "-T",
dest="temporary",
action="store_true",
default=None,
help="list temporary sessions")
parser_session_available.set_defaults(function=manager.show_available_sessions)
parser_session_info = session_subparsers.add_parser("info",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="show information about the current session")
parser_session_info.set_defaults(function=manager.session_info)
parser_session_copy = session_subparsers.add_parser("copy",
parents=[common_parser],
formatter_class=Formatter,
help="copy sessions")
parser_session_copy.set_defaults(function=manager.copy_sessions)
for subparser in (parser_session_load, ):
subparser.add_argument("session_name",
type=manager.SessionName,
default=None,
help="session name")
for subparser in (parser_session_create, parser_session_new, parser_session_info):
subparser.add_argument("session_name",
type=manager.SessionName,
nargs='?',
default=None,
help="session name")
for subparser in (parser_session_create, parser_session_new):
subparser.add_argument("--description",
metavar='D',
default='',
help="session description")
for subparser in (parser_session_copy, ):
subparser.add_argument("session_name",
type=manager.SessionName,
nargs='+',
default=None,
help="session names")
for subparser in (parser_session_delete, ):
subparser.add_argument("session_name",
type=manager.SessionName,
nargs='*',
default=None,
help="session names")
for subparser in (parser_session_info, parser_session_delete, parser_session_load):
subparser.set_defaults(complete_function=manager.complete_available_sessions)
for subparser in (parser_session_delete, parser_session_load):
subparser.set_defaults(complete_add_arguments=['dummy'])
parser_session_package_option = {}
session_package_option_subparsers = {}
for option, (parser_name, parser_aliases) in package_options.items():
parser_session_package_option[option] = session_subparsers.add_parser(parser_name,
aliases=parser_aliases,
formatter_class=Formatter,
help="session default packages' {0}".format(option))
session_package_option_subparsers[option] = parser_session_package_option[option].add_subparsers(
description="Session {0} management.".format(option.title()))
parser_session_config = session_subparsers.add_parser("config",
aliases=[],
formatter_class=Formatter,
help="session default values")
#parser_session_config.set_defaults(default_function=manager.show_session_config, keys=None)
session_config_subparsers = parser_session_config.add_subparsers(
description="Session default values management.")
### Update
    for subparsers in (session_subparsers,):
parser_sync = subparsers.add_parser(
"sync",
parents=[common_parser],
formatter_class=Formatter,
help="sync current session")
parser_sync.set_defaults(function=manager.sync_session)
for option in package_options:
parser_package_option_show = {}
for subparsers in package_option_subparsers[option], host_package_option_subparsers[option], user_package_option_subparsers[option], session_package_option_subparsers[option]:
parser_package_option_show[subparsers] = subparsers.add_parser("show",
parents=[common_parser],
formatter_class=Formatter,
help="show current packages {}".format(option))
parser_package_option_show[subparsers].add_argument("keys",
nargs='*',
help="show keys")
parser_package_option_show[package_option_subparsers[option]].set_defaults(function=manager.show_current_package_option, option=option)
parser_package_option_show[host_package_option_subparsers[option]].set_defaults(function=manager.show_host_package_option, option=option)
parser_package_option_show[user_package_option_subparsers[option]].set_defaults(function=manager.show_user_package_option, option=option)
parser_package_option_show[session_package_option_subparsers[option]].set_defaults(function=manager.show_session_package_option, option=option)
parser_package_option_set = {}
parser_package_option_reset = {}
mutable_package_option_subparsers = []
if admin_mode:
mutable_package_option_subparsers.append(host_package_option_subparsers[option])
mutable_package_option_subparsers.extend((user_package_option_subparsers[option], session_package_option_subparsers[option]))
for subparsers in mutable_package_option_subparsers:
parser_package_option_set[subparsers] = subparsers.add_parser("set",
parents=[common_parser],
formatter_class=Formatter,
help="set packages' {}".format(option))
parser_package_option_set[subparsers].add_argument("key_values",
nargs='*',
help="set key=value pairs")
parser_package_option_reset[subparsers] = subparsers.add_parser("reset",
parents=[common_parser],
help="reset packages' {}".format(option))
parser_package_option_reset[subparsers].add_argument("keys",
nargs='*',
help="reset keys")
if admin_mode:
parser_package_option_set[host_package_option_subparsers[option]].set_defaults(function=manager.set_host_package_option, option=option)
parser_package_option_reset[host_package_option_subparsers[option]].set_defaults(function=manager.reset_host_package_option, option=option)
parser_package_option_set[user_package_option_subparsers[option]].set_defaults(function=manager.set_user_package_option, option=option)
parser_package_option_reset[user_package_option_subparsers[option]].set_defaults(function=manager.reset_user_package_option, option=option)
parser_package_option_set[session_package_option_subparsers[option]].set_defaults(function=manager.set_session_package_option, option=option)
parser_package_option_reset[session_package_option_subparsers[option]].set_defaults(function=manager.reset_session_package_option, option=option)
for option in ('version_defaults', ):
parser_package_option_show[package_option_subparsers[option]].set_defaults(
complete_function=manager.complete_version_defaults)
if admin_mode:
parser_package_option_set[host_package_option_subparsers[option]].set_defaults(
complete_function=manager.complete_product_names,
complete_add_arguments=['dummy'])
parser_package_option_reset[host_package_option_subparsers[option]].set_defaults(
complete_function=manager.complete_host_version_defaults)
parser_package_option_set[user_package_option_subparsers[option]].set_defaults(
complete_function=manager.complete_product_names,
complete_add_arguments=['dummy'])
parser_package_option_reset[user_package_option_subparsers[option]].set_defaults(
complete_function=manager.complete_user_version_defaults)
parser_package_option_set[session_package_option_subparsers[option]].set_defaults(
complete_function=manager.complete_product_names,
complete_add_arguments=['dummy'])
parser_package_option_reset[session_package_option_subparsers[option]].set_defaults(
complete_function=manager.complete_session_version_defaults)
parser_config_show = {}
parser_config_get = {}
for subparsers in (config_subparsers, host_config_subparsers, user_config_subparsers, session_config_subparsers):
parser_config_show[subparsers] = subparsers.add_parser("show",
parents=[common_parser],
formatter_class=Formatter,
help="show current value")
parser_config_show[subparsers].add_argument("keys",
nargs='*',
help="show keys")
parser_config_get[subparsers] = subparsers.add_parser("get",
parents=[common_parser],
formatter_class=Formatter,
help="get current value")
parser_config_get[subparsers].add_argument("key",
help="get key")
parser_config_show[config_subparsers].set_defaults(
function=manager.show_current_config,
complete_function=manager.complete_config_keys)
parser_config_show[host_config_subparsers].set_defaults(
function=manager.show_host_config,
complete_function=manager.complete_host_config_keys)
parser_config_show[user_config_subparsers].set_defaults(
function=manager.show_user_config,
complete_function=manager.complete_user_config_keys)
parser_config_show[session_config_subparsers].set_defaults(
function=manager.show_session_config,
complete_function=manager.complete_session_config_keys)
parser_config_get[config_subparsers].set_defaults(
function=manager.get_current_config,
complete_function=manager.complete_config_keys,
complete_add_arguments=['dummy'])
parser_config_get[host_config_subparsers].set_defaults(
function=manager.get_host_config,
complete_function=manager.complete_host_config_keys,
complete_add_arguments=['dummy'])
parser_config_get[user_config_subparsers].set_defaults(
function=manager.get_user_config,
complete_function=manager.complete_user_config_keys,
complete_add_arguments=['dummy'])
parser_config_get[session_config_subparsers].set_defaults(
function=manager.get_session_config,
complete_function=manager.complete_session_config_keys,
complete_add_arguments=['dummy'])
parser_config_set = {}
parser_config_reset = {}
mutable_config_subparsers = []
if admin_mode:
mutable_config_subparsers.append(host_config_subparsers)
mutable_config_subparsers.extend((user_config_subparsers, session_config_subparsers))
for subparsers in mutable_config_subparsers:
parser_config_set[subparsers] = subparsers.add_parser("set",
parents=[common_parser],
formatter_class=Formatter,
help="set default values")
parser_config_set[subparsers].add_argument("key_values",
nargs='*',
help="set key=value pairs")
parser_config_reset[subparsers] = subparsers.add_parser("reset",
parents=[common_parser],
formatter_class=Formatter,
help="reset default values")
parser_config_reset[subparsers].add_argument("keys",
nargs='*',
help="reset keys")
if admin_mode:
parser_config_set[host_config_subparsers].set_defaults(
function=manager.set_host_config,
complete_function=manager.complete_host_config_keys,
complete_add_arguments=['dummy'])
parser_config_reset[host_config_subparsers].set_defaults(
function=manager.reset_host_config,
complete_function=manager.complete_host_config_keys)
parser_config_set[user_config_subparsers].set_defaults(
function=manager.set_user_config,
complete_function=manager.complete_user_config_keys,
complete_add_arguments=['dummy'])
parser_config_reset[user_config_subparsers].set_defaults(
function=manager.reset_user_config,
complete_function=manager.complete_user_config_keys)
parser_config_set[session_config_subparsers].set_defaults(
function=manager.set_session_config,
complete_function=manager.complete_session_config_keys,
complete_add_arguments=['dummy'])
parser_config_reset[session_config_subparsers].set_defaults(
function=manager.reset_session_config,
complete_function=manager.complete_session_config_keys)
### Package subparser
parser_package_show_available_packages = top_level_subparsers.add_parser("avail",
aliases=[],
parents=[common_parser, package_format_parser],
formatter_class=Formatter,
help="list available packages")
parser_package_show_available_packages.add_argument("package_labels",
default=[],
nargs='*',
help='package labels')
parser_package_show_available_packages.set_defaults(function=manager.show_available_packages)
parser_package_show_loaded_packages = top_level_subparsers.add_parser("list",
aliases=[],
parents=[common_parser, package_format_parser],
formatter_class=Formatter,
help="list loaded packages")
parser_package_show_loaded_packages.set_defaults(function=manager.show_loaded_packages)
parser_package_show_package = top_level_subparsers.add_parser("show",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="show package content")
parser_package_show_package.add_argument("package_label",
help="package label")
parser_package_show_package.set_defaults(function=manager.show_package)
parser_package_load = top_level_subparsers.add_parser("load",
parents=[common_parser],
formatter_class=Formatter,
help="add packages to current session",
epilog="""\
[Resolution aggressivity level]:
> 0: missing requirements are searched in available packages
> 1: missing requirements are searched in defined packages
""")
parser_package_load.set_defaults(function=manager.load_package_labels)
parser_package_unload = top_level_subparsers.add_parser("unload",
aliases=[],
parents=[common_parser],
formatter_class=Formatter,
help="remove packages from current session",
epilog="""\
[Resolution aggressivity level]:
> 0: packages with unsatisfied requirements after removal will be
automatically removed
""")
parser_package_unload.set_defaults(function=manager.unload_package_labels)
parser_package_clear = top_level_subparsers.add_parser("clear",
parents=[common_parser],
formatter_class=Formatter,
help="unload all loaded packages from current session")
parser_package_clear.set_defaults(function=manager.clear_packages)
for key, subparser in ('load', parser_package_load), ('unload', parser_package_unload):
subparser.add_argument("package_labels",
type=str,
nargs='+',
default=None,
help="package name")
subparser.add_argument("--resolve", "-r",
dest="resolution_level",
action="count",
default=manager.get_config_key('resolution_level'),
help="automatically resolve missing requirements (repeat to increase aggressivity level)")
subparser.add_argument("--subpackages", "-s",
dest="subpackages",
action="store_true",
default=manager.get_config_key('subpackages'),
help="automatically {0} all suite's packages".format(key))
for subparser_name, subparser in ('load', parser_package_load), ('unload', parser_package_unload), ('clear', parser_package_clear):
subparser.add_argument("--simulate",
dest="simulate",
action="store_true",
default=False,
help="show only changes")
subparser.add_argument("--sticky", "-S",
dest="sticky",
action="store_true",
default=False,
help="{0} sticky packages".format(subparser_name))
for parser in (parser_package_load, parser_package_show_package, parser_package_show_available_packages):
parser.set_defaults(complete_function=manager.complete_available_packages, complete_add_arguments=['dummy'])
for parser in (parser_package_unload, ):
parser.set_defaults(complete_function=manager.complete_loaded_packages, complete_add_arguments=['dummy'])
### No more parsers!
return top_level_parser
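# A few example invocations accepted by the parser built above (illustrative
# only; package and session names are made up):
#
#   zapper avail                                  # list available packages
#   zapper load mypkg -r                          # load a package, resolving requirements
#   zapper unload mypkg                           # remove a package from the session
#   zapper session new --description "scratch"    # create and load a new session
#   zapper config show                            # show current config values
#   zapper user config set quiet=True             # set a user-level default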
def zapper_main():
manager = create_manager()
top_level_parser = create_top_level_parser(manager)
try:
autocomplete_monkey_patch(top_level_parser)
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        print(exc_value)
        # completion support is optional, so errors here are ignored
args = top_level_parser.parse_args()
_set_global_flags(
_ZAPPER_COMPLETE_FUNCTION,
quiet=args.quiet,
verbose=args.verbose,
debug=args.debug,
trace=args.trace,
)
manager.set_dry_run(args.dry_run)
manager.set_force(args.force)
manager.set_show_header(args.show_header, args.show_header_if_empty)
manager.set_show_translation(args.show_translation)
manager.set_package_format(args.package_format)
manager.set_package_dir_format(args.package_dir_format)
manager.set_session_format(args.session_format)
manager.set_package_sort_keys(args.package_sort_keys)
manager.set_package_dir_sort_keys(args.package_dir_sort_keys)
manager.set_session_sort_keys(args.session_sort_keys)
    # resolve the --persistent/--temporary pair for "session avail": with no
    # flag both kinds are listed; passing exactly one flag restricts the listing
    if 'persistent' in args and 'temporary' in args:
if args.persistent is None:
if args.temporary is None:
args.persistent = True
args.temporary = True
else:
args.persistent = False
if args.temporary is None:
args.temporary = False
p_args = args._get_args()
n_args = dict(args._get_kwargs())
for key in {'function', 'quiet', 'verbose', 'debug', 'trace', 'full_label',
'dry_run', 'force', 'show_header', 'show_header_if_empty', 'show_translation',
'package_format', 'session_format', 'package_dir_format',
'package_sort_keys', 'package_dir_sort_keys', 'session_sort_keys',
'complete_function', 'complete_add_arguments'}:
if key in n_args:
del n_args[key]
manager.initialize()
complete_function = getattr(args, 'complete_function', None)
if complete_function and os.environ.get("ZAPPER_COMPLETE_FUNCTION", ""):
try:
args.complete_function(*p_args, **n_args)
        except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
trace()
LOGGER.critical("{0}: {1}".format(exc_type.__name__, exc_value))
else:
function = getattr(args, 'function', None)
if function is None:
LOGGER.critical("invalid command line (this is probably due to a bug in argparse)")
sys.exit(1)
try:
function(*p_args, **n_args)
        except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
trace()
#sys.stderr.write("ERR: {}: {}\n".format(exc_type.__name__, exc_value))
LOGGER.critical("{0}: {1}".format(exc_type.__name__, exc_value))
sys.exit(1)
else:
manager.finalize()
if __name__ == "__main__":
zapper_main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""learn_main tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
patch = test.mock.patch
class TestExperiment(experiment.Experiment):
def __init__(self, default=None, config=None):
self.default = default
self.config = config
@property
def estimator(self):
class Estimator(object):
config = self.config
return Estimator()
def local_run(self):
return "local_run"
def train(self):
return "train"
def run_std_server(self):
return "run_std_server"
def train_and_evaluate(self):
return "train_and_evaluate"
def simple_task(self):
return "simple_task, default=%s." % self.default
# pylint: disable=unused-argument
def build_experiment(output_dir):
tf_logging.info("In default build_experiment.")
return TestExperiment()
def build_non_experiment(output_dir):
return "Ceci n'est pas un Experiment."
# pylint: enable=unused-argument
def build_distributed_cluster_spec():
return {
run_config_lib.TaskType.PS: ["localhost:1234", "localhost:1235"],
run_config_lib.TaskType.WORKER: ["localhost:1236", "localhost:1237"],
run_config_lib.TaskType.MASTER: ["localhost:1238"],
"foo_has_no_default_schedule": ["localhost:1239"]
}
def build_non_distributed_cluster_spec():
return {"foo": ["localhost:1234"]}
class MainTest(test.TestCase):
def setUp(self):
# Ensure the TF_CONFIG environment variable is unset for all tests.
os.environ.pop("TF_CONFIG", None)
def test_run_with_custom_schedule(self):
self.assertEqual(
"simple_task, default=None.",
learn_runner.run(build_experiment,
output_dir="/tmp",
schedule="simple_task"))
def test_run_with_explicit_local_run(self):
self.assertEqual(
"local_run",
learn_runner.run(build_experiment,
output_dir="/tmp",
schedule="local_run"))
def test_schedule_from_tf_config_runs_train_on_worker(self):
os.environ["TF_CONFIG"] = json.dumps({
"cluster": build_distributed_cluster_spec(),
"task": {
"type": run_config_lib.TaskType.WORKER
}
})
# RunConfig constructor will set job_name from TF_CONFIG.
config = run_config.RunConfig()
self.assertEqual(
"train",
learn_runner.run(lambda output_dir: TestExperiment(config=config),
output_dir="/tmp"))
def test_schedule_from_tf_config_runs_train_and_evaluate_on_master(self):
tf_config = {
"cluster": build_distributed_cluster_spec(),
"task": {
"type": run_config_lib.TaskType.MASTER
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config.RunConfig()
self.assertEqual(
"train_and_evaluate",
learn_runner.run(lambda output_dir: TestExperiment(config=config),
output_dir="/tmp"))
def test_schedule_from_tf_config_runs_serve_on_ps(self):
tf_config = {
"cluster": build_distributed_cluster_spec(),
"task": {
"type": run_config_lib.TaskType.PS
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config.RunConfig()
self.assertEqual(
"run_std_server",
learn_runner.run(lambda output_dir: TestExperiment(config=config),
output_dir="/tmp"))
def test_fail_no_output_dir(self):
self.assertRaisesRegexp(ValueError, "Must specify an output directory",
learn_runner.run, build_experiment, "",
"simple_task")
def test_no_schedule_and_no_config_runs_train_and_evaluate(self):
self.assertEqual(
"train_and_evaluate",
learn_runner.run(build_experiment, output_dir="/tmp"))
def test_no_schedule_and_non_distributed_runs_train_and_evaluate(self):
tf_config = {"cluster": build_non_distributed_cluster_spec()}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config.RunConfig()
self.assertEqual(
"train_and_evaluate",
learn_runner.run(lambda output_dir: TestExperiment(config=config),
output_dir="/tmp"))
def test_fail_task_type_with_no_default_schedule(self):
tf_config = {
"cluster": build_distributed_cluster_spec(),
"task": {
"type": "foo_has_no_default_schedule"
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config.RunConfig()
create_experiment_fn = lambda output_dir: TestExperiment(config=config)
self.assertRaisesRegexp(ValueError, "No default schedule",
learn_runner.run, create_experiment_fn, "/tmp")
def test_fail_non_callable(self):
self.assertRaisesRegexp(TypeError, "Experiment builder .* is not callable",
learn_runner.run, "not callable", "/tmp",
"simple_test")
def test_fail_not_experiment(self):
self.assertRaisesRegexp(TypeError,
"Experiment builder did not return an Experiment",
learn_runner.run, build_non_experiment, "/tmp",
"simple_test")
def test_fail_non_existent_task(self):
self.assertRaisesRegexp(ValueError, "Schedule references non-existent task",
learn_runner.run, build_experiment, "/tmp",
"mirage")
def test_fail_non_callable_task(self):
self.assertRaisesRegexp(TypeError,
"Schedule references non-callable member",
learn_runner.run, build_experiment, "/tmp",
"default")
def test_fail_schedule_from_config_with_no_task_type(self):
tf_config = {"cluster": build_distributed_cluster_spec()}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config.RunConfig()
self.assertRaisesRegexp(
ValueError,
"Must specify a schedule",
learn_runner.run,
lambda output_dir: TestExperiment(config=config),
output_dir="/tmp")
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------#
# Imports.
#----------------------------------------------------------------------------#
from main import db
from main import bcrypt
# from carlae.main import db, bcrypt
import base64
import urlparse
import uuid
import random
import string
import hashlib
import datetime
import urllib
import os
import mail
import config
import shortener
#----------------------------------------------------------------------------#
# DB Config.
#----------------------------------------------------------------------------#
# Set your classes here.
class User(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), unique=True)
password = db.Column(db.String(60), nullable=False)
def __init__(self, email):
self.email = email
self.get_user_details()
def get_user_details(self):
uname = User.query.filter_by(email=self.email).first()
if uname is not None:
for var, val in vars(uname).iteritems():
setattr(self, var, val)
def create_user(self, password):
user = User(self.email)
user.password = bcrypt.generate_password_hash(password)
db.session.add(user)
db.session.commit()
self.get_user_details() # update object with details
def delete_user(self):
user = User.query.get(self.id)
db.session.delete(user)
db.session.commit()
# TODO: reset instance state
self.reset()
def reset(self):
        # TODO: decide how to reset (or discard) this instance
pass
def check_password(self, password):
return bcrypt.check_password_hash(self.password, password)
    def generate_password(self, length=10):
        # use the OS CSPRNG rather than the default Mersenne Twister
        rng = random.SystemRandom()
        return ''.join(rng.choice(string.ascii_letters + string.digits) for _ in range(length))
def change_password(self, password):
user = User.query.get(self.id)
user.password = bcrypt.generate_password_hash(password) # update pass
db.session.commit()
### Flask-Login required methods ###
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
### END Flask-Login required methods ###
def __repr__(self):
return '<User %r>' % self.email
class InviteUser(db.Model):
__tablename__ = "invites"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(120), unique=True)
activation_code = db.Column(db.String(64))
activation_date = db.Column(db.DateTime)
is_activated = db.Column(db.Boolean, default=False)
def __init__(self, email):
self.email = email
self.get_user_details()
if self.email_exists():
self.regenerate_activation_code()
self.generate_activation_url()
else:
self.generate_activation_code()
self.generate_activation_url()
self.commit()
self.get_user_details() # get user details if they exist
def get_user_details(self):
user = InviteUser.query.filter_by(email=self.email).first()
if user is not None:
for var, val in vars(user).iteritems():
setattr(self, var, val)
def email_exists(self):
user = InviteUser.query.filter_by(email=self.email).first()
if user is not None:
return True
return False
    def generate_activation_code(self):
        self.activation_code = base64.urlsafe_b64encode(
            hashlib.sha512(str(random.getrandbits(1024))).digest()).rstrip('=')
        # TODO: loop until the code is unique. Strictly it need not be unique,
        # since it is always paired with an email address and carries ample
        # entropy, but uniqueness would still be safer.
def generate_activation_url(self):
url = os.path.join(config.BASE_URL, 'activate')
params = {'email':self.email, "code":self.activation_code}
query = urllib.urlencode(params)
full_url = url + '?' + query
self.activation_url = full_url
def send_activation_email(self):
# GET THE LATEST FROM THE DATABASE
usr = InviteUser.query.get(self.id)
usr.generate_activation_url()
from_email = config.APP_EMAIL
to_list = [usr.email]
subject = "Activate your account"
message = config.INVITATION_EMAIL_TEMPLATE % (usr.activation_url)
return mail.send_simple_message(from_email, to_list, subject, message, from_name=config.APP_EMAIL_NAME)
def commit(self):
self.activation_date = datetime.datetime.now()
db.session.add(self)
db.session.commit()
def regenerate_activation_code(self):
usr = InviteUser.query.get(self.id)
usr.generate_activation_code()
usr.generate_activation_url()
usr.activation_date = datetime.datetime.now()
db.session.commit()
class Url(db.Model):
__tablename__ = "urls"
id = db.Column(db.Integer, primary_key=True)
source = db.Column(db.String(1500), unique=True, nullable=False)
shortlink = db.Column(db.String(6), nullable=True)
counter = db.Column(db.Integer, default=0)
def __init__(self, source_url):
self.source = self.cleanup_url(source_url)
        self.already_exists()
if self.shortlink is None:
self.add_url_to_db() # commits to db, get an id this way
self.generate_shortlink()
db.session.commit() # update the db
    def already_exists(self):
url = Url.query.filter_by(source=self.source).first()
if url is not None:
self.shortlink = url.shortlink
self.id = url.id
self.counter = url.counter
else:
self.shortlink = None
def generate_shortlink(self):
self.shortlink = shortener.encode_id(self.id)
def cleanup_url(self, source_url):
source_url = self.check_protocol(source_url)
return urlparse.urlsplit(source_url).geturl()
    def is_valid_link(self):
        """TODO: check whether `self.source` is a valid URL."""
        pass
def check_protocol(self, url):
return url if (url.lower().startswith('http://') or url.lower().startswith('https://')) else "http://" + url
    def _make_shortlink_unique(self):
        # Safety net only: shortlinks derived from the primary key via
        # shortener.encode_id() are unique by construction. The original loop
        # called generate_shortlink() with an argument it does not accept, so
        # this (assumed) fix retries with random uuid-based candidates instead.
        shortlink = self.shortlink
        while Url.query.filter_by(shortlink=shortlink).first() is not None:
            shortlink = uuid.uuid4().hex[:6]
        self.shortlink = shortlink
def add_url_to_db(self):
db.session.add(self)
db.session.commit()
class ReverseUrl(object):
    def __init__(self, shortlink):
        self.shortlink = shortlink
        self.source = None  # filled in by get_source_url()
    def get_source_url(self):
        url = Url.query.get(shortener.decode_id(self.shortlink))
        if url is not None:
            self.id = url.id
            self.update_counter()
            self.source = url.source
def update_counter(self):
rec = Url.query.get(self.id)
rec.counter = rec.counter + 1
db.session.commit()
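# Minimal usage sketch (assumes a configured Flask app context with `db`
# bound; the URL below is illustrative):
#
#   url = Url('example.com/some/page')   # creates or reuses a row, adds http://
#   rev = ReverseUrl(url.shortlink)
#   rev.get_source_url()                 # also increments the click counter
#   rev.source                           # -> 'http://example.com/some/page'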
|
|
# -*- coding: utf-8 -*-
import numpy as np
import warnings
from affine import Affine
from numpy import min_scalar_type
from rasterio import features
from rasterstats.io import read_features, Raster
from rasterstats.utils import (get_percentile, check_stats, remap_categories,
key_assoc_val, boxify_points)
from shapely.geometry import shape
# Code from https://github.com/perrygeo/python-rasterstats/pull/136
# Percent coverage selection and weighting
# Same license as rasterstats
def gen_zonal_stats(
vectors, raster,
layer=0,
band=1,
nodata=None,
affine=None,
stats=None,
all_touched=True,
percent_cover_selection=None,
percent_cover_weighting=True,
percent_cover_scale=20,
categorical=False,
category_map=None,
add_stats=None,
zone_func=None,
raster_out=False,
prefix=None,
geojson_out=False, **kwargs):
"""Zonal statistics of raster values aggregated to vector geometries.
    Parameters
    ----------
    vectors: path to a vector source or geo-like python objects
    raster: ndarray or path to a GDAL raster source
If ndarray is passed, the ``affine`` kwarg is required.
layer: int or string, optional
        If `vectors` is a path to a fiona source,
specify the vector layer to use either by name or number.
defaults to 0
band: int, optional
If `raster` is a GDAL source, the band number to use (counting from 1).
defaults to 1.
nodata: float, optional
If `raster` is a GDAL source, this value overrides any NODATA value
specified in the file's metadata.
If `None`, the file's metadata's NODATA value (if any) will be used.
defaults to `None`.
affine: Affine instance
required only for ndarrays, otherwise it is read from src
stats: list of str, or space-delimited str, optional
Which statistics to calculate for each zone.
All possible choices are listed in ``utils.VALID_STATS``.
defaults to ``DEFAULT_STATS``, a subset of these.
    all_touched: bool, optional
        Whether to include every raster cell touched by a geometry, or only
        those having a center point within the polygon.
        defaults to `True` here (this variant overrides the upstream
        rasterstats default of `False`)
    percent_cover_selection: float, optional
        Include only raster cells with at least the given fraction of their
        area covered by the vector feature. Requires the percent_cover_scale
        argument, which sets the scale at which percent coverage estimates
        are generated
percent_cover_weighting: bool, optional
whether or not to use percent coverage of cells during calculations
to adjust stats (only applies to mean, count and sum)
    percent_cover_scale: int, optional
        Scale used when generating percent coverage estimates of each
        raster cell by vector feature. Percent coverage is generated by
        rasterizing the feature at a finer resolution than the raster
        (based on the percent_cover_scale value), aggregating back to the
        raster resolution with a summation, and dividing by the square of
        percent_cover_scale to get the coverage value for each cell.
        Increasing percent_cover_scale increases the accuracy of the
        estimates; a resolution three orders of magnitude finer
        (percent_cover_scale=1000) is usually enough to keep the error in
        individual edge cells below 1%, though much smaller values (e.g.,
        percent_cover_scale=10) are often sufficient (<10% error) and
        require less memory.
categorical: bool, optional
category_map: dict
A dictionary mapping raster values to human-readable categorical names.
Only applies when categorical is True
add_stats: dict
with names and functions of additional stats to compute, optional
zone_func: callable
function to apply to zone ndarray prior to computing stats
raster_out: boolean
Include the masked numpy array for each feature?, optional
Each feature dictionary will have the following additional keys:
mini_raster_array: The clipped and masked numpy array
mini_raster_affine: Affine transformation
mini_raster_nodata: NoData Value
prefix: string
add a prefix to the keys (default: None)
geojson_out: boolean
Return list of GeoJSON-like features (default: False)
Original feature geometry and properties will be retained
with zonal stats appended as additional properties.
Use with `prefix` to ensure unique and meaningful property names.
    Returns
    -------
generator of dicts (if geojson_out is False)
Each item corresponds to a single vector feature and
contains keys for each of the specified stats.
generator of geojson features (if geojson_out is True)
GeoJSON-like Feature as python dict
"""
stats, run_count = check_stats(stats, categorical)
# check inputs related to percent coverage
percent_cover = False
if percent_cover_weighting or percent_cover_selection is not None:
percent_cover = True
if percent_cover_scale is None:
warnings.warn('No value for `percent_cover_scale` was given. '
'Using default value of 10.')
percent_cover_scale = 10
        try:
            if percent_cover_scale != int(percent_cover_scale):
                warnings.warn('Value for `percent_cover_scale` given ({0}) '
                              'was converted to int ({1}) but does not '
                              'match original value'.format(
                                  percent_cover_scale, int(percent_cover_scale)))
            percent_cover_scale = int(percent_cover_scale)
        except (TypeError, ValueError):
            raise Exception('Invalid value for `percent_cover_scale` '
                            'provided ({0}). Must be type int.'.format(
                                percent_cover_scale))
        # checked outside the try block so this message is not masked by the
        # generic "must be type int" error above
        if percent_cover_scale <= 1:
            raise Exception('Value for `percent_cover_scale` must be '
                            'greater than one ({0})'.format(
                                percent_cover_scale))
if percent_cover_selection is not None:
try:
percent_cover_selection = float(percent_cover_selection)
            except (TypeError, ValueError):
raise Exception('Invalid value for `percent_cover_selection` '
'provided ({0}). Must be able to be converted '
'to a float.'.format(percent_cover_selection))
# if not all_touched:
# warnings.warn('`all_touched` was not enabled but an option requiring '
# 'percent_cover calculations was selected. Automatically '
# 'enabling `all_touched`.')
# all_touched = True
with Raster(raster, affine, nodata, band) as rast:
features_iter = read_features(vectors, layer)
        for feat in features_iter:
geom = shape(feat['geometry'])
if 'Point' in geom.type:
geom = boxify_points(geom, rast)
percent_cover = False
geom_bounds = tuple(geom.bounds)
fsrc = rast.read(bounds=geom_bounds)
if percent_cover:
cover_weights = rasterize_pctcover_geom(
geom, shape=fsrc.shape, affine=fsrc.affine,
scale=percent_cover_scale,
all_touched=all_touched)
rv_array = cover_weights > (percent_cover_selection or 0)
else:
rv_array = rasterize_geom(
geom, shape=fsrc.shape, affine=fsrc.affine,
all_touched=all_touched)
# nodata mask
isnodata = (fsrc.array == fsrc.nodata)
# add nan mask (if necessary)
if np.issubdtype(fsrc.array.dtype, float) and \
np.isnan(fsrc.array.min()):
isnodata = (isnodata | np.isnan(fsrc.array))
# Mask the source data array
# mask everything that is not a valid value or not within our geom
masked = np.ma.MaskedArray(
fsrc.array,
mask=(isnodata | ~rv_array))
# execute zone_func on masked zone ndarray
if zone_func is not None:
if not callable(zone_func):
raise TypeError(('zone_func must be a callable '
'which accepts function a '
'single `zone_array` arg.'))
zone_func(masked)
if masked.compressed().size == 0:
# nothing here, fill with None and move on
feature_stats = dict([(stat, None) for stat in stats])
if 'count' in stats: # special case, zero makes sense here
feature_stats['count'] = 0
else:
if run_count:
keys, counts = np.unique(masked.compressed(), return_counts=True)
pixel_count = dict(zip([np.asscalar(k) for k in keys],
[np.asscalar(c) for c in counts]))
if categorical:
feature_stats = dict(pixel_count)
if category_map:
feature_stats = remap_categories(category_map, feature_stats)
else:
feature_stats = {}
if 'min' in stats:
feature_stats['min'] = float(masked.min())
if 'max' in stats:
feature_stats['max'] = float(masked.max())
if 'mean' in stats:
if percent_cover:
feature_stats['mean'] = float(
np.sum(masked * cover_weights) /
np.sum(~masked.mask * cover_weights))
else:
feature_stats['mean'] = float(masked.mean())
if 'count' in stats:
if percent_cover:
feature_stats['count'] = float(np.sum(~masked.mask * cover_weights))
else:
feature_stats['count'] = int(masked.count())
# optional
if 'sum' in stats:
if percent_cover:
feature_stats['sum'] = float(np.sum(masked * cover_weights))
else:
feature_stats['sum'] = float(masked.sum())
if 'std' in stats:
feature_stats['std'] = float(masked.std())
if 'median' in stats:
feature_stats['median'] = float(np.median(masked.compressed()))
if 'majority' in stats:
feature_stats['majority'] = float(key_assoc_val(pixel_count, max))
if 'minority' in stats:
feature_stats['minority'] = float(key_assoc_val(pixel_count, min))
if 'unique' in stats:
feature_stats['unique'] = len(list(pixel_count.keys()))
if 'range' in stats:
try:
rmin = feature_stats['min']
except KeyError:
rmin = float(masked.min())
try:
rmax = feature_stats['max']
except KeyError:
rmax = float(masked.max())
feature_stats['range'] = rmax - rmin
for pctile in [s for s in stats if s.startswith('percentile_')]:
q = get_percentile(pctile)
pctarr = masked.compressed()
feature_stats[pctile] = np.percentile(pctarr, q)
if 'nodata' in stats:
featmasked = np.ma.MaskedArray(fsrc.array, mask=np.logical_not(rv_array))
feature_stats['nodata'] = float((featmasked == fsrc.nodata).sum())
if add_stats is not None:
for stat_name, stat_func in add_stats.items():
feature_stats[stat_name] = stat_func(masked)
if raster_out:
feature_stats['mini_raster_array'] = masked
feature_stats['mini_raster_affine'] = fsrc.affine
feature_stats['mini_raster_nodata'] = fsrc.nodata
if prefix is not None:
prefixed_feature_stats = {}
for key, val in feature_stats.items():
newkey = "{}{}".format(prefix, key)
prefixed_feature_stats[newkey] = val
feature_stats = prefixed_feature_stats
if geojson_out:
for key, val in feature_stats.items():
if 'properties' not in feat:
feat['properties'] = {}
feat['properties'][key] = val
yield feat
else:
yield feature_stats
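# Minimal usage sketch for the generator above (filenames are illustrative):
#
#   for feature_stats in gen_zonal_stats('polygons.shp', 'elevation.tif',
#                                        stats=['mean', 'count', 'sum'],
#                                        percent_cover_weighting=True,
#                                        percent_cover_scale=20):
#       print(feature_stats)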
def rasterize_geom(geom, shape, affine, all_touched=False):
"""
Parameters
----------
geom: GeoJSON geometry
shape: desired shape
affine: desired transform
all_touched: rasterization strategy
Returns
-------
ndarray: boolean
"""
geoms = [(geom, 1)]
rv_array = features.rasterize(
geoms,
out_shape=shape,
transform=affine,
fill=0,
dtype='uint8',
all_touched=all_touched)
return rv_array.astype(bool)
# https://stackoverflow.com/questions/8090229/
# resize-with-averaging-or-rebin-a-numpy-2d-array/8090605#8090605
def rebin_sum(a, shape, dtype):
    # sum each (a.shape[0]//shape[0]) x (a.shape[1]//shape[1]) block of `a`
    # down to a single cell of the output
    sh = shape[0], a.shape[0] // shape[0], shape[1], a.shape[1] // shape[1]
    return a.reshape(sh).sum(-1, dtype=dtype).sum(1, dtype=dtype)
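# e.g. rebin_sum(np.ones((4, 4)), (2, 2), 'uint8') sums each 2x2 block:
#   -> array([[4, 4],
#             [4, 4]], dtype=uint8)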
def rasterize_pctcover_geom(geom, shape, affine, scale=None, all_touched=False):
"""
Parameters
----------
geom: GeoJSON geometry
shape: desired shape
affine: desired transform
scale: scale at which to generate percent cover estimate
Returns
-------
ndarray: float32
"""
if scale is None:
scale = 10
min_dtype = min_scalar_type(scale ** 2)
new_affine = Affine(affine[0]/scale, 0, affine[2],
0, affine[4]/scale, affine[5])
new_shape = (shape[0] * scale, shape[1] * scale)
rv_array = rasterize_geom(geom, new_shape, new_affine, all_touched=all_touched)
rv_array = rebin_sum(rv_array, shape, min_dtype)
return rv_array.astype('float32') / (scale**2)
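# Quick self-check sketch for the percent-cover rasterization (not part of the
# original PR; the geometry and grid below are made up for illustration):
if __name__ == '__main__':
    from shapely.geometry import box
    # 2x2 grid of 1-unit cells covering x in [0, 2], y in [0, 2],
    # with the usual top-left raster origin
    demo_affine = Affine(1.0, 0.0, 0.0,
                         0.0, -1.0, 2.0)
    demo_geom = box(0.0, 0.0, 1.5, 1.0)  # bottom row: one full cell, one half cell
    weights = rasterize_pctcover_geom(demo_geom, shape=(2, 2),
                                      affine=demo_affine, scale=10)
    print(weights)  # approximately [[0.0, 0.0], [1.0, 0.5]]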
|
|
"""
Is Unique
Implement an algorithm o determine if a string has all unique characters.
What if you cannot use additional data structures?
"""
def is_unique(word):
# if no data structures then sort and count
return len(word) == len(set(word.lower()))
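# Follow-up sketch for the "no additional data structures" constraint: sort
# the characters and scan adjacent pairs. (Strictly, sorted() builds a list;
# the textbook answer sorts a mutable character buffer in place.)
def is_unique_no_extra_structures(word):
    chars = sorted(word.lower())
    return all(a != b for a, b in zip(chars, chars[1:]))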
def test_is_unique():
assert is_unique('abcdefghi')
assert not is_unique('okoto')
assert not is_unique('alma matter calibre')
assert not is_unique('to the memory of the victims')
assert is_unique('UNCOPYRIGHTABLE')
assert is_unique('SUBDERMATOGLYPHIC')
assert not is_unique('epE')
assert is_unique('subdermatoglyphic')
assert is_unique('uncopyrightables')
assert is_unique('dermatoglyphics')
assert is_unique('hydropneumatics')
assert is_unique('misconjugatedly')
assert is_unique('uncopyrightable')
assert is_unique('ambidextrously')
assert is_unique('computerizably')
assert is_unique('croquetplaying')
assert is_unique('dermatoglyphic')
assert is_unique('hydromagnetics')
assert is_unique('hydropneumatic')
assert is_unique('pseudomythical')
assert is_unique('subformatively')
assert is_unique('troublemakings')
assert is_unique('undiscoverably')
assert is_unique('consumptively')
assert is_unique('copyrightable')
assert is_unique('documentarily')
assert is_unique('draughtswomen')
assert is_unique('endolymphatic')
assert is_unique('flamethrowing')
assert is_unique('flowchartings')
assert is_unique('hydromagnetic')
assert is_unique('lycanthropies')
assert is_unique('metalworkings')
assert is_unique('misconjugated')
assert is_unique('multibranched')
assert is_unique('subordinately')
assert is_unique('troublemaking')
assert is_unique('uncopyrighted')
assert is_unique('unmaledictory')
assert is_unique('unpredictably')
assert is_unique('unproblematic')
assert is_unique('unsympathized')
assert is_unique('adsorptively')
assert is_unique('ambidextrous')
assert is_unique('amblygonites')
assert is_unique('amylopectins')
assert is_unique('bankruptcies')
assert is_unique('blastodermic')
assert is_unique('bluestocking')
assert is_unique('cabinetworks')
assert is_unique('centrifugals')
assert is_unique('computerniks')
assert is_unique('configurated')
assert is_unique('considerably')
assert is_unique('counterplays')
assert is_unique('countervails')
assert is_unique('customizable')
assert is_unique('demographics')
assert is_unique('demonstrably')
assert is_unique('discountable')
assert is_unique('discrepantly')
assert is_unique('disreputably')
assert is_unique('doublethinks')
assert is_unique('drumbeatings')
assert is_unique('earthmovings')
assert is_unique('edulcorating')
assert is_unique('euchromatins')
assert is_unique('exclusionary')
assert is_unique('exculpations')
assert is_unique('expurgations')
assert is_unique('exhaustingly')
assert is_unique('farsightedly')
assert is_unique('flexographic')
assert is_unique('flowcharting')
assert is_unique('Francophiles')
assert is_unique('gourmandizes')
assert is_unique('granulocytes')
assert is_unique('hematoxylins')
assert is_unique('housewarming')
assert is_unique('hydromancies')
assert is_unique('hypnotizable')
assert is_unique('imponderably')
assert is_unique('incomputable')
assert is_unique('incomputably')
assert is_unique('kymographies')
assert is_unique('lexicography')
assert is_unique('Lubavitchers')
assert is_unique('lycanthropes')
assert is_unique('malnourished')
assert is_unique('mendaciously')
assert is_unique('metalworking')
assert is_unique('multipronged')
assert is_unique('nightwalkers')
assert is_unique('outpreaching')
assert is_unique('outsparkling')
assert is_unique('outspreading')
assert is_unique('overhaulings')
assert is_unique('overmatching')
assert is_unique('packinghouse')
assert is_unique('pelargoniums')
assert is_unique('phagocytized')
assert is_unique('phagocytizes')
assert is_unique('phytoalexins')
assert is_unique('polycentrism')
assert is_unique('postcardlike')
assert is_unique('problematics')
assert is_unique('productively')
assert is_unique('questionably')
assert is_unique('recognizably')
assert is_unique('stakeholding')
assert is_unique('stenographic')
assert is_unique('stickhandler')
assert is_unique('subnormality')
assert is_unique('subvocalized')
assert is_unique('thunderclaps')
assert is_unique('unforgivable')
assert is_unique('unglamorized')
assert is_unique('unhysterical')
assert is_unique('unprofitable')
assert is_unique('unprofitably')
assert is_unique('upholstering')
assert is_unique('voluntaryism')
assert is_unique('xylographies')
assert is_unique('abolishment')
assert is_unique('atmospheric')
assert is_unique('backgrounds')
assert is_unique('birthplaces')
assert is_unique('campgrounds')
assert is_unique('complainers')
assert is_unique('copyrighted')
assert is_unique('countryside')
assert is_unique('dangerously')
assert is_unique('designatory')
assert is_unique('disgraceful')
assert is_unique('disturbance')
assert is_unique('documentary')
assert is_unique('earthmoving')
assert is_unique('embracingly')
assert is_unique('facetiously')
assert is_unique('filmography')
assert is_unique('fluoridates')
assert is_unique('foremanship')
assert is_unique('geophysical')
assert is_unique('imprudently')
assert is_unique('importances')
assert is_unique('journalized')
assert is_unique('juxtaposing')
assert is_unique('keyboarding')
assert is_unique('lumberjacks')
assert is_unique('misanthrope')
assert is_unique('misanthropy')
assert is_unique('nefariously')
assert is_unique('overstaying')
assert is_unique('palindromes')
assert is_unique('percolating')
assert is_unique('personality')
assert is_unique('playgrounds')
assert is_unique('playwrights')
assert is_unique('precautions')
assert is_unique('predictably')
assert is_unique('problematic')
assert is_unique('quaveringly')
assert is_unique('regulations')
assert is_unique('republicans')
assert is_unique('secondarily')
assert is_unique('spaceflight')
assert is_unique('speculation')
assert is_unique('stenography')
assert is_unique('subcategory')
assert is_unique('Switzerland')
assert is_unique('thunderclap')
assert is_unique('trampolines')
assert is_unique('undesirably')
assert is_unique('unpolarized')
assert is_unique('vibraphones')
assert is_unique('vouchsafing')
assert is_unique('warehousing')
assert is_unique('workmanship')
assert is_unique('xylographic')
assert is_unique('aftershock')
assert is_unique('artichokes')
assert is_unique('authorizes')
assert is_unique('background')
assert is_unique('bankruptcy')
assert is_unique('binoculars')
assert is_unique('blackhorse')
assert is_unique('blacksmith')
assert is_unique('boyfriends')
assert is_unique('campground')
assert is_unique('clothespin')
assert is_unique('complaints')
assert is_unique('conjugated')
assert is_unique('copyrights')
assert is_unique('despicably')
assert is_unique('destroying')
assert is_unique('downstream')
assert is_unique('dumbwaiter')
assert is_unique('duplicates')
assert is_unique('farsighted')
assert is_unique('formidable')
assert is_unique('godparents')
assert is_unique('graciously')
assert is_unique('greyhounds')
assert is_unique('hospitable')
assert is_unique('importance')
assert is_unique('infamously')
assert is_unique('introduces')
assert is_unique('judgmental')
assert is_unique('juxtaposed')
assert is_unique('lawrencium')
assert is_unique('lumberjack')
assert is_unique('malnourish')
assert is_unique('mistakenly')
assert is_unique('monarchist')
assert is_unique('nightmares')
assert is_unique('noticeably')
assert is_unique('pathfinder')
assert is_unique('phlegmatic')
assert is_unique('quadriceps')
assert is_unique('Scunthorpe')
assert is_unique('shockingly')
assert is_unique('slumbering')
assert is_unique('trampoline')
assert is_unique('trapezoids')
assert is_unique('Volkswagen')
assert is_unique('waveringly')
|
|
#!/usr/bin/env python
#
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
#
##########################################################################
##########################################################################
#
# Module: run-corefx-tests.py
#
# Notes:
#
# Script to clone the CoreFx repo, build, and run its tests.
#
##########################################################################
##########################################################################
import argparse
import distutils.dir_util
import os
import re
import shutil
import subprocess
import sys
##########################################################################
# Globals
##########################################################################
Corefx_url = 'https://github.com/dotnet/corefx.git'
# This should be factored out of build.sh
Unix_name_map = {
'Linux': 'Linux',
'Darwin': 'OSX',
'FreeBSD': 'FreeBSD',
'OpenBSD': 'OpenBSD',
'NetBSD': 'NetBSD',
'SunOS': 'SunOS'
}
Is_windows = (os.name == 'nt')
##########################################################################
# Delete protocol
##########################################################################
def del_rw(action, name, exc):
    # shutil.rmtree error handler: make the path writable, then retry the delete
    os.chmod(name, 0651)
    os.remove(name)
##########################################################################
# Argument Parser
##########################################################################
description = 'Tool to facilitate running CoreFx tests from the CoreCLR repo'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-arch', dest='arch', default='x64')
parser.add_argument('-build_type', dest='build_type', default='Debug')
parser.add_argument('-clr_root', dest='clr_root', default=None)
parser.add_argument('-fx_root', dest='fx_root', default=None)
parser.add_argument('-fx_branch', dest='fx_branch', default='master')
parser.add_argument('-fx_commit', dest='fx_commit', default=None)
parser.add_argument('-env_script', dest='env_script', default=None)
##########################################################################
# Helper Functions
##########################################################################
def validate_args(args):
""" Validate all of the arguments parsed.
Args:
args (argparser.ArgumentParser): Args parsed by the argument parser.
Returns:
(arch, build_type, clr_root, fx_root, fx_branch, fx_commit, env_script)
(str, str, str, str, str, str, str)
Notes:
If the arguments are valid then return them all in a tuple. If not, raise
an exception stating x argument is incorrect.
"""
arch = args.arch
build_type = args.build_type
clr_root = args.clr_root
fx_root = args.fx_root
fx_branch = args.fx_branch
fx_commit = args.fx_commit
env_script = args.env_script
def validate_arg(arg, check):
""" Validate an individual arg
Args:
arg (str|bool): argument to be validated
check (lambda: x-> bool): test that returns either True or False
: based on whether the check passes.
Returns:
is_valid (bool): Is the argument valid?
"""
helper = lambda item: item is not None and check(item)
if not helper(arg):
raise Exception('Argument: %s is not valid.' % (arg))
valid_archs = ['x86', 'x64', 'arm', 'arm64']
valid_build_types = ['Debug', 'Checked', 'Release']
arch = next((a for a in valid_archs if a.lower() == arch.lower()), arch)
build_type = next((b for b in valid_build_types if b.lower() == build_type.lower()), build_type)
validate_arg(arch, lambda item: item in valid_archs)
validate_arg(build_type, lambda item: item in valid_build_types)
validate_arg(fx_branch, lambda item: True)
if fx_commit is None:
fx_commit = 'HEAD'
if clr_root is None:
clr_root = nth_dirname(os.path.abspath(sys.argv[0]), 3)
else:
clr_root = os.path.normpath(clr_root)
validate_arg(clr_root, lambda item: os.path.isdir(clr_root))
if fx_root is None:
fx_root = os.path.join(clr_root, '_', 'fx')
else:
fx_root = os.path.normpath(fx_root)
if env_script is not None:
validate_arg(env_script, lambda item: os.path.isfile(env_script))
env_script = os.path.abspath(env_script)
args = (arch, build_type, clr_root, fx_root, fx_branch, fx_commit, env_script)
log('Configuration:')
log(' arch: %s' % arch)
log(' build_type: %s' % build_type)
log(' clr_root: %s' % clr_root)
log(' fx_root: %s' % fx_root)
log(' fx_branch: %s' % fx_branch)
log(' fx_commit: %s' % fx_commit)
log(' env_script: %s' % env_script)
return args
def nth_dirname(path, n):
""" Find the Nth parent directory of the given path
Args:
path (str): path name containing at least N components
n (int): num of basenames to remove
Returns:
outpath (str): path with the last n components removed
Notes:
If n is 0, path is returned unmodified
"""
assert n >= 0
for i in range(0, n):
path = os.path.dirname(path)
return path
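# Illustrative example (added):
#   nth_dirname('/repo/tests/scripts/run.py', 3) == '/repo'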
def log(message):
""" Print logging information
Args:
message (str): message to be printed
"""
print '[%s]: %s' % (sys.argv[0], message)
##########################################################################
# Main
##########################################################################
def main(args):
global Corefx_url
global Unix_name_map
testing = False
arch, build_type, clr_root, fx_root, fx_branch, fx_commit, env_script = validate_args(
args)
clr_os = 'Windows_NT' if Is_windows else Unix_name_map[os.uname()[0]]
core_root = os.path.join(clr_root,
'bin',
'Product',
'%s.%s.%s' % (clr_os, arch, build_type))
# corefx creates both files that are read-only and files that include non-ascii
# characters. Using onerror=del_rw allows us to delete all of the read-only files.
# To delete the files with non-ascii characters, when rmtree fails due to those
# files, we then will call rd on Windows.
if not testing and os.path.exists(fx_root):
if Is_windows:
while True:
res = subprocess.check_output(['tasklist'])
                if 'VBCSCompiler.exe' not in res:
break
os.chdir(fx_root)
os.system('git clean -fxd')
os.chdir(clr_root)
shutil.rmtree(fx_root, onerror=del_rw)
# Clone the corefx branch
command = 'git clone -b %s --single-branch %s %s' % (
fx_branch, Corefx_url, fx_root)
log(command)
if testing:
if not os.path.exists(fx_root):
os.makedirs(fx_root)
returncode = 0
else:
returncode = os.system(command)
# Change directory to the corefx root
cwd = os.getcwd()
log('[cd] ' + fx_root)
os.chdir(fx_root)
# Checkout the appropriate corefx commit
command = "git checkout %s" % fx_commit
log(command)
returncode = 0 if testing else os.system(command)
    if returncode != 0:
sys.exit(returncode)
    # On Unix, the CoreFx build.sh requires HOME to be set, and it isn't by default
# under our CI system, so set it now.
if not Is_windows:
fx_home = os.path.join(fx_root, 'tempHome')
if not os.path.exists(fx_home):
os.makedirs(fx_home)
os.putenv('HOME', fx_home)
log('HOME=' + fx_home)
    # Determine the RID to specify to the corefx build scripts. This seems to
# be way harder than it ought to be.
# Gather up some arguments to pass to both build and build-tests.
config_args = '-Release -os:%s -buildArch:%s' % (clr_os, arch)
# Run the primary (non-test) corefx build
command = ' '.join(('build.cmd' if Is_windows else './build.sh',
config_args,
'-- /p:CoreCLROverridePath=%s' % core_root))
log(command)
returncode = 0 if testing else os.system(command)
if returncode != 0:
sys.exit(returncode)
# Build the build-tests command line.
if Is_windows:
command = 'build-tests.cmd'
else:
command = './build-tests.sh'
command = ' '.join((
command,
config_args,
'--',
'/p:WithoutCategories=IgnoreForCI'
))
if env_script is not None:
command += (' /p:PreExecutionTestScript=%s' % env_script)
if not Is_windows:
command += ' /p:TestWithLocalNativeLibraries=true'
# Run the corefx test build and run the tests themselves.
log(command)
returncode = 0 if testing else os.system(command)
sys.exit(returncode)
##########################################################################
# setup for Main
##########################################################################
if __name__ == '__main__':
Args = parser.parse_args(sys.argv[1:])
main(Args)
|
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
The parts module defines the various binary pieces of the Chrome application
bundle that need to be signed, as well as providing utilities to sign them.
"""
import os.path
from . import commands, signing
from .model import CodeSignOptions, CodeSignedProduct, VerifyOptions
_PROVISIONPROFILE_EXT = '.provisionprofile'
_PROVISIONPROFILE_DEST = 'embedded.provisionprofile'
def get_parts(config):
"""Returns all the |model.CodeSignedProduct| objects to be signed for a
Chrome application bundle.
Args:
config: The |config.CodeSignConfig|.
Returns:
A dictionary of |model.CodeSignedProduct|. The keys are short
identifiers that have no bearing on the actual signing operations.
"""
# Inner parts of the bundle do not have the identifier customized with
# the channel's identifier fragment.
if hasattr(config, 'base_config'):
uncustomized_bundle_id = config.base_config.base_bundle_id
else:
uncustomized_bundle_id = config.base_bundle_id
verify_options = VerifyOptions.DEEP + VerifyOptions.STRICT
parts = {
'app':
CodeSignedProduct(
'{.app_product}.app'.format(config),
config.base_bundle_id,
options=CodeSignOptions.FULL_HARDENED_RUNTIME_OPTIONS,
requirements=config.codesign_requirements_outer_app,
identifier_requirement=False,
entitlements='app-entitlements.plist',
verify_options=verify_options),
'framework':
CodeSignedProduct(
# The framework is a dylib, so options= flags are meaningless.
config.framework_dir,
'{}.framework'.format(uncustomized_bundle_id),
verify_options=verify_options),
'crashpad':
CodeSignedProduct(
'{.framework_dir}/Helpers/chrome_crashpad_handler'.format(
config),
'chrome_crashpad_handler',
options=CodeSignOptions.FULL_HARDENED_RUNTIME_OPTIONS,
verify_options=verify_options),
'helper-app':
CodeSignedProduct(
'{0.framework_dir}/Helpers/{0.product} Helper.app'.format(
config),
'{}.helper'.format(uncustomized_bundle_id),
options=CodeSignOptions.FULL_HARDENED_RUNTIME_OPTIONS,
verify_options=verify_options),
'helper-renderer-app':
CodeSignedProduct(
'{0.framework_dir}/Helpers/{0.product} Helper (Renderer).app'
.format(config),
'{}.helper.renderer'.format(uncustomized_bundle_id),
# Do not use |CodeSignOptions.FULL_HARDENED_RUNTIME_OPTIONS|
# because library validation is incompatible with the JIT
# entitlement.
options=CodeSignOptions.RESTRICT + CodeSignOptions.KILL +
CodeSignOptions.HARDENED_RUNTIME,
entitlements='helper-renderer-entitlements.plist',
verify_options=verify_options),
'helper-gpu-app':
CodeSignedProduct(
'{0.framework_dir}/Helpers/{0.product} Helper (GPU).app'.format(
config),
'{}.helper'.format(uncustomized_bundle_id),
# Do not use |CodeSignOptions.FULL_HARDENED_RUNTIME_OPTIONS|
# because library validation is incompatible with more
# permissive code signing entitlements.
options=CodeSignOptions.RESTRICT + CodeSignOptions.KILL +
CodeSignOptions.HARDENED_RUNTIME,
entitlements='helper-gpu-entitlements.plist',
verify_options=verify_options),
'helper-plugin-app':
CodeSignedProduct(
'{0.framework_dir}/Helpers/{0.product} Helper (Plugin).app'
.format(config),
'{}.helper.plugin'.format(uncustomized_bundle_id),
# Do not use |CodeSignOptions.FULL_HARDENED_RUNTIME_OPTIONS|
# because library validation is incompatible with the
# disable-library-validation entitlement.
options=CodeSignOptions.RESTRICT + CodeSignOptions.KILL +
CodeSignOptions.HARDENED_RUNTIME,
entitlements='helper-plugin-entitlements.plist',
verify_options=verify_options),
'helper-alerts':
CodeSignedProduct(
'{0.framework_dir}/Helpers/{0.product} Helper (Alerts).app'
.format(config),
'{}.framework.AlertNotificationService'.format(
config.base_bundle_id),
options=CodeSignOptions.FULL_HARDENED_RUNTIME_OPTIONS,
verify_options=verify_options),
'app-mode-app':
CodeSignedProduct(
'{.framework_dir}/Helpers/app_mode_loader'.format(config),
'app_mode_loader',
options=CodeSignOptions.FULL_HARDENED_RUNTIME_OPTIONS,
verify_options=verify_options),
}
dylibs = (
'libEGL.dylib',
'libGLESv2.dylib',
'libswiftshader_libEGL.dylib',
'libswiftshader_libGLESv2.dylib',
'libvk_swiftshader.dylib',
)
for library in dylibs:
library_basename = os.path.basename(library)
parts[library_basename] = CodeSignedProduct(
'{.framework_dir}/Libraries/{library}'.format(
config, library=library),
library_basename.replace('.dylib', ''),
verify_options=verify_options)
return parts
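# Hedged note (added): callers obtain the signable pieces via
# parts = get_parts(config) and sign an individual part with
# signing.sign_part(paths, config, parts['framework']), as sign_chrome()
# does below.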
def get_installer_tools(config):
"""Returns all the |model.CodeSignedProduct| objects to be signed for
creating the installer tools package.
Args:
config: The |config.CodeSignConfig|.
Returns:
A dictionary of |model.CodeSignedProduct|. The keys are short
identifiers that have no bearing on the actual signing operations.
"""
tools = {}
binaries = (
'goobsdiff',
'goobspatch',
'liblzma_decompress.dylib',
'xz',
'xzdec',
)
for binary in binaries:
options = (
CodeSignOptions.HARDENED_RUNTIME + CodeSignOptions.RESTRICT +
CodeSignOptions.LIBRARY_VALIDATION + CodeSignOptions.KILL)
tools[binary] = CodeSignedProduct(
'{.packaging_dir}/{binary}'.format(config, binary=binary),
binary.replace('.dylib', ''),
options=options if not binary.endswith('dylib') else None,
verify_options=VerifyOptions.DEEP + VerifyOptions.STRICT)
return tools
def sign_chrome(paths, config, sign_framework=False):
"""Code signs the Chrome application bundle and all of its internal nested
code parts.
Args:
paths: A |model.Paths| object.
config: The |model.CodeSignConfig| object. The |app_product| binary and
nested binaries must exist in |paths.work|.
sign_framework: True if the inner framework is to be signed in addition
to the outer application. False if only the outer application is to
be signed.
"""
parts = get_parts(config)
_sanity_check_version_keys(paths, parts)
if sign_framework:
# To sign an .app bundle that contains nested code, the nested
# components themselves must be signed. Each of these components is
# signed below. Note that unless a framework has multiple versions
# (which is discouraged), signing the entire framework is equivalent to
# signing the Current version.
# https://developer.apple.com/library/content/technotes/tn2206/_index.html#//apple_ref/doc/uid/DTS40007919-CH1-TNTAG13
for name, part in parts.items():
if name in ('app', 'framework'):
continue
signing.sign_part(paths, config, part)
# Sign the framework bundle.
signing.sign_part(paths, config, parts['framework'])
provisioning_profile_basename = config.provisioning_profile_basename
if provisioning_profile_basename:
commands.copy_files(
os.path.join(
paths.packaging_dir(config),
provisioning_profile_basename + _PROVISIONPROFILE_EXT),
os.path.join(paths.work, parts['app'].path, 'Contents',
_PROVISIONPROFILE_DEST))
# Sign the outer app bundle.
signing.sign_part(paths, config, parts['app'])
# Verify all the parts.
for part in parts.values():
signing.verify_part(paths, part)
# Display the code signature.
signing.validate_app(paths, config, parts['app'])
def _sanity_check_version_keys(paths, parts):
"""Verifies that the various version keys in Info.plists match.
Args:
paths: A |model.Paths| object.
parts: The dictionary returned from get_parts().
"""
app_plist_path = os.path.join(paths.work, parts['app'].path, 'Contents',
'Info.plist')
framework_plist_path = os.path.join(paths.work, parts['framework'].path,
'Resources', 'Info.plist')
with commands.PlistContext(
app_plist_path) as app_plist, commands.PlistContext(
framework_plist_path) as framework_plist:
if not 'KSVersion' in app_plist:
assert 'com.google.Chrome' not in app_plist['CFBundleIdentifier']
return
ks_version = app_plist['KSVersion']
cf_version = framework_plist['CFBundleShortVersionString']
if cf_version != ks_version:
raise ValueError(
                'CFBundleShortVersionString ({}) does not match KSVersion ({})'.format(
cf_version, ks_version))
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": train.MomentumOptimizer,
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
def optimize_loss(loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
moving_average_decay=0.9,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None):
"""Given loss and parameters for optimizer, returns a training op.
Args:
loss: Tensor, 0 dimensional.
global_step: Tensor, step counter for each update.
learning_rate: float or Tensor, magnitude of update per each training step.
optimizer: string, class or optimizer instance, used as trainer.
string should be name of optimizer, like 'SGD',
'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
class should be sub-class of tf.Optimizer that implements
`compute_gradients` and `apply_gradients` functions.
               optimizer instance should be an instantiation of a tf.Optimizer sub-class
and have `compute_gradients` and `apply_gradients` functions.
gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
value.
gradient_multipliers: dict of variables or variable names to floats.
If present, gradients for specified
variables will be multiplied by given constant.
clip_gradients: float or `None`, clips gradients by this value.
moving_average_decay: float or None, takes into account previous loss
to make learning smoother due to outliers.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
functions.
For example: tf.train.exponential_decay.
update_ops: list of update `Operation`s to execute at each step. If `None`,
uses elements of UPDATE_OPS collection.
variables: list of variables to optimize or
`None` to use all trainable variables.
name: The name for this operation is used to scope operations and summaries.
Returns:
Training op.
Raises:
ValueError: if optimizer is wrong type.
"""
with vs.variable_op_scope([loss, global_step], name, "OptimizeLoss"):
# Update ops take UPDATE_OPS collection if not provided.
update_ops = (set(update_ops or []) or
set(ops.get_collection(ops.GraphKeys.UPDATE_OPS)))
    # Make sure update ops are run before computing loss.
if update_ops:
with ops.control_dependencies(update_ops):
barrier = control_flow_ops.no_op(name="update_barrier")
loss = control_flow_ops.with_dependencies([barrier], loss)
# Moving average of the loss with decay.
if moving_average_decay is not None:
# Generate moving averages of the loss.
loss_averages = train.ExponentialMovingAverage(moving_average_decay,
name="avg")
loss_averages_op = loss_averages.apply([loss])
logging_ops.scalar_summary("loss/mean", loss_averages.average(loss))
loss = control_flow_ops.with_dependencies([loss_averages_op], loss)
# Learning rate variable, with possible decay.
if (isinstance(learning_rate, ops.Tensor)
and learning_rate.get_shape().ndims == 0):
lr = learning_rate
elif isinstance(learning_rate, float):
lr = vs.get_variable(
"learning_rate", [], trainable=False,
initializer=init_ops.constant_initializer(learning_rate))
else:
raise ValueError("Learning rate should be 0d Tensor or float. "
"Got %s of type %s" % (
str(learning_rate), str(type(learning_rate))))
if learning_rate_decay_fn is not None:
lr = learning_rate_decay_fn(lr, global_step)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [%s], you provided %s."
% (", ".join(OPTIMIZER_CLS_NAMES), optimizer))
opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
elif isinstance(optimizer, type) and issubclass(optimizer,
optimizer_.Optimizer):
opt = optimizer(learning_rate=lr)
elif isinstance(optimizer, optimizer_.Optimizer):
opt = optimizer
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer or instance of "
"subclass of Optimizer. Got %s." % str(optimizer))
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(loss, variables)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(
gradients, gradient_noise_scale)
# Multiply some gradients.
if gradient_multipliers is not None:
gradients = _multiply_gradients(gradients, gradient_multipliers)
# Optionally clip gradients by global norm.
if clip_gradients is not None:
gradients = _clip_gradients_by_norm(gradients, clip_gradients)
# Add scalar summary for loss.
logging_ops.scalar_summary("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
logging_ops.histogram_summary(variable.name, variable)
logging_ops.histogram_summary(variable.name + "/gradients", grad_values)
logging_ops.histogram_summary(variable.name + "/gradient_norm",
clip_ops.global_norm([grad_values]))
# Create gradient updates.
grad_updates = opt.apply_gradients(gradients,
global_step=global_step,
name="train")
# Make sure total_loss is valid.
final_loss = array_ops.check_numerics(loss, "Loss is inf or nan")
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies(
[grad_updates], final_loss)
return train_tensor
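# Hedged usage sketch (added): wiring optimize_loss for a toy scalar loss.
# Assumes the graph-mode API imported above; _demo_train_op is illustrative
# and not part of the original module.
def _demo_train_op():
  from tensorflow.python.ops import math_ops
  global_step = vars_.Variable(0, name="demo_global_step", trainable=False)
  weight = vars_.Variable(5.0, name="demo_weight")
  loss = math_ops.square(weight)  # scalar loss minimized toward weight == 0
  return optimize_loss(loss, global_step, learning_rate=0.1, optimizer="SGD")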
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients,
clip_gradients)
return list(zip(clipped_gradients, variables))
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
"""Adds scaled noise from a 0-mean normal distribution to gradients."""
gradients, variables = zip(*grads_and_vars)
noisy_gradients = []
for gradient in gradients:
if isinstance(gradient, ops.IndexedSlices):
gradient_shape = gradient.dense_shape
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
return list(zip(noisy_gradients, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients."""
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if var in gradient_multipliers or var.name in gradient_multipliers:
key = var if var in gradient_multipliers else var.name
grad *= constant_op.constant(
gradient_multipliers[key], dtype=dtypes.float32)
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
|
|
from __future__ import absolute_import
import os, datetime, urllib, re
import time, functools, cgi
import json
def timestamp():
"""
Returns a serializable UTC timestamp.
"""
return time.time()
def format_timestamp(s):
s = time.localtime(s)
d = datetime.datetime.fromtimestamp(time.mktime(s))
return d.strftime("%Y-%m-%d %H:%M:%S")
def isBin(s):
"""
    Does this string contain binary (non-printable-ASCII) characters?
"""
for i in s:
i = ord(i)
if i < 9:
return True
elif i > 13 and i < 32:
return True
elif i > 126:
return True
return False
def isXML(s):
for i in s:
if i in "\n \t":
continue
elif i == "<":
return True
else:
return False
def pretty_json(s):
try:
p = json.loads(s)
except ValueError:
return None
return json.dumps(p, sort_keys=True, indent=4).split("\n")
def urldecode(s):
"""
Takes a urlencoded string and returns a list of (key, value) tuples.
"""
return cgi.parse_qsl(s, keep_blank_values=True)
def urlencode(s):
"""
Takes a list of (key, value) tuples and returns a urlencoded string.
"""
s = [tuple(i) for i in s]
return urllib.urlencode(s, False)
def pretty_size(size):
suffixes = [
("B", 2**10),
("kB", 2**20),
("MB", 2**30),
]
for suf, lim in suffixes:
if size >= lim:
continue
else:
x = round(size/float(lim/2**10), 2)
if x == int(x):
x = int(x)
return str(x) + suf
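# Illustrative examples (added):
#   pretty_size(100) == "100B"
#   pretty_size(1024) == "1kB"
#   pretty_size(1536) == "1.5kB"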
class Data:
def __init__(self, name):
m = __import__(name)
dirname, _ = os.path.split(m.__file__)
self.dirname = os.path.abspath(dirname)
def path(self, path):
"""
Returns a path to the package data housed at 'path' under this
        module. Path can be a path to a file, or to a directory.
This function will raise ValueError if the path does not exist.
"""
fullpath = os.path.join(self.dirname, path)
if not os.path.exists(fullpath):
            raise ValueError("dataPath: %s does not exist." % fullpath)
return fullpath
pkg_data = Data(__name__)
class LRUCache:
"""
A decorator that implements a self-expiring LRU cache for class
methods (not functions!).
Cache data is tracked as attributes on the object itself. There is
therefore a separate cache for each object instance.
"""
def __init__(self, size=100):
self.size = size
def __call__(self, f):
cacheName = "_cached_%s"%f.__name__
cacheListName = "_cachelist_%s"%f.__name__
size = self.size
@functools.wraps(f)
def wrap(self, *args):
if not hasattr(self, cacheName):
setattr(self, cacheName, {})
setattr(self, cacheListName, [])
cache = getattr(self, cacheName)
cacheList = getattr(self, cacheListName)
            if args in cache:
cacheList.remove(args)
cacheList.insert(0, args)
return cache[args]
else:
ret = f(self, *args)
cacheList.insert(0, args)
cache[args] = ret
if len(cacheList) > size:
d = cacheList.pop()
cache.pop(d)
return ret
return wrap
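# Hedged usage sketch (added): a memoized method via LRUCache; the _Fib
# class is illustrative and not part of the original module.
class _Fib:
    @LRUCache(size=10)
    def fib(self, n):
        # Each instance keeps its own bounded cache of argument tuples.
        return n if n < 2 else self.fib(n - 1) + self.fib(n - 2)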
def parse_content_type(c):
"""
A simple parser for content-type values. Returns a (type, subtype,
parameters) tuple, where type and subtype are strings, and parameters
is a dict. If the string could not be parsed, return None.
E.g. the following string:
text/html; charset=UTF-8
Returns:
("text", "html", {"charset": "UTF-8"})
"""
parts = c.split(";", 1)
ts = parts[0].split("/", 1)
if len(ts) != 2:
return None
d = {}
if len(parts) == 2:
for i in parts[1].split(";"):
clause = i.split("=", 1)
if len(clause) == 2:
d[clause[0].strip()] = clause[1].strip()
return ts[0].lower(), ts[1].lower(), d
def hostport(scheme, host, port):
"""
    Returns the host component, with a port specification if needed.
"""
if (port, scheme) in [(80, "http"), (443, "https")]:
return host
else:
return "%s:%s"%(host, port)
def unparse_url(scheme, host, port, path=""):
"""
    Returns a URL string, constructed from the specified components.
"""
return "%s://%s%s"%(scheme, hostport(scheme, host, port), path)
def clean_hanging_newline(t):
"""
Many editors will silently add a newline to the final line of a
document (I'm looking at you, Vim). This function fixes this common
problem at the risk of removing a hanging newline in the rare cases
where the user actually intends it.
"""
if t and t[-1] == "\n":
return t[:-1]
return t
def parse_size(s):
"""
Parses a size specification. Valid specifications are:
123: bytes
123k: kilobytes
123m: megabytes
123g: gigabytes
"""
if not s:
return None
mult = None
if s[-1].lower() == "k":
mult = 1024**1
elif s[-1].lower() == "m":
mult = 1024**2
elif s[-1].lower() == "g":
mult = 1024**3
if mult:
s = s[:-1]
else:
mult = 1
try:
return int(s) * mult
except ValueError:
raise ValueError("Invalid size specification: %s"%s)
def safe_subn(pattern, repl, target, *args, **kwargs):
"""
There are Unicode conversion problems with re.subn. We try to smooth
that over by casting the pattern and replacement to strings. We really
    need a better solution that is aware of the actual content encoding.
"""
return re.subn(str(pattern), str(repl), target, *args, **kwargs)
|
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import functools
from edb import errors
from edb.edgeql import ast as qlast
from edb.edgeql import qltypes
from . import abc as s_abc
from . import annos as s_anno
from . import delta as sd
from . import functions as s_func
from . import name as sn
from . import objects as so
from . import types as s_types
from . import utils
if TYPE_CHECKING:
from edb.schema import schema as s_schema
_NOT_REACHABLE = 10000000
def _is_reachable(
schema: s_schema.Schema,
cast_kwargs: Mapping[str, bool],
source: s_types.Type,
target: s_types.Type,
distance: int,
) -> int:
if source == target:
return distance
casts = schema.get_casts_to_type(target, **cast_kwargs)
if not casts:
return _NOT_REACHABLE
sources = {c.get_from_type(schema) for c in casts}
distance += 1
if source in sources:
return distance
else:
return min(
_is_reachable(schema, cast_kwargs, source, s, distance)
for s in sources
)
@functools.lru_cache()
def get_implicit_cast_distance(
schema: s_schema.Schema,
source: s_types.Type,
target: s_types.Type,
) -> int:
dist = _is_reachable(schema, {'implicit': True}, source, target, 0)
if dist == _NOT_REACHABLE:
return -1
else:
return dist
def is_implicitly_castable(
schema: s_schema.Schema,
source: s_types.Type,
target: s_types.Type,
) -> bool:
return get_implicit_cast_distance(schema, source, target) >= 0
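# Illustrative note (added): in a schema where int16 -> int32 -> int64 are
# implicit casts, get_implicit_cast_distance(schema, int16, int64) would be
# 2, while get_implicit_cast_distance(schema, int64, int16) would be -1.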
@functools.lru_cache()
def find_common_castable_type(
schema: s_schema.Schema,
source: s_types.Type,
target: s_types.Type,
) -> Optional[s_types.Type]:
if get_implicit_cast_distance(schema, target, source) >= 0:
return source
if get_implicit_cast_distance(schema, source, target) >= 0:
return target
# Elevate target in the castability ladder, and check if
# source is castable to it on each step.
while True:
casts = schema.get_casts_from_type(target, implicit=True)
if not casts:
return None
targets = {c.get_to_type(schema) for c in casts}
if len(targets) > 1:
for t in targets:
candidate = find_common_castable_type(schema, source, t)
if candidate is not None:
return candidate
else:
return None
else:
target = next(iter(targets))
if get_implicit_cast_distance(schema, source, target) >= 0:
return target
@functools.lru_cache()
def is_assignment_castable(
schema: s_schema.Schema,
source: s_types.Type,
target: s_types.Type,
) -> bool:
# Implicitly castable implies assignment castable.
if is_implicitly_castable(schema, source, target):
return True
# Assignment casts are valid only as one-hop casts.
casts = schema.get_casts_to_type(target, assignment=True)
if not casts:
return False
for c in casts:
if c.get_from_type(schema) == source:
return True
return False
@functools.lru_cache()
def is_castable(
schema: s_schema.Schema,
source: s_types.Type,
target: s_types.Type,
) -> bool:
# Implicitly castable
if is_implicitly_castable(schema, source, target):
return True
elif is_assignment_castable(schema, source, target):
return True
else:
casts = schema.get_casts_to_type(target)
if not casts:
return False
else:
for c in casts:
if c.get_from_type(schema) == source:
return True
else:
return False
def get_cast_fullname_from_names(
module: str,
from_type: str,
to_type: str,
) -> sn.QualName:
quals = [from_type, to_type]
shortname = sn.QualName(module, 'cast')
return sn.QualName(
module=shortname.module,
name=sn.get_specialized_name(shortname, *quals),
)
def get_cast_fullname(
schema: s_schema.Schema,
module: str,
from_type: s_types.TypeShell[s_types.Type],
to_type: s_types.TypeShell[s_types.Type],
) -> sn.QualName:
return get_cast_fullname_from_names(
module,
str(from_type.get_name(schema)),
str(to_type.get_name(schema)),
)
class Cast(
so.QualifiedObject,
s_anno.AnnotationSubject,
s_func.VolatilitySubject,
s_abc.Cast,
qlkind=qltypes.SchemaObjectClass.CAST,
data_safe=True,
):
from_type = so.SchemaField(
s_types.Type, compcoef=0.5)
to_type = so.SchemaField(
s_types.Type, compcoef=0.5)
allow_implicit = so.SchemaField(
bool, default=False, compcoef=0.4)
allow_assignment = so.SchemaField(
bool, default=False, compcoef=0.4)
language = so.SchemaField(
qlast.Language, default=None, compcoef=0.4, coerce=True)
from_function = so.SchemaField(
str, default=None, compcoef=0.4)
from_expr = so.SchemaField(
bool, default=False, compcoef=0.4)
from_cast = so.SchemaField(
bool, default=False, compcoef=0.4)
code = so.SchemaField(
str, default=None, compcoef=0.4)
class CastCommandContext(sd.ObjectCommandContext[Cast],
s_anno.AnnotationSubjectCommandContext):
pass
class CastCommand(sd.QualifiedObjectCommand[Cast],
context_class=CastCommandContext):
def get_ast_attr_for_field(
self,
field: str,
astnode: Type[qlast.DDLOperation],
) -> Optional[str]:
if field in {'allow_assignment', 'allow_implicit'}:
return field
else:
return super().get_ast_attr_for_field(field, astnode)
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
if not context.stdmode and not context.testmode:
raise errors.UnsupportedFeatureError(
'user-defined casts are not supported',
context=astnode.context
)
return super()._cmd_tree_from_ast(schema, astnode, context)
@classmethod
def _classname_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.NamedDDL,
context: sd.CommandContext,
) -> sn.QualName:
assert isinstance(astnode, qlast.CastCommand)
modaliases = context.modaliases
from_type = utils.ast_to_type_shell(
astnode.from_type,
metaclass=s_types.Type,
modaliases=modaliases,
schema=schema,
)
to_type = utils.ast_to_type_shell(
astnode.to_type,
metaclass=s_types.Type,
modaliases=modaliases,
schema=schema,
)
module = 'std' if context.stdmode else '__derived__'
return get_cast_fullname(schema, module, from_type, to_type)
def canonicalize_attributes(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().canonicalize_attributes(schema, context)
schema = s_types.materialize_type_in_attribute(
schema, context, self, 'from_type')
schema = s_types.materialize_type_in_attribute(
schema, context, self, 'to_type')
return schema
class CreateCast(CastCommand, sd.CreateObject[Cast]):
astnode = qlast.CreateCast
def _create_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
fullname = self.classname
cast = schema.get(fullname, None)
if cast:
from_type = self.get_attribute_value('from_type')
to_type = self.get_attribute_value('to_type')
raise errors.DuplicateCastDefinitionError(
f'a cast from {from_type.get_displayname(schema)!r} '
f'to {to_type.get_displayname(schema)!r} is already defined',
context=self.source_context)
return super()._create_begin(schema, context)
@classmethod
def _cmd_tree_from_ast(
cls,
schema: s_schema.Schema,
astnode: qlast.DDLOperation,
context: sd.CommandContext,
) -> sd.Command:
assert isinstance(astnode, qlast.CreateCast)
cmd = super()._cmd_tree_from_ast(schema, astnode, context)
modaliases = context.modaliases
from_type = utils.ast_to_type_shell(
astnode.from_type,
metaclass=s_types.Type,
modaliases=modaliases,
schema=schema,
)
cmd.set_attribute_value('from_type', from_type)
to_type = utils.ast_to_type_shell(
astnode.to_type,
metaclass=s_types.Type,
modaliases=modaliases,
schema=schema,
)
cmd.set_attribute_value('to_type', to_type)
cmd.set_attribute_value('allow_implicit', astnode.allow_implicit)
cmd.set_attribute_value('allow_assignment', astnode.allow_assignment)
if astnode.code is not None:
cmd.set_attribute_value(
'language',
astnode.code.language,
)
if astnode.code.from_function is not None:
cmd.set_attribute_value(
'from_function',
astnode.code.from_function,
)
if astnode.code.code is not None:
cmd.set_attribute_value(
'code',
astnode.code.code,
)
if astnode.code.from_expr is not None:
cmd.set_attribute_value(
'from_expr',
astnode.code.from_expr,
)
if astnode.code.from_cast is not None:
cmd.set_attribute_value(
'from_cast',
astnode.code.from_cast,
)
return cmd
def _apply_field_ast(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
node: qlast.DDLOperation,
op: sd.AlterObjectProperty,
) -> None:
assert isinstance(node, qlast.CreateCast)
new_value: Any = op.new_value
if op.property == 'from_type':
# In a cast we can only have pure types, so this is going
# to be a TypeName.
node.from_type = cast(qlast.TypeName,
utils.typeref_to_ast(schema, new_value))
elif op.property == 'to_type':
# In a cast we can only have pure types, so this is going
# to be a TypeName.
node.to_type = cast(qlast.TypeName,
utils.typeref_to_ast(schema, new_value))
elif op.property == 'code':
if node.code is None:
node.code = qlast.CastCode()
node.code.code = new_value
elif op.property == 'language':
if node.code is None:
node.code = qlast.CastCode()
node.code.language = new_value
elif op.property == 'from_function' and new_value:
if node.code is None:
node.code = qlast.CastCode()
node.code.from_function = new_value
elif op.property == 'from_expr' and new_value:
if node.code is None:
node.code = qlast.CastCode()
node.code.from_expr = new_value
elif op.property == 'from_cast' and new_value:
if node.code is None:
node.code = qlast.CastCode()
node.code.from_cast = new_value
else:
super()._apply_field_ast(schema, context, node, op)
class RenameCast(CastCommand, sd.RenameObject[Cast]):
pass
class AlterCast(CastCommand, sd.AlterObject[Cast]):
astnode = qlast.AlterCast
class DeleteCast(CastCommand, sd.DeleteObject[Cast]):
astnode = qlast.DropCast
|
|
import pymongo
import bson
import random
import datetime
from pymongo import MongoClient
from bson.objectid import ObjectId
busStops = ['Flogsta','Polacksbacken','Central Station','Granby','Gamla Uppsala','Uppsala Science Park','Uppsala Centrum','Carolina Rediviva', 'Uppsala Hospital']
coordinates = [[59.851252, 17.593290], [59.840427, 17.647628], [59.858052, 17.644739], [59.875991, 17.674517], [59.897172, 17.636958], [59.842756, 17.638956], [59.854760, 17.632371], [59.847369, 17.641398]]
NUM_OF_ENTRIES = 10
YEAR = 2015
MONTH = 11
DAY = 30
TRIP_DURATION = 30
MIN_10 = datetime.timedelta(minutes = 10)
MIN_20 = datetime.timedelta(minutes = 20)
MIN_25 = datetime.timedelta(minutes = 25)
def createRoute(start, end):
wp1 = random.choice(busStops)
while (wp1 == start or wp1 == end):
wp1 = random.choice(busStops)
wp2 = random.choice(busStops)
while (wp2 == start or wp2 == end or wp2 == wp1):
wp2 = random.choice(busStops)
wp3 = random.choice(busStops)
while (wp3 == start or wp3 == end or wp3 == wp1 or wp3 == wp2):
wp3 = random.choice(busStops)
return [busStops.index(start), busStops.index(wp1), busStops.index(wp2),
busStops.index(wp3), busStops.index(end)]
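# Illustrative (added): createRoute('Flogsta', 'Granby') returns a list of
# five bus-stop indices, e.g. [0, 5, 2, 7, 3], where the three middle
# waypoints are distinct stops other than the start and end.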
client = MongoClient()
db = client.monad
records = []
for i in range(28):
original_id = ObjectId()
hour = 8
minute = random.randint(25, 50)
st = 0
ed = 1
start = busStops[0]
start_position_lat = coordinates[st][0]
start_position_lon = coordinates[st][1]
end_position_lat = coordinates[ed][0]
end_position_lon = coordinates[ed][1]
end = busStops[1]
route = createRoute(start, end)
startTime = datetime.datetime(YEAR, MONTH, DAY, hour, minute, 0)
endTime = datetime.datetime(
YEAR, MONTH, DAY, 9, random.randint(0, 10), 0)
new_record1 = {
"_id": original_id,
#"BusNo": random.randint(0, 59),
"StartTime": startTime,
"StartBusstop": start,
"EndTime": endTime,
"EndBusstop": end,
"start_position_lat": start_position_lat,
"start_position_lon": start_position_lon,
"end_position_lat": end_position_lat,
"end_position_lon": end_position_lon,
"Waypoints": [
{"BusStopID": route[0], "DptTime": startTime,
"PsgGetOn": 5, "PsgGetOff": 0, "latitude": start_position_lat,
"longitude": start_position_lon},
{"BusStopID": route[1], "DptTime": startTime + MIN_10,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": start_position_lat + 0.001,
"longitude": start_position_lon + 0.001},
{"BusStopID": route[2], "DptTime": startTime + MIN_20,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": start_position_lat + 0.002,
"longitude": start_position_lon + 0.002},
{"BusStopID": route[3], "DptTime": startTime + MIN_25,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": start_position_lat + 0.003,
"longitude": start_position_lon + 0.003},
{"BusStopID": route[4], "DptTime": endTime,
"PsgGetOn": 0, "PsgGetOff": 5, "latitude": end_position_lat,
"longitude": end_position_lon}],
"GeneratedTime": 0,
"VehicleID": random.randint(0, 59),
"DriverID": random.randint(0, 59)
    }
    records.append(new_record1)
for i in range(28):
original_id = ObjectId()
hour = 17
minute = random.randint(5,20)
st = 1
ed = 0
start = busStops[1]
start_position_lat = coordinates[st][0]
start_position_lon = coordinates[st][1]
end_position_lat = coordinates[ed][0]
end_position_lon = coordinates[ed][1]
end = busStops[0]
route = createRoute(start, end)
startTime = datetime.datetime(YEAR, MONTH, DAY, hour, minute, 0)
endTime = datetime.datetime(
YEAR, MONTH, DAY, 17, random.randint(40, 59), 0)
new_record2 = {
"_id": original_id,
#"BusNo": random.randint(0, 59),
"StartTime": startTime,
"StartBusstop": start,
"EndTime": endTime,
"EndBusstop": end,
"start_position_lat": start_position_lat,
"start_position_lon": start_position_lon,
"end_position_lat": end_position_lat,
"end_position_lon": end_position_lon,
"Waypoints": [
{"BusStopID": route[0], "DptTime": startTime,
"PsgGetOn": 5, "PsgGetOff": 0, "latitude": start_position_lat,
"longitude": start_position_lon},
{"BusStopID": route[1], "DptTime": startTime + MIN_10,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": start_position_lat + 0.001,
"longitude": start_position_lon + 0.001},
{"BusStopID": route[2], "DptTime": startTime + MIN_20,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": start_position_lat + 0.002,
"longitude": start_position_lon + 0.002},
{"BusStopID": route[3], "DptTime": startTime + MIN_25,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": start_position_lat + 0.003,
"longitude": start_position_lon + 0.003},
{"BusStopID": route[4], "DptTime": endTime,
"PsgGetOn": 0, "PsgGetOff": 5, "latitude": end_position_lat,
"longitude": end_position_lon}],
"GeneratedTime": 0,
"VehicleID": random.randint(0, 59),
"DriverID": random.randint(0, 59)
    }
    records.append(new_record2)
for i in range(28):
original_id = ObjectId()
hour = random.randint(0, 22)
minute = random.randint(0, 59)
st = random.randint(0,7)
ed = random.randint(0,7)
    start = busStops[st]  # keep the stop name consistent with its coordinates
start_position_lat = coordinates[st][0]
start_position_lon = coordinates[st][1]
end_position_lat = coordinates[ed][0]
end_position_lon = coordinates[ed][1]
    end = busStops[ed]
route = createRoute(start, end)
startTime = datetime.datetime(YEAR, MONTH, DAY, hour, minute, 0)
endTime = datetime.datetime(
YEAR, MONTH, DAY, hour+1, minute, 0)
new_record3 = {
"_id": original_id,
#"BusNo": random.randint(0, 59),
"StartTime": startTime,
"StartBusstop": start,
"EndTime": endTime,
"EndBusstop": end,
"start_position_lat": start_position_lat,
"start_position_lon": start_position_lon,
"end_position_lat": end_position_lat,
"end_position_lon": end_position_lon,
"Waypoints": [
{"BusStopID": route[0], "DptTime": startTime,
"PsgGetOn": 5, "PsgGetOff": 0, "latitude": start_position_lat,
"longitude": start_position_lon},
{"BusStopID": route[1], "DptTime": startTime + MIN_10,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": start_position_lat + 0.001,
"longitude": start_position_lon + 0.001},
{"BusStopID": route[2], "DptTime": startTime + MIN_20,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": start_position_lat + 0.002,
"longitude": start_position_lon + 0.002},
{"BusStopID": route[3], "DptTime": startTime + MIN_25,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": start_position_lat + 0.003,
"longitude": start_position_lon + 0.003},
{"BusStopID": route[4], "DptTime": endTime,
"PsgGetOn": 0, "PsgGetOff": 5, "latitude": end_position_lat,
"longitude": end_position_lon}],
"GeneratedTime": 0,
"VehicleID": random.randint(0, 59),
"DriverID": random.randint(0, 59)
    }
    records.append(new_record3)
for i in range(28):
original_id = ObjectId()
hour = random.randint(0, 22)
minute = random.randint(0, 59)
st = 4
ed = 1
start = busStops[st]
start_position_lat = coordinates[st][0]
start_position_lon = coordinates[st][1]
end_position_lat = coordinates[ed][0]
end_position_lon = coordinates[ed][1]
end = busStops[ed]
route = createRoute(start, end)
startTime = datetime.datetime(YEAR, MONTH, DAY, 12, minute, 0)
endTime = datetime.datetime(
YEAR, MONTH, DAY, 12, minute, 0)
new_record4 = {
"_id": original_id,
#"BusNo": random.randint(0, 59),
"StartTime": startTime,
"StartBusstop": start,
"EndTime": endTime,
"EndBusstop": end,
"start_position_lat": start_position_lat,
"start_position_lon": start_position_lon,
"end_position_lat": end_position_lat,
"end_position_lon": end_position_lon,
"Waypoints": [
{"BusStopID": route[0], "DptTime": startTime,
"PsgGetOn": 5, "PsgGetOff": 0, "latitude": start_position_lat,
"longitude": start_position_lon},
{"BusStopID": route[1], "DptTime": startTime + MIN_10,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": 59.875991,
"longitude": 17.674517},
{"BusStopID": route[2], "DptTime": startTime + MIN_20,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": 59.858052,
"longitude": 17.644739},
{"BusStopID": route[3], "DptTime": startTime + MIN_25,
"PsgGetOn": 5, "PsgGetOff": 5, "latitude": 59.875991,
"longitude": 17.674517},
{"BusStopID": route[4], "DptTime": endTime,
"PsgGetOn": 0, "PsgGetOff": 5, "latitude": end_position_lat,
"longitude": end_position_lon}],
"GeneratedTime": 0,
"VehicleID": random.randint(0, 59),
"DriverID": random.randint(0, 59)
    }
    records.append(new_record4)
result = db.TimeTable.insert_many(records)
|
|
# -*- coding: utf-8 -*-
import logging
import time
import json
import uuid
# Setup logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
SAMPLE_APPLIANCES = [
{
"applianceId": "endpoint-001",
"manufacturerName": "Sample Manufacturer",
"modelName": "Smart Switch",
"version": "1",
"friendlyName": "Switch",
"friendlyDescription": "001 Switch that can only be turned on/off",
"isReachable": True,
"actions": [
"turnOn",
"turnOff"
],
"additionalApplianceDetails": {
"detail1": "For simplicity, this is the only appliance",
"detail2": "that has some values in the additionalApplianceDetails"
}
},
{
"applianceId": "endpoint-002",
"manufacturerName": "Sample Manufacturer",
"modelName": "Smart Light",
"version": "1",
"friendlyName": "Light",
"friendlyDescription": "002 Light that is dimmable and can change color and color temperature",
"isReachable": True,
"actions": [
"turnOn",
"turnOff",
"setPercentage",
"incrementPercentage",
"decrementPercentage",
"setColor",
"setColorTemperature",
"incrementColorTemperature",
"decrementColorTemperature"
],
"additionalApplianceDetails": {}
},
{
"applianceId": "endpoint-003",
"manufacturerName": "Sample Manufacturer",
"modelName": "Smart White Light",
"version": "1",
"friendlyName": "White Light",
"friendlyDescription": "003 Light that is dimmable and can change color temperature only",
"isReachable": True,
"actions": [
"turnOn",
"turnOff",
"setPercentage",
"incrementPercentage",
"decrementPercentage",
"setColorTemperature",
"incrementColorTemperature",
"decrementColorTemperature"
],
"additionalApplianceDetails": {}
},
]
def lambda_handler(request, context):
"""Main Lambda handler.
"""
try:
logger.info("Directive:")
logger.info(json.dumps(request, indent=4, sort_keys=True))
version = get_directive_version(request)
if version == "3":
logger.info("Received v3 directive!")
if request["directive"]["header"]["name"] == "Discover":
response = handle_discovery_v3(request)
else:
response = handle_non_discovery_v3(request)
else:
logger.info("Received v2 directive!")
if request["header"]["namespace"] == "Alexa.ConnectedHome.Discovery":
response = handle_discovery()
else:
response = handle_non_discovery(request)
logger.info("Response:")
logger.info(json.dumps(response, indent=4, sort_keys=True))
return response
except ValueError as error:
logger.error(error)
raise
# v2 utility functions
def get_appliance_by_appliance_id(appliance_id):
for appliance in SAMPLE_APPLIANCES:
if appliance["applianceId"] == appliance_id:
return appliance
return None
def get_utc_timestamp(seconds=None):
return time.strftime("%Y-%m-%dT%H:%M:%S.00Z", time.gmtime(seconds))
def get_uuid():
return str(uuid.uuid4())
# v2 handlers
def handle_discovery():
header = {
"namespace": "Alexa.ConnectedHome.Discovery",
"name": "DiscoverAppliancesResponse",
"payloadVersion": "2",
"messageId": get_uuid()
}
payload = {
"discoveredAppliances": SAMPLE_APPLIANCES
}
response = {
"header": header,
"payload": payload
}
return response
def handle_non_discovery(request):
request_name = request["header"]["name"]
if request_name == "TurnOnRequest":
header = {
"namespace": "Alexa.ConnectedHome.Control",
"name": "TurnOnConfirmation",
"payloadVersion": "2",
"messageId": get_uuid()
}
payload = {}
elif request_name == "TurnOffRequest":
header = {
"namespace": "Alexa.ConnectedHome.Control",
"name": "TurnOffConfirmation",
"payloadVersion": "2",
"messageId": get_uuid()
}
# other handlers omitted in this example
payload = {}
response = {
"header": header,
"payload": payload
}
return response
# v3 handlers
def handle_discovery_v3(request):
endpoints = []
for appliance in SAMPLE_APPLIANCES:
endpoints.append(get_endpoint_from_v2_appliance(appliance))
response = {
"event": {
"header": {
"namespace": "Alexa.Discovery",
"name": "Discover.Response",
"payloadVersion": "3",
"messageId": get_uuid()
},
"payload": {
"endpoints": endpoints
}
}
}
return response
def handle_non_discovery_v3(request):
request_namespace = request["directive"]["header"]["namespace"]
request_name = request["directive"]["header"]["name"]
if request_namespace == "Alexa.PowerController":
if request_name == "TurnOn":
value = "ON"
else:
value = "OFF"
response = {
"context": {
"properties": [
{
"namespace": "Alexa.PowerController",
"name": "powerState",
"value": value,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 500
}
]
},
"event": {
"header": {
"namespace": "Alexa",
"name": "Response",
"payloadVersion": "3",
"messageId": get_uuid(),
"correlationToken": request["directive"]["header"]["correlationToken"]
},
"endpoint": {
"scope": {
"type": "BearerToken",
"token": "access-token-from-Amazon"
},
"endpointId": request["directive"]["endpoint"]["endpointId"]
},
"payload": {}
}
}
return response
elif request_namespace == "Alexa.Authorization":
if request_name == "AcceptGrant":
response = {
"event": {
"header": {
"namespace": "Alexa.Authorization",
"name": "AcceptGrant.Response",
"payloadVersion": "3",
"messageId": "5f8a426e-01e4-4cc9-8b79-65f8bd0fd8a4"
},
"payload": {}
}
}
return response
# other handlers omitted in this example
# v3 utility functions
def get_endpoint_from_v2_appliance(appliance):
endpoint = {
"endpointId": appliance["applianceId"],
"manufacturerName": appliance["manufacturerName"],
"friendlyName": appliance["friendlyName"],
"description": appliance["friendlyDescription"],
"displayCategories": [],
"cookie": appliance["additionalApplianceDetails"],
"capabilities": []
}
endpoint["displayCategories"] = get_display_categories_from_v2_appliance(appliance)
endpoint["capabilities"] = get_capabilities_from_v2_appliance(appliance)
return endpoint
def get_directive_version(request):
    try:
        return request["directive"]["header"]["payloadVersion"]
    except (KeyError, TypeError):
        try:
            return request["header"]["payloadVersion"]
        except (KeyError, TypeError):
            return "-1"
def get_endpoint_by_endpoint_id(endpoint_id):
appliance = get_appliance_by_appliance_id(endpoint_id)
if appliance:
return get_endpoint_from_v2_appliance(appliance)
return None
def get_display_categories_from_v2_appliance(appliance):
model_name = appliance["modelName"]
if model_name == "Smart Switch": displayCategories = ["SWITCH"]
elif model_name == "Smart Light": displayCategories = ["LIGHT"]
elif model_name == "Smart White Light": displayCategories = ["LIGHT"]
else: displayCategories = ["OTHER"]
return displayCategories
def get_capabilities_from_v2_appliance(appliance):
model_name = appliance["modelName"]
if model_name == 'Smart Switch':
capabilities = [
{
"type": "AlexaInterface",
"interface": "Alexa.PowerController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerState" }
],
"proactivelyReported": True,
"retrievable": True
}
}
]
elif model_name == "Smart Light":
capabilities = [
{
"type": "AlexaInterface",
"interface": "Alexa.PowerController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerState" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.ColorController",
"version": "3",
"properties": {
"supported": [
{ "name": "color" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.ColorTemperatureController",
"version": "3",
"properties": {
"supported": [
{ "name": "colorTemperatureInKelvin" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.BrightnessController",
"version": "3",
"properties": {
"supported": [
{ "name": "brightness" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.PowerLevelController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerLevel" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.PercentageController",
"version": "3",
"properties": {
"supported": [
{ "name": "percentage" }
],
"proactivelyReported": True,
"retrievable": True
}
}
]
elif model_name == "Smart White Light":
capabilities = [
{
"type": "AlexaInterface",
"interface": "Alexa.PowerController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerState" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.ColorTemperatureController",
"version": "3",
"properties": {
"supported": [
{ "name": "colorTemperatureInKelvin" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.BrightnessController",
"version": "3",
"properties": {
"supported": [
{ "name": "brightness" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.PowerLevelController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerLevel" }
],
"proactivelyReported": True,
"retrievable": True
}
},
{
"type": "AlexaInterface",
"interface": "Alexa.PercentageController",
"version": "3",
"properties": {
"supported": [
{ "name": "percentage" }
],
"proactivelyReported": True,
"retrievable": True
}
}
]
else:
# in this example, just return simple on/off capability
capabilities = [
{
"type": "AlexaInterface",
"interface": "Alexa.PowerController",
"version": "3",
"properties": {
"supported": [
{ "name": "powerState" }
],
"proactivelyReported": True,
"retrievable": True
}
}
]
# additional capabilities that are required for each endpoint
endpoint_health_capability = {
"type": "AlexaInterface",
"interface": "Alexa.EndpointHealth",
"version": "3",
"properties": {
"supported":[
{ "name":"connectivity" }
],
"proactivelyReported": True,
"retrievable": True
}
}
alexa_interface_capability = {
"type": "AlexaInterface",
"interface": "Alexa",
"version": "3"
}
capabilities.append(endpoint_health_capability)
capabilities.append(alexa_interface_capability)
return capabilities
|
|
#!/usr/bin/env python
# ssterm - simple serial-port terminal
# https://github.com/vsergeev/ssterm
#
# Copyright (c) 2014-2016 Ivan (Vanya) A. Sergeev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import sys
import os
import re
import select
import getopt
import string
import termios
###############################################################################
### Default Options
###############################################################################
# Default TTY Options
TTY_Options = {
'baudrate': 115200,
'databits': 8,
'stopbits': 1,
'parity': "none",
'flow_control': "none"
}
# Default Formatting Options
Format_Options = {
'output_mode': 'raw', # 'split', 'splitfull', 'hex', 'hexnl'
'input_mode': 'raw', # 'hex'
'transmit_newline': "raw", # 'cr', 'crlf', 'lf', 'none'
'receive_newline': "raw", # 'cr', 'crlf', 'lf', 'crorlf'
'echo': False,
'color_chars': b'', # e.g. b"\nA"
}
###############################################################################
### Program Constants
###############################################################################
# Quit Escape Character: Ctrl-] = 0x1D
Quit_Escape_Character = 0x1d if sys.version_info[0] >= 3 else "\x1d"
# Number of columns in hexadecimal print mode
Hexadecimal_Columns = 16
# Default color codes:
# Black/Red, Black/Green, Black/Yellow,
# White/Blue, White/Magenta, Black/Cyan,
# Black/White
Color_Codes = [ b"\x1b[1;30;41m", b"\x1b[1;30;42m", b"\x1b[1;30;43m",
b"\x1b[1;37;44m", b"\x1b[1;37;45m", b"\x1b[1;30;46m",
b"\x1b[1;30;47m"]
Color_Code_Reset = b"\x1b[0m"
# Read buffer size
READ_BUF_SIZE = 4096
# Newline Substitution tables
RX_Newline_Sub = {'raw': None, 'cr': b"\r", 'crlf': b"\r\n", 'lf': b"\n", 'crorlf': b"\r|\n"}
TX_Newline_Sub = {'raw': None, 'cr': b"\r", 'crlf': b"\r\n", 'lf': b"\n", 'none': b""}
###############################################################################
### Serial Helper Functions
###############################################################################
def serial_open(device_path, baudrate, databits, stopbits, parity, flow_control):
# Open the tty device
try:
fd = os.open(device_path, os.O_RDWR | os.O_NOCTTY)
except OSError as err:
raise Exception("%s" % str(err))
# Get current termios attributes
# [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
try:
tty_attr = termios.tcgetattr(fd)
except termios.error as err:
raise Exception("Getting serial port options: %s" % str(err))
######################################################################
### cflag, ispeed, ospeed
######################################################################
# Reset cflag: Enable receiver, ignore modem control lines
tty_attr[2] = (termios.CREAD | termios.CLOCAL)
# Look up the termios baudrate and set it in the attributes structure
# tty_attr[cflag], tty_attr[ispeed], tty_attr[ospeed]
termios_baudrates = {
50: termios.B50, 75: termios.B75, 110: termios.B110, 134: termios.B134,
150: termios.B150, 200: termios.B200, 300: termios.B300,
600: termios.B600, 1200: termios.B1200, 1800: termios.B1800,
2400: termios.B2400, 4800: termios.B4800, 9600: termios.B9600,
19200: termios.B19200, 38400: termios.B38400, 57600: termios.B57600,
115200: termios.B115200, 230400: termios.B230400,
# Linux baudrates bits missing in termios module included below
460800: 0x1004, 500000: 0x1005, 576000: 0x1006,
921600: 0x1007, 1000000: 0x1008, 1152000: 0x1009,
1500000: 0x100A, 2000000: 0x100B, 2500000: 0x100C,
3000000: 0x100D, 3500000: 0x100E, 4000000: 0x100F,
}
if baudrate in termios_baudrates:
tty_attr[2] |= termios_baudrates[baudrate]
tty_attr[4] = termios_baudrates[baudrate]
tty_attr[5] = termios_baudrates[baudrate]
else:
# Set alternate speed via BOTHER (=0x1000) cflag,
# Pass baudrate directly in ispeed, ospeed
tty_attr[2] |= 0x1000
tty_attr[4] = baudrate
tty_attr[5] = baudrate
# Look up and set the appropriate cflag bits in termios_options for a given
# option
def termios_cflag_map_and_set(termios_options, option, errmsg):
if not option in termios_options:
raise ValueError(errmsg)
tty_attr[2] |= termios_options[option]
# Look up the termios data bits and set it in the attributes structure
termios_databits = {5: termios.CS5, 6: termios.CS6, 7: termios.CS7, 8: termios.CS8}
termios_cflag_map_and_set(termios_databits, databits, "Invalid tty databits!")
# Look up the termios parity and set it in the attributes structure
termios_parity = {"none": 0, "even": termios.PARENB, "odd": termios.PARENB | termios.PARODD}
termios_cflag_map_and_set(termios_parity, parity, "Invalid tty parity!")
# Look up the termios stop bits and set it in the attributes structure
termios_stopbits = {1: 0, 2: termios.CSTOPB}
termios_cflag_map_and_set(termios_stopbits, stopbits, "Invalid tty stop bits!")
# Look up the termios flow control and set it in the attributes structure
termios_flowcontrol = {"none": 0, "rtscts": termios.CRTSCTS, "xonxoff": 0}
termios_cflag_map_and_set(termios_flowcontrol, flow_control, "Invalid tty flow control!")
######################################################################
### lflag
######################################################################
# Turn off signals generated for special characters, turn off canonical
# mode so we can have raw input -- tty_attr[lflag]
tty_attr[3] = 0
######################################################################
### oflag
######################################################################
# Turn off POSIX defined output processing and character mapping/delays
# so we can have raw output -- tty_attr[oflag]
tty_attr[1] = 0
######################################################################
### iflag
######################################################################
# Ignore break characters -- tty_attr[iflag]
tty_attr[0] = termios.IGNBRK
# Enable parity checking if we are using parity -- tty_attr[iflag]
if parity != "none":
tty_attr[0] |= (termios.INPCK | termios.ISTRIP)
# Enable XON/XOFF if we are using software flow control
if flow_control == "xonxoff":
tty_attr[0] |= (termios.IXON | termios.IXOFF | termios.IXANY)
# Set new termios attributes
try:
termios.tcsetattr(fd, termios.TCSANOW, tty_attr)
except termios.error as err:
raise Exception("Setting serial port options: %s" % str(err))
# Return the fd
return fd
def serial_close(fd):
os.close(fd)
###############################################################################
### TTY Helper Functions
###############################################################################
def stdin_raw_open(echo):
fd = sys.stdin.fileno()
# If stdin is not a TTY (e.g. is a pipe), we don't need to reconfigure the
# TTY
if not os.isatty(fd):
return fd
# Get the current stdin tty options
# [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
try:
stdin_attr = termios.tcgetattr(fd)
except termios.error as err:
raise Exception("Getting stdin tty options: %s" % str(err))
# Disable canonical input, so we can send characters without a line
# feed, disable signal interpretation, and disable echo
# -- stdin_attr[cflag]
stdin_attr[3] &= ~(termios.ICANON | termios.ECHO | termios.ECHOE | termios.ISIG)
# Enable echo if needed
if echo:
stdin_attr[3] |= termios.ECHO
# Turn off XON/XOFF interpretation so they pass through to the serial
# port -- stdin_attr[iflag]
stdin_attr[0] &= ~(termios.IXON | termios.IXOFF | termios.IXANY)
# Set the new stdin tty attributes
try:
termios.tcsetattr(fd, termios.TCSANOW, stdin_attr)
except termios.error as err:
raise Exception("Setting stdin tty options: %s" % str(err))
return fd
def stdout_raw_open():
# Re-open stdout in unbuffered mode
sys.stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0)
return sys.stdout.fileno()
def stdin_reset():
fd = sys.stdin.fileno()
# If stdin is not a TTY (e.g. is a pipe), we don't need to reconfigure the
# TTY
if not os.isatty(fd):
return fd
# Reset stdin terminal for canonical input, echo, and signals
# Get the current stdin tty options
# [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
try:
stdin_attr = termios.tcgetattr(fd)
except termios.error as err:
raise Exception("Getting stdin tty options: %s" % str(err))
# Enable canonical input, echo, signals -- stdin_attr[cflag]
stdin_attr[3] |= (termios.ICANON | termios.ECHO | termios.ECHOE | termios.ISIG)
# Re-enable XON/XOFF interpretation -- stdin_attr[iflag]
stdin_attr[0] |= (termios.IXON | termios.IXOFF | termios.IXANY)
# Set the new stdin tty attributes
try:
termios.tcsetattr(fd, termios.TCSANOW, stdin_attr)
except termios.error as err:
raise Exception("Setting stdin tty options: %s" % str(err))
###############################################################################
### Input Processors
###############################################################################
def input_processor_newline(sub):
# Convert constants to byte strings
linesep = os.linesep.encode()
# Substitute console newline in buf with sub
def f(buf):
# FIXME: This assumes a single character platform newline.
return buf.replace(linesep, sub)
return f
def input_processor_hexadecimal():
# Convert constants to byte strings
hexdigits = string.hexdigits.encode()
# State to keep track of consecutive hex characters
state = [b""]
# Interpret hexadecimal characters in buf
def f(buf):
nbuf = b""
for i,_ in enumerate(buf):
c = buf[i:i+1]
# Accumulate hex digits in state buffer, and reset the state if we
# encounter a non-hex character
if c in hexdigits:
state[0] += c
else:
state[0] = b""
# Convert 2 consecutive hex characters
if len(state[0]) == 2:
nbuf += bytes(bytearray([int(state[0], 16)]))
state[0] = b""
return nbuf
return f
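# Illustrative sketch, not part of the original ssterm source: the hex input
# processor is stateful, so a hex digit pair split across two reads still
# decodes to a single byte.
def _demo_input_processor_hexadecimal():
    f = input_processor_hexadecimal()
    assert f(b"48 6") == b"H"  # "48" -> 0x48; the lone "6" is held in state
    assert f(b"9") == b"i"     # held "6" pairs with "9" -> 0x69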
###############################################################################
### Output Processors
###############################################################################
def output_processor_newline(sub):
# Convert constants to byte strings
linesep = os.linesep.encode()
# State to keep track of cut-off newline sequences
state = [b""]
# Substitute sub in buf with console newline
def f(buf):
# Append our left-over newline character match from before
buf = state[0] + buf
state[0] = b""
# Substitute newline matches with console newline
buf = re.sub(sub, linesep, buf)
# If the last character is a part of a match, chop it off and save it
# for later
if len(buf) > 0 and buf[-1] == sub[0]:
state[0] = buf[-1:]
buf = buf[:-1]
return buf
return f
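# Illustrative sketch, not part of the original ssterm source: the newline
# output processor holds back a trailing byte that could begin a match, so a
# CRLF split across two reads is still substituted. Assumes a POSIX platform
# where os.linesep is "\n".
def _demo_output_processor_newline():
    f = output_processor_newline(b"\r\n")
    assert f(b"hello\r") == b"hello"    # trailing "\r" saved for the next call
    assert f(b"\nworld") == b"\nworld"  # saved "\r" + "\n" replaced with os.linesep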
def output_processor_raw(color_chars=b''):
# If we're not color coding
if len(color_chars) == 0:
# Identity function
def f(buf):
return buf
return f
else:
# Color code characters in buf
def f(buf):
# Unfortunately, we can't do a global regex substitution on data with
# its color-coded version, since we could have potentially selected
# color code characters that are present in the ANSI color escape
# sequences in subsequent substitutions. So we operate on the data a
# character at time here.
nbuf = b""
for i,_ in enumerate(buf):
c = buf[i:i+1]
if c in color_chars:
nbuf += Color_Codes[color_chars.index(c)] + c + Color_Code_Reset
else:
nbuf += c
return nbuf
return f
def output_processor_hexadecimal(color_chars=b'', interpret_newlines=False):
# Convert constants to byte strings
linesep = os.linesep.encode()
# State to keep track of our x position
state = [0]
# Format buffer into 2-column hexadecimal representation, with optional
# color coding and newline interpretation.
def f(buf):
nbuf = b""
for i,_ in enumerate(buf):
c = buf[i:i+1]
# Color code this character if it's in our color chars dictionary
if len(color_chars) > 0 and c in color_chars:
nbuf += Color_Codes[color_chars.index(c)] + ("%02x" % ord(c)).encode() + Color_Code_Reset
else:
nbuf += ("%02x" % ord(c)).encode()
state[0] += 1
# Pretty print into two columns
if state[0] == Hexadecimal_Columns/2:
nbuf += b" "
elif state[0] == Hexadecimal_Columns:
nbuf += linesep
state[0] = 0
else:
nbuf += b" "
# Insert a newline if we encounter one and we're interpreting them
# FIXME: This assumes a single character platform newline.
if interpret_newlines and c == linesep:
nbuf += linesep
state[0] = 0
return nbuf
return f
def output_processor_split(color_chars=b'', partial_lines=True):
# Convert constants to byte strings
linesep = os.linesep.encode()
printable_characters = (string.ascii_letters + string.digits + string.punctuation + " ").encode()
# Helper function to format one line of split hexadecimal/ASCII
# representation with optional color coding.
def format_split_line(buf):
nbuf = b""
# Format the hexadecimal representation
for i,_ in enumerate(buf):
c = buf[i:i+1]
# Color code this character if it's in our color chars
if len(color_chars) > 0 and c in color_chars:
nbuf += Color_Codes[color_chars.index(c)] + ("%02x" % ord(c)).encode() + Color_Code_Reset
else:
nbuf += ("%02x" % ord(c)).encode()
# Pretty print into two columns
if (i+1) == Hexadecimal_Columns/2:
nbuf += b" "
else:
nbuf += b" "
# Format hexadecimal column blank spaces
if len(buf) < Hexadecimal_Columns/2:
# Account for the pretty print column separator
nbuf += b" " + b" "*(3*(Hexadecimal_Columns-len(buf)))
else:
nbuf += b" "*(3*(Hexadecimal_Columns-len(buf)))
# Format the ASCII representation
nbuf += b" |"
for i,_ in enumerate(buf):
c = buf[i:i+1]
# Use the character if it's an ASCII printable character, otherwise use
# a dot
if c in printable_characters:
d = buf[i:i+1]
else:
d = b"."
# Color code this character if it's in our color chars
if len(color_chars) > 0 and c in color_chars:
nbuf += Color_Codes[color_chars.index(c)] + d + Color_Code_Reset
else:
nbuf += d
# Format ASCII column blank spaces
if len(buf) < Hexadecimal_Columns:
nbuf += b" "*(Hexadecimal_Columns-len(buf))
nbuf += b"|"
return nbuf
# State to keep track of bytes on the current line
state = [b""]
# Format buf into a split hexadecimal/ASCII representation, with optional
# color coding.
def f(buf):
if len(buf) == 0:
return b""
nbuf = b""
state[0] += buf
# Erase current partial line with \r
if partial_lines and len(state[0]) > 0:
nbuf += b"\r"
# Process one full line at a time
for i in range(0, len(state[0]), Hexadecimal_Columns):
line = state[0][i:i+Hexadecimal_Columns]
if len(line) < Hexadecimal_Columns and partial_lines:
nbuf += format_split_line(line)
elif len(line) == Hexadecimal_Columns:
nbuf += format_split_line(line)
nbuf += linesep
# Remove processed full lines from our state
state[0] = state[0][len(state[0])-(len(state[0]) % Hexadecimal_Columns):len(state[0])]
return nbuf
return f
###############################################################################
### Main Read/Write Loop
###############################################################################
def read_write_loop(serial_fd, stdin_fd, stdout_fd):
### Prepare our input pipeline
input_pipeline = []
# Hexadecimal interpretation
if Format_Options['input_mode'] == "hex":
input_pipeline.append(input_processor_hexadecimal())
# Transmit newline substitution
if TX_Newline_Sub[Format_Options['transmit_newline']] is not None:
input_pipeline.append(input_processor_newline(TX_Newline_Sub[Format_Options['transmit_newline']]))
### Prepare our output pipeline
output_pipeline = []
# Receive newline substitution
if RX_Newline_Sub[Format_Options['receive_newline']] is not None:
output_pipeline.append(output_processor_newline(RX_Newline_Sub[Format_Options['receive_newline']]))
# Raw mode
if Format_Options['output_mode'] == 'raw':
output_pipeline.append(output_processor_raw(Format_Options['color_chars']))
# Split mode
elif Format_Options['output_mode'] == 'split':
output_pipeline.append(output_processor_split(Format_Options['color_chars']))
# Split full mode
elif Format_Options['output_mode'] == 'splitfull':
output_pipeline.append(output_processor_split(Format_Options['color_chars'], partial_lines=False))
# Hexadecimal mode
elif Format_Options['output_mode'] == 'hex':
output_pipeline.append(output_processor_hexadecimal(Format_Options['color_chars']))
# Hexadecimal with newlines mode
elif Format_Options['output_mode'] == 'hexnl':
output_pipeline.append(output_processor_hexadecimal(Format_Options['color_chars'], interpret_newlines=True))
# Select between serial port and stdin file descriptors
read_fds = [serial_fd, stdin_fd]
while True:
ready_read_fds, _, _ = select.select(read_fds, [], [])
if stdin_fd in ready_read_fds:
# Read a buffer from stdin
try:
buf = os.read(stdin_fd, READ_BUF_SIZE)
except Exception as err:
raise Exception("Error reading stdin: %s\n" % str(err))
# If we detect the escape character, quit
if Quit_Escape_Character in buf:
break
# Process the buffer through our input pipeline
for f in input_pipeline:
buf = f(buf)
# Write the buffer to the serial port
try:
os.write(serial_fd, buf)
except Exception as err:
raise Exception("Error writing to serial port: %s\n" % str(err))
if serial_fd in ready_read_fds:
# Read a buffer from the serial port
try:
buf = os.read(serial_fd, READ_BUF_SIZE)
except Exception as err:
raise Exception("Error reading serial port: %s\n" % str(err))
# Break if we hit EOF
if len(buf) == 0:
break
# Process the buffer through our output pipeline
for f in output_pipeline:
buf = f(buf)
# Write the buffer to stdout
try:
os.write(stdout_fd, buf)
except Exception as err:
raise Exception("Error writing to stdout: %s\n" % str(err))
###############################################################################
### Command-Line Options Parsing and Help
###############################################################################
def print_usage():
print("Usage: %s [options] <serial port device>\n"\
"\n"\
"ssterm - simple serial-port terminal\n"\
"https://github.com/vsergeev/ssterm\n"\
"\n"\
"Serial Port Options:\n"\
" -b, --baudrate <rate> Specify baudrate: e.g. 9600, 115200, etc.\n"\
" -d, --databits <number> Specify number of data bits: 5, 6, 7, 8\n"\
" -p, --parity <type> Specify parity: none, odd, even\n"\
" -t, --stopbits <number> Specify number of stop bits: 1, 2\n"\
" -f, --flow-control <type> Specify flow control: none, rtscts, xonxoff\n"\
"\n"\
"Output Formatting Options:\n"\
" -o, --output <mode> Specify output mode\n"\
" raw raw (default)\n"\
" split hex./ASCII split\n"\
" splitfull hex./ASCII split with full lines\n"\
" hex hex.\n"\
" hexnl hex. with newlines\n"\
"\n"\
" --rx-nl <substitution> Enable substitution of the specified newline\n"\
" for the system's newline upon reception\n"\
" cr, lf, crlf, crorlf\n"\
"\n"\
" -c, --color <list> Specify comma-delimited list of characters in\n"\
" ASCII or hex. to color code: A,$,0x0d,0x0a,...\n"\
"\n"\
"Input Formatting Options:\n"\
" -i, --input <mode> Specify input mode\n"\
" raw raw (default)\n"\
" hex hex. interpretation\n"\
"\n"\
" --tx-nl <substitution> Enable substitution of the system's newline\n"\
" for the specified newline upon transmission\n"\
" none, cr, lf, crlf\n"\
"\n"\
" -e, --echo Enable local character echo\n"\
"\n"\
"Miscellaneous:\n"\
" -h, --help Display this usage/help\n"\
" -v, --version Display the program's version\n\n"\
"Quit Escape Character: Ctrl-]\n"\
"\n"\
"Default Options:\n"\
" baudrate: 115200 | databits: 8 | parity: none | stopbits: 1 | flowctrl: none\n"\
" output mode: raw | rx newline: raw | color code: none\n"\
" input mode: raw | tx newline: raw | local echo: off" % sys.argv[0])
def print_version():
print("ssterm version 3.0.0")
def main():
# Parse options
try:
options, args = getopt.gnu_getopt(sys.argv[1:], "b:d:p:t:f:o:c:i:ehv", ["baudrate=", "databits=", "parity=", "stopbits=", "flow-control=", "output=", "color=", "rx-nl=", "input=", "tx-nl=", "echo", "help", "version"])
except getopt.GetoptError as err:
print(str(err), "\n")
print_usage()
sys.exit(-1)
# Update options containers
for opt, opt_arg in options:
# Serial port options
if opt in ("-b", "--baudrate"):
try:
TTY_Options['baudrate'] = int(opt_arg, 10)
except ValueError:
sys.stderr.write("Error: Invalid tty baudrate!\n")
sys.exit(-1)
elif opt in ("-d", "--databits"):
try:
TTY_Options['databits'] = int(opt_arg, 10)
except ValueError:
sys.stderr.write("Error: Invalid tty data bits!\n")
sys.exit(-1)
elif opt in ("-p", "--parity"):
TTY_Options['parity'] = opt_arg
elif opt in ("-t", "--stopbits"):
try:
TTY_Options['stopbits'] = int(opt_arg, 10)
except ValueError:
sys.stderr.write("Error: Invalid tty stop bits!\n")
sys.exit(-1)
elif opt in ("-f", "--flow-control"):
TTY_Options['flow_control'] = opt_arg
# Output Formatting Options
elif opt in ("-o", "--output"):
if not opt_arg in ["raw", "split", "splitfull", "hex", "hexnl"]:
sys.stderr.write("Error: Invalid output mode!\n")
print_usage()
sys.exit(-1)
Format_Options['output_mode'] = opt_arg
elif opt == "--tx-nl":
if not opt_arg in TX_Newline_Sub:
sys.stderr.write("Error: Invalid tx newline substitution!\n")
print_usage()
sys.exit(-1)
Format_Options['transmit_newline'] = opt_arg
elif opt in ("-c", "--color"):
opt_arg = [x for x in opt_arg.split(",") if len(x) >= 1]
if len(opt_arg) > len(Color_Codes):
sys.stderr.write("Error: Maximum number of color code characters (%d) exceeded!\n" % len(Color_Codes))
sys.exit(-1)
# Parse ASCII and hex encoded characters into our color_chars list
for c in opt_arg:
# ASCII character
if len(c) == 1:
Format_Options['color_chars'] += c.encode()
# Hexadecimal number
elif len(c) > 2 and c[0:2] == "0x":
try:
c_int = int(c, 16)
except ValueError:
sys.stderr.write("Error: Unknown color code character: \"%s\"\n" % c)
sys.exit(-1)
Format_Options['color_chars'] += bytes(bytearray([c_int]))
# Unknown
else:
sys.stderr.write("Error: Unknown color code character: \"%s\"\n" % c)
sys.exit(-1)
# Input Formatting Options
elif opt in ("-i", "--input"):
if not opt_arg in ["raw", "hex"]:
sys.stderr.write("Error: Invalid input mode!\n")
print_usage()
sys.exit(-1)
Format_Options['input_mode'] = opt_arg
elif opt == "--rx-nl":
if not opt_arg in RX_Newline_Sub:
sys.stderr.write("Error: Invalid rx newline substitution!\n")
print_usage()
sys.exit(-1)
Format_Options['receive_newline'] = opt_arg
elif opt in ("-e", "--echo"):
Format_Options['echo'] = True
# Miscellaneous Options
elif opt in ("-h", "--help"):
print_usage()
sys.exit(0)
elif opt in ("-v", "--version"):
print_version()
sys.exit(0)
# Make sure a serial port device is specified
if len(args) < 1:
print_usage()
sys.exit(-1)
# Open the serial port with our options
try:
serial_fd = serial_open(args[0], TTY_Options['baudrate'], TTY_Options['databits'], TTY_Options['stopbits'], TTY_Options['parity'], TTY_Options['flow_control'])
except Exception as err:
sys.stderr.write("Error opening serial port: %s\n" % str(err))
sys.exit(-1)
# Open stdin in raw mode
try:
stdin_fd = stdin_raw_open(Format_Options['echo'])
except Exception as err:
sys.stderr.write("Error opening stdin in raw mode: %s\n" % str(err))
sys.exit(-1)
# Open stdout in raw mode
try:
stdout_fd = stdout_raw_open()
except Exception as err:
sys.stderr.write("Error opening stdout in raw mode: %s\n" % str(err))
sys.exit(-1)
# Enter main read/write loop
try:
read_write_loop(serial_fd, stdin_fd, stdout_fd)
except Exception as err:
sys.stderr.write("Error: %s\n" % str(err))
raise
# Reset stdin to buffered mode
try:
stdin_reset()
except Exception as err:
sys.stderr.write("Error resetting stdin to buffered mode: %s\n" % str(err))
sys.exit(-1)
# Close the serial port
try:
serial_close(serial_fd)
except Exception as err:
sys.stderr.write("Error closing serial port: %s\n" % str(err))
sys.exit(-1)
if __name__ == '__main__':
main()
|
|
# coding: utf-8
import os
import socket
import time
import gzip
from behave import given, when, then
from gppylib.commands.base import Command, REMOTE, WorkerPool, CommandResult
from gppylib.db import dbconn
from gppylib.test.behave_utils.utils import getRows, validate_parse_email_file
from gppylib.gparray import GpArray
from gppylib.operations.unix import CheckFile
from gppylib.test.behave_utils.utils import backup_data_to_file, check_table_exists, validate_restore_data_in_file
from gppylib.test.behave_utils.utils import populate_regular_table_data  # used by the rowcount step below
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
comment_start_expr = '-- '
comment_expr = '-- Name: '
comment_data_expr_a = '-- Data: '
comment_data_expr_b = '-- Data for Name: '
len_start_comment_expr = len(comment_start_expr)
@given('the user locks "{table_name}" in "{lock_mode}" using connection "{conn}" on "{dbname}"')
@when('the user locks "{table_name}" in "{lock_mode}" using connection "{conn}" on "{dbname}"')
@then('the user locks "{table_name}" in "{lock_mode}" using connection "{conn}" on "{dbname}"')
def impl(context, table_name, lock_mode, conn, dbname):
query = "begin; lock table %s in %s" % (table_name, lock_mode)
conn = dbconn.connect(dbconn.DbURL(dbname=dbname)) # todo not truthful about using conn parameter
dbconn.execSQL(conn, query)
context.conn = conn
@when('the user runs the query "{query}" in database "{dbname}" in a worker pool "{poolname}" as soon as pg_class is locked')
@then('the user runs the query "{query}" in database "{dbname}" in a worker pool "{poolname}" as soon as pg_class is locked')
def impl(context, query, dbname, poolname):
pool = WorkerPool(numWorkers=1)
cmd = on_unlock(query,dbname)
pool.addCommand(cmd)
if not hasattr(context, 'pool'):
context.pool = {}
context.pool[poolname] = pool
context.cmd = cmd
@when('the user runs the "{cmd}" in a worker pool "{poolname}"')
@then('the user runs the "{cmd}" in a worker pool "{poolname}"')
def impl(context, cmd, poolname):
command = Command(name='run gpcrondump in a separate thread', cmdStr=cmd)
pool = WorkerPool(numWorkers=1)
pool.addCommand(command)
if not hasattr(context, 'pool'):
context.pool = {}
context.pool[poolname] = pool
context.cmd = cmd
class on_unlock(Command):
def __init__(self, query, dbname):
self.dbname = dbname
self.query = query
self.result = 1
self.completed = False
self.halt = False
Command.__init__(self, 'on unlock', 'on unlock', ctxt=None, remoteHost=None)
def get_results(self):
return CommandResult(self.result, '', '', self.completed, self.halt)
def run(self):
while check_pg_class_lock(self.dbname) != 1:
pass
with dbconn.connect(dbconn.DbURL(dbname=self.dbname)) as conn:
dbconn.execSQL(conn, self.query)
self.result = 0
self.completed = True
self.halt = False
def check_pg_class_lock(dbname):
seg_count = 1
query = """select count(*)
from pg_locks
where relation in (select oid from pg_class where relname='pg_class')
and locktype='relation' and mode='ExclusiveLock'"""
row_count = getRows(dbname, query)[0][0]
return row_count
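# Illustrative sketch, not part of the test suite: the busy-wait in
# on_unlock.run() above can be bounded with a timeout. The helper name and
# the timeout handling are assumptions added for illustration.
def _wait_for_pg_class_lock(dbname, timeout_secs=30):
    deadline = time.time() + timeout_secs
    while check_pg_class_lock(dbname) != 1:
        if time.time() > deadline:
            raise Exception("Timed out waiting for a pg_class lock in '%s'" % dbname)
        time.sleep(0.1)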
@given('the "{backup_pg}" has a lock on the pg_class table in "{dbname}"')
@when('the "{backup_pg}" has a lock on the pg_class table in "{dbname}"')
@then('the "{backup_pg}" has a lock on the pg_class table in "{dbname}"')
def impl(context, dbname, backup_pg):
seg_count = 1
timeout = 2
while timeout > 0:
row_count = check_pg_class_lock(dbname)
time.sleep(1)
timeout -= 1
if row_count != seg_count:
raise Exception("Incorrect (number of) lock/locks on pg_class, expected count = %s, received count = %s" % (seg_count, row_count))
@then('the worker pool "{poolname}" is cleaned up')
@when('the worker pool "{poolname}" is cleaned up')
def impl(context, poolname):
pool = context.pool[poolname]
if pool:
pool.join()
for c in pool.getCompletedItems():
result = c.get_results()
context.ret_code = result.rc
context.stdout_message = result.stdout
context.error_message = result.stderr
pool.haltWork()
pool.joinWorkers()
else:
        raise Exception('Worker pool is None. The behave step that initializes the worker pool is probably missing.')
@given('the user drops "{tablename}" in "{dbname}" in a worker pool "{poolname}"')
@then('the user drops "{tablename}" in "{dbname}" in a worker pool "{poolname}"')
@when('the user drops "{tablename}" in "{dbname}" in a worker pool "{poolname}"')
def impl(context, tablename, dbname, poolname):
pool = WorkerPool(numWorkers=1)
cmd = Command(name='drop a table in a worker pool', cmdStr="""psql -c "DROP TABLE %s" -d %s""" % (tablename, dbname))
pool.addCommand(cmd)
if not hasattr(context, 'pool'):
context.pool = {}
context.pool[poolname] = pool
@given('the user closes the connection "{conn_name}"')
@when('the user closes the connection "{conn_name}"')
@then('the user closes the connection "{conn_name}"')
def impl(context, conn_name):
query = """ROLLBACK;"""
dbconn.execSQL(context.conn, query)
context.conn.close()
@given('verify that "{backup_pg}" has no lock on the pg_class table in "{dbname}"')
@when('verify that "{backup_pg}" has no lock on the pg_class table in "{dbname}"')
@then('verify that "{backup_pg}" has no lock on the pg_class table in "{dbname}"')
def impl(context, backup_pg, dbname):
query = """select count(*)
from pg_locks
where relation in (select oid from pg_class where relname='pg_class')
and locktype='relation' and mode='ExclusiveLock'"""
row_count = getRows(dbname, query)[0][0]
if row_count != 0:
raise Exception("Found a ExclusiveLock on pg_class")
@given('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data and {rowcount} rows')
@when('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data and {rowcount} rows')
@then('there is a "{tabletype}" table "{table_name}" with compression "{compression_type}" in "{dbname}" with data and {rowcount} rows')
def impl(context, tabletype, table_name, compression_type, dbname, rowcount):
populate_regular_table_data(context, tabletype, table_name, compression_type, dbname, int(rowcount))
@given('verify the metadata dump file syntax under "{directory}" for comments and types')
@when('verify the metadata dump file syntax under "{directory}" for comments and types')
@then('verify the metadata dump file syntax under "{directory}" for comments and types')
def impl(context, directory):
names = ["Name", "Data", "Data for Name"]
types = ["TABLE", "TABLE DATA", "EXTERNAL TABLE", "ACL", "CONSTRAINT", "COMMENT", "PROCEDURAL LANGUAGE", "SCHEMA", "AOSTORAGEOPTS"]
master_dump_dir = directory if len(directory.strip()) != 0 else master_data_dir
metadata_path = __get_dump_metadata_path(context, master_dump_dir)
# gzip in python 2.6 does not support __exit__, so it cannot be used in "with"
# with gzip.open(metadata_path, 'r') as fd:
fd = None
try:
fd = gzip.open(metadata_path, 'r')
line = None
for line in fd:
if (line[:3] == comment_start_expr):
if (line.startswith(comment_expr) or line.startswith(comment_data_expr_a) or line.startswith(comment_data_expr_b)):
name_k, type_k, schema_k = get_comment_keys(line)
if (name_k not in names and type_k != "Type" and schema_k != "Schema"):
raise Exception("Unknown key in the comment line of the metdata_file '%s'. Please check and confirm if the key is correct" % (metadata_file))
name_v, type_v, schema_v = get_comment_values(line)
if (type_v not in types):
raise Exception("Value of Type in the comment line '%s' of the metadata_file '%s' does not fall under the expected list %s. Please check if the value is correct" %(type_v, metadata_file, types))
if not line:
raise Exception('Metadata file has no data')
finally:
if fd:
fd.close()
@given('verify the metadata dump file does not contain "{target}"')
@when('verify the metadata dump file does not contain "{target}"')
@then('verify the metadata dump file does not contain "{target}"')
def impl(context, target):
metadata_path = __get_dump_metadata_path(context, master_data_dir)
fd = None
try:
fd = gzip.open(metadata_path, 'r')
line = None
for line in fd:
if target in line:
raise Exception("Unexpectedly found %s in metadata file %s" % (target, metadata_path))
if not line:
raise Exception('Metadata file has no data')
finally:
if fd:
fd.close()
@given('verify the metadata dump file does contain "{target}"')
@when('verify the metadata dump file does contain "{target}"')
@then('verify the metadata dump file does contain "{target}"')
def impl(context, target):
metadata_path = __get_dump_metadata_path(context, master_data_dir)
fd = None
try:
fd = gzip.open(metadata_path, 'r')
line = None
for line in fd:
if target in line:
return
if not line:
raise Exception('Metadata file has no data')
raise Exception("Missing text %s in metadata file %s" % (target, metadata_path))
finally:
if fd:
fd.close()
def __get_dump_metadata_path(context, dump_dir):
filename = "gp_dump_1_1_%s.gz" % context.backup_timestamp
metadata_path = os.path.join(dump_dir, "db_dumps", context.backup_timestamp[0:8], filename)
return metadata_path
def get_comment_keys(line):
try:
temp = line[len_start_comment_expr:]
tokens = temp.strip().split(';')
name = tokens[0].split(':')[0].strip()
type = tokens[1].split(':')[0].strip()
schema = tokens[2].split(':')[0].strip()
except:
return (None, None, None)
return (name, type, schema)
def get_comment_values(line):
try:
temp = line[len_start_comment_expr:]
tokens = temp.strip().split(';')
name = tokens[0].split(':')[1].strip()
type = tokens[1].split(':')[1].strip()
schema = tokens[2].split(':')[1].strip()
except:
return (None, None, None)
return (name, type, schema)
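# Illustrative sketch, not part of the test suite: the two parsers above split
# a metadata dump comment line into its keys and values.
def _demo_comment_parsers():
    line = "-- Name: foo; Type: TABLE; Schema: public"
    assert get_comment_keys(line) == ("Name", "Type", "Schema")
    assert get_comment_values(line) == ("foo", "TABLE", "public")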
@given('{command} should print {out_msg} to stdout {num} times')
@when('{command} should print {out_msg} to stdout {num} times')
@then('{command} should print {out_msg} to stdout {num} times')
def impl(context, command, out_msg, num):
msg_list = context.stdout_message.split('\n')
msg_list = [x.strip() for x in msg_list]
count = msg_list.count(out_msg)
if count != int(num):
raise Exception("Expected %s to occur %s times. Found %d" % (out_msg, num, count))
@given('verify that {filetype} file is generated in {dir}')
@when('verify that {filetype} file is generated in {dir}')
@then('verify that {filetype} file is generated in {dir}')
def impl(context, filetype, dir):
if dir == 'master_data_directory':
dir = master_data_dir
if filetype == 'report':
filename = '%s/gp_restore_%s.rpt' % (dir, context.backup_timestamp)
if not os.path.isfile(filename):
raise Exception('Report file %s is not present in master data directory' % filename)
elif filetype == 'status':
gparray = GpArray.initFromCatalog(dbconn.DbURL())
if dir == 'segment_data_directory':
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
host = seg.getSegmentHostName()
seg_data_dir = seg.getSegmentDataDirectory()
cmd = Command('check status file', "ls %s/gp_restore_status_*_%s" % (seg_data_dir, context.backup_timestamp), ctxt=REMOTE, remoteHost=host)
cmd.run(validateAfter=True)
results = cmd.get_results()
if not results.stdout.strip():
raise Exception('Status file ending with timestamp %s is not present in segment %s data directory' % (context.backup_timestamp, host))
else:
count = 0
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
host = seg.getSegmentHostName()
cmd = Command('check status file', "ls %s/gp_restore_status_*_%s" % (dir, context.backup_timestamp), ctxt=REMOTE, remoteHost=host)
cmd.run(validateAfter=True)
results = cmd.get_results()
if results.stdout.strip():
count += 1
else:
raise Exception('Status file not found in segment: %s' % host)
segs = len(primary_segs)
if count != segs:
raise Exception('Expected %d status file but found %d' % (segs, count))
@given('there are no {filetype} files in "{dir}"')
@when('there are no {filetype} files in "{dir}"')
@then('there are no {filetype} files in "{dir}"')
def impl(context, filetype, dir):
if filetype == 'report':
if dir == 'master_data_directory':
dir = master_data_dir
filenames = os.listdir(dir)
for filename in filenames:
if filename.startswith('gp_restore') and filename.endswith('.rpt'):
filename = '%s/%s' % (dir, filename)
os.remove(filename)
if filetype == 'status':
gparray = GpArray.initFromCatalog(dbconn.DbURL())
if dir == 'segment_data_directory':
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
host = seg.getSegmentHostName()
seg_data_dir = seg.getSegmentDataDirectory()
cmd = Command('remove status file', "rm -f %s/gp_restore_status_*" % (seg_data_dir), ctxt=REMOTE, remoteHost=host)
cmd.run(validateAfter=True)
else:
primary_segs = [seg for seg in gparray.getDbList() if seg.isSegmentPrimary()]
for seg in primary_segs:
host = seg.getSegmentHostName()
cmd = Command('remove status file', "rm -f %s/gp_restore_status_*" % dir, ctxt=REMOTE, remoteHost=host)
cmd.run(validateAfter=True)
@given('the mail_contacts file does not exist')
@then('the mail_contacts file does not exist')
def impl(context):
if "HOME" in os.environ:
home_mail_file = os.path.join(os.environ["HOME"], "mail_contacts")
if CheckFile(home_mail_file).run():
os.remove(home_mail_file)
if "GPHOME" in os.environ:
mail_file = os.path.join(os.environ["GPHOME"], "bin", "mail_contacts")
if CheckFile(mail_file).run():
os.remove(mail_file)
@given('the mail_contacts file exists')
def impl(context):
context.email_contact = "example_test@gopivotal.com"
if "HOME" in os.environ:
home_mail_file = os.path.join(os.environ["HOME"], "mail_contacts")
mail_contact = home_mail_file
elif "GPHOME" in os.environ:
mail_file = os.path.join(os.environ["GPHOME"], "bin", "mail_contacts")
mail_contact = mail_file
    with open(mail_contact, 'w+') as f:
        f.write(context.email_contact)
@given('the yaml file "{email_file_path}" stores email details is in proper format')
def impl(context, email_file_path):
try:
validate_parse_email_file(context, email_file_path)
except Exception as e:
raise Exception(str(e))
@given('the yaml file "{email_file_path}" stores email details is not in proper format')
def impl(context, email_file_path):
exception_raised = False
try:
validate_parse_email_file(context, email_file_path)
except Exception as e:
exception_raised = True
if exception_raised == False:
raise Exception("File is in proper format")
@then('verify that emails are sent to the given contacts with appropriate messages after backup of "{dblist}"')
def impl(context, dblist):
cmd_list = []
sending_email_list = []
database_list = dblist.split(',')
stdout = context.stdout_message
for line in stdout.splitlines():
if "Sending mail to" in line:
            mail_str = line.split(':-')[1]
            sending_email_list.append(mail_str.strip())
if "Email command string=" in line:
log_msg, delim, txt = line.partition('=')
cmd_list.append(txt.strip())
if len(sending_email_list) != len(database_list):
raise Exception("Emails are not sent properly")
count = 0
for dbname in database_list:
#expected email details
for email in context.email_details:
if dbname in email['DBNAME']:
expected_from = email['FROM']
expected_sub = email['SUBJECT']
else:
expected_sub = "Report from gpcrondump on host %s [COMPLETED]" % socket.gethostname()
#original email details
result_cmd = cmd_list[count]
str = result_cmd[result_cmd.find("-s")+4:]
result_sub = (str[:str.find('"')]).strip()
if expected_sub != result_sub:
raise Exception("Subject of the sent email is not correct")
if result_cmd.find("-- -f") >= 0:
result_from = result_cmd[result_cmd.find("-- -f")+6:]
if expected_from != result_from:
raise Exception("ef : RF", expected_from, result_from, count)
#raise Exception("Sender of the sent email is not correct")
count += 1
@then('gpcrondump should print unable to send dump email notification to stdout as warning')
def impl(context):
stdout = context.stdout_message
found = False
for line in stdout.splitlines():
if "Unable to send dump email notification" in line:
found = True
if found is False:
raise Exception("'Unable to send dump email notification' exception is not raised")
@then('verify that function is backedup correctly in "{dumpfile}"')
def impl(context, dumpfile):
buf = """CREATE ORDERED AGGREGATE agg_array(anyelement) (
SFUNC = array_append,
STYPE = anyarray,
INITCOND = '{}'
);"""
    with open(dumpfile) as fd:
        if buf not in fd.read():
            raise Exception("pg_dump did not backup aggregate functions correctly.")
@given('verify that a role "{role_name}" exists in database "{dbname}"')
@then('verify that a role "{role_name}" exists in database "{dbname}"')
def impl(context, role_name, dbname):
query = "select rolname from pg_roles where rolname = '%s'" % role_name
conn = dbconn.connect(dbconn.DbURL(dbname=dbname))
try:
result = getRows(dbname, query)[0][0]
if result != role_name:
raise Exception("Role %s does not exist in database %s." % (role_name, dbname))
except:
raise Exception("Role %s does not exist in database %s." % (role_name, dbname))
@given('there is a list of files "{filenames}" of tables "{table_list}" in "{dbname}" exists for validation')
@when('there is a list of files "{filenames}" of tables "{table_list}" in "{dbname}" exists for validation')
@then('there is a list of files "{filenames}" of tables "{table_list}" in "{dbname}" exists for validation')
def impl(context, filenames, table_list, dbname):
files = [f for f in filenames.split(',')]
tables = [t for t in table_list.split(',')]
for t,f in zip(tables,files):
backup_data_to_file(context, t, dbname, f)
@when('verify with backedup file "{filename}" that there is a "{table_type}" table "{tablename}" in "{dbname}" with data')
@then('verify with backedup file "{filename}" that there is a "{table_type}" table "{tablename}" in "{dbname}" with data')
def impl(context, filename, table_type, tablename, dbname):
if not check_table_exists(context, dbname=dbname, table_name=tablename, table_type=table_type):
raise Exception("Table '%s' does not exist when it should" % tablename)
validate_restore_data_in_file(context, tablename, dbname, filename)
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from acq4.util import Qt
import pyqtgraph as pg
from acq4.util.debug import Profiler
Ui_Form = Qt.importTemplate('.atlasCtrlTemplate')
class Atlas(Qt.QObject):
    """An Atlas is responsible for determining the position of images, cells, scan data, etc.
    relative to a common coordinate system."""
    DBIdentity = None
def __init__(self, state=None):
Qt.QObject.__init__(self)
if state is not None:
self.restoreState(state)
def ctrlWidget(self, host):
raise Exception("Must be reimplemented in subclass.")
def mapToAtlas(self, obj):
"""Maps obj into atlas coordinates. Obj can be any object mappable by QMatrix4x4"""
raise Exception("Must be reimplemented in subclass.")
def getState(self):
raise Exception("Must be reimplemented in subclass.")
def setState(self, state):
raise Exception("Must be reimplemented in subclass.")
def restoreState(self, state):
raise Exception("Must be reimplemented in subclass.")
def name(self):
"""Returns the name of the atlas"""
raise Exception("Must be reimplemented in subclass.")
#def close(self):
#pass
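# Illustrative sketch, not part of acq4: a minimal Atlas subclass showing the
# interface contract enforced above. The class name and identity transform are
# hypothetical; a real atlas maps through a QMatrix4x4 and persists real state.
class _IdentityAtlas(Atlas):
    DBIdentity = "identityAtlas"  # required before AtlasCtrlWidget can set up its DB tables
    def name(self):
        return "IdentityAtlas"
    def mapToAtlas(self, obj):
        return obj  # identity mapping
    def getState(self):
        return {}
    def setState(self, state):
        pass
    def restoreState(self, state):
        pass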
class AtlasCtrlWidget(Qt.QWidget):
def __init__(self, atlas, host):
Qt.QWidget.__init__(self)
self.sliceDir = None
#self.blockUpdate = 0 ## used in CNAtlas to block re-rendering
self.atlas = atlas
self.host = host
self.canvas = host.getElement('Canvas')
self.dataManager = host.dataManager()
self.dataModel = self.dataManager.dataModel()
self.loader = host.getElement('File Loader')
self.loader.sigBaseChanged.connect(self.baseDirChanged)
self.ctrl = Qt.QWidget()
self.ui = Ui_Form()
self.ui.setupUi(self)
self.ui.setSliceBtn.clicked.connect(self.setSliceClicked)
self.ui.storeBtn.clicked.connect(self.storeBtnClicked)
#self.baseDirChanged()
## set up two tables for storing atlas positions of cells and stimulation sites
        if atlas.DBIdentity is None:
            raise Exception("Atlas needs to have a DBIdentity specified.")
tables = {
atlas.DBIdentity+"_cell": "%s_Cell" %atlas.name(),
atlas.DBIdentity+"_protocol": "%s_Protocol" %atlas.name(),
}
self.ui.dbWidget.setDataManager(self.dataManager)
self.ui.dbWidget.setTables(tables)
def loadState(self):
raise Exception("Must be re-implemented in subclass.")
def saveState(self):
raise Exception("Must be re-implemented in subclass.")
def generateDataArray(self, positions, dirType):
"""Return a tuple (data, fields). Data should be a record array with the column names/values to be stored.
Fields should be an OrderedDict of column names : sql datatype."""
raise Exception("Must be re-implemented in subclass")
def baseDirChanged(self):
        ## file loader base dir changed; if it's a slice, set it now.
try:
self.setSliceDir(self.loader.baseDir())
except:
pass
def setSliceClicked(self):
dh = self.loader.selectedFiles()
if len(dh) != 1:
raise Exception('Select a slice directory from the file tree.')
self.setSliceDir(dh[0])
def setSliceDir(self, dh):
if not dh.isDir() or not self.dataModel.dirType(dh) == 'Slice':
#self.sliceRoi.setVisible(False)
self.sliceDir = None
self.ui.sliceLabel.setText('None')
raise Exception('Selected file is not a slice directory')
self.sliceDir = dh
#self.sliceRoi.setVisible(True)
base = self.loader.baseDir()
if dh is base:
name = dh.shortName()
else:
name = dh.name(relativeTo=base)
self.ui.sliceLabel.setText(name)
if self.atlas.name() in dh.info().get('atlas', {}):
self.loadState()
#else:
# self.updateAtlas()
def storeBtnClicked(self):
self.ui.storeBtn.processing("Storing...")
try:
self.storeToDB()
self.ui.storeBtn.success("Stored!")
except:
self.ui.storeBtn.failure()
raise
def storeToDB(self):
## collect list of cells and scans under this slice,
## read all positions with userTransform corrections
prof = Profiler("Atlas.storeToDB", disabled=True)
loaded = self.host.getLoadedFiles()
cells = []
prots = []
for f in loaded:
if not f.isDir() or not f.isGrandchildOf(self.sliceDir):
continue
if self.dataModel.dirType(f) == 'Cell':
info = f.info()
if 'userTransform' not in info:
continue
cells.append((f, info['userTransform']['pos']))
elif self.dataModel.dirType(f) == 'Protocol':
info = f.info()
scanInfo = info.get('Scanner', None)
if scanInfo is None:
continue
tr = pg.SRTTransform(info.get('userTransform', None))
pos = tr.map(*scanInfo['position'])
prots.append((f, pos))
elif self.dataModel.dirType(f) == 'ProtocolSequence':
info = f.info()
tr = pg.SRTTransform(info.get('userTransform', None))
for subName in f.subDirs():
subf = f[subName]
scanInfo = subf.info().get('Scanner', None)
if scanInfo is None:
continue
pos = tr.map(*scanInfo['position'])
prots.append((subf, pos))
prof.mark("made list of positions")
for ident, dirType, positions in [('_cell', 'Cell', cells), ('_protocol', 'Protocol', prots)]:
## map positions, build data tables
data, fields = self.generateDataArray(positions, dirType)
prof.mark("got data arrays for %s" %dirType)
#dirColumn = dirType + 'Dir'
#data = np.empty(len(positions), dtype=[('SliceDir', object), (dirColumn, object), ('right', float), ('anterior', float), ('dorsal', float)])
#for i in range(len(positions)):
#dh, pos = positions[i]
#mapped = self.atlas.mapToAtlas(pg.Point(pos))
##print dh, pos
##print " right:", mapped.x()
##print " anter:", mapped.y()
##print " dorsl:", mapped.z()
#data[i] = (self.sliceDir, dh, mapped.x(), mapped.y(), mapped.z())
## write to DB
db = self.ui.dbWidget.getDb()
prof.mark('got db')
table = self.ui.dbWidget.getTableName(self.atlas.DBIdentity+ident)
prof.mark('got table')
#fields = collections.OrderedDict([
#('SliceDir', 'directory:Slice'),
#(dirColumn, 'directory:'+dirType),
#('right', 'real'),
#('anterior', 'real'),
#('dorsal', 'real'),
#])
## Make sure target table exists and has correct columns
db.checkTable(table, owner=self.atlas.DBIdentity+ident, columns=fields, create=True)
prof.mark('checked table')
## delete old -- This is the slow part!
old = db.select(table, where={'SliceDir':self.sliceDir}, toArray=True)
if old is not None: ## only do deleting if there is already data stored for this slice -- try to speed things up
for source in set(data[dirType+'Dir']):
if source in old[dirType+'Dir']: ## check that source is in the old data before we delete it - try to speed things up
db.delete(table, where={dirType+'Dir': source})
prof.mark('deleted old data')
## write new
db.insert(table, data)
prof.mark("added %s data to db" %dirType)
prof.finish()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
@test_util.run_v1_only("b/123903858: Add eager and V2 test coverage")
class MapDefunTest(test_base.DatasetTestBase):
def testNoIntraOpLimit(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(
simple_fn, [elems], [dtypes.int32], [(2,)],
max_intra_op_parallelism=0)[0]
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunSimple(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(2,)])[0]
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunMismatchedTypes(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
return math_ops.cast(x, dtypes.float64)
nums = [1, 2, 3, 4, 5, 6]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0]
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(r)
def testMapDefunReduceDim(self):
# Tests where the output has a different rank from the input
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return array_ops.gather(x, 0)
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])[0]
expected = constant_op.constant([1, 3, 5])
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunMultipleOutputs(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return (x, math_ops.cast(x * 2 + 3, dtypes.float64))
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(fn, [elems], [dtypes.int32, dtypes.float64], [(2,),
(2,)])
expected = [elems, elems * 2 + 3]
self.assertAllEqual(self.evaluate(r), self.evaluate(expected))
def testMapDefunShapeInference(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return x
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])[0]
self.assertEqual(result.get_shape(), (3, 2))
def testMapDefunPartialShapeInference(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def fn(x):
return x
elems = array_ops.placeholder(dtypes.int64, (None, 2))
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [(2,)])
self.assertEqual(result[0].get_shape().as_list(), [None, 2])
def testMapDefunRaisesErrorOnRuntimeShapeMismatch(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec(None, dtypes.int32),
tensor_spec.TensorSpec(None, dtypes.int32)
])
def fn(x, y):
return x, y
elems1 = array_ops.placeholder(dtypes.int32)
elems2 = array_ops.placeholder(dtypes.int32)
result = map_defun.map_defun(fn, [elems1, elems2],
[dtypes.int32, dtypes.int32], [(), ()])
with self.cached_session() as sess:
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"All inputs must have the same dimension 0."):
sess.run(result, feed_dict={elems1: [1, 2, 3, 4, 5], elems2: [1, 2, 3]})
def testMapDefunRaisesDefunError(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, 0)]):
return array_ops.identity(x)
elems = constant_op.constant([0, 0, 0, 37, 0])
result = map_defun.map_defun(fn, [elems], [dtypes.int32], [()])
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(result)
def testMapDefunCancelledCorrectly(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([5], dtypes.int64)])
def defun(x):
# x has leading dimension 5, this will raise an error
return array_ops.gather(x, 10)
c = array_ops.tile(
array_ops.expand_dims(
constant_op.constant([1, 2, 3, 4, 5], dtype=dtypes.int64), 0),
[100, 1])
map_defun_op = map_defun.map_defun(defun, [c], [dtypes.int64], [()])[0]
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r"indices = 10 is not in \[0, 5\)"):
self.evaluate(map_defun_op)
def testMapDefunWithUnspecifiedOutputShape(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
res = x * 2 + 3
return (res, res + 1, res + 2)
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems],
[dtypes.int32, dtypes.int32, dtypes.int32],
[None, (None,), (2,)])
expected = elems * 2 + 3
self.assertAllEqual(self.evaluate(r[0]), self.evaluate(expected))
self.assertAllEqual(self.evaluate(r[1]), self.evaluate(expected + 1))
self.assertAllEqual(self.evaluate(r[2]), self.evaluate(expected + 2))
def testMapDefunWithDifferentOutputShapeEachRun(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
elems = array_ops.placeholder(dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [None])[0]
with session.Session() as sess:
self.assertAllEqual(sess.run(r, feed_dict={elems: [0]}), [3])
self.assertAllEqual(
sess.run(r, feed_dict={elems: [[0], [1]]}), [[3], [5]])
def testMapDefunWithWrongOutputShape(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([2], dtypes.int32)])
def simple_fn(x):
return x * 2 + 3
nums = [[1, 2], [3, 4], [5, 6]]
elems = constant_op.constant(nums, dtype=dtypes.int32, name="data")
r = map_defun.map_defun(simple_fn, [elems], [dtypes.int32], [(1,)])[0]
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(r)
def testMapDefunWithInvalidInput(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int32)])
def simple_fn(x):
return x * 2
c = constant_op.constant(2)
with self.assertRaises(ValueError):
# Fails at graph construction time for inputs with known shapes.
r = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [None])[0]
p = array_ops.placeholder(dtypes.int32)
r = map_defun.map_defun(simple_fn, [p], [dtypes.int32], [None])[0]
with session.Session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(r, feed_dict={p: 0})
def _assert_op_cancelled(self, sess, map_defun_op):
with self.assertRaisesRegexp(errors.CancelledError, "was cancelled"):
self.evaluate(map_defun_op)
def testMapDefunWithParentCancellation(self):
# Checks that a cancellation of the parent graph is threaded through to
# MapDefunOp correctly.
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def simple_fn(x):
del x
queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())
# Blocking
return queue.dequeue_many(5)
c = constant_op.constant([1, 2, 3, 4, 5])
map_defun_op = map_defun.map_defun(simple_fn, [c], [dtypes.int32], [()])[0]
with self.cached_session() as sess:
thread = self.checkedThread(
self._assert_op_cancelled, args=(sess, map_defun_op))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
def testMapDefunWithCapturedInputs(self):
c = constant_op.constant(2)
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
return x + c
x = constant_op.constant([1, 2, 3, 4])
map_defun_op = map_defun.map_defun(fn, [x], [dtypes.int32], [()])[0]
expected = x + c
self.assertAllEqual(self.evaluate(expected), self.evaluate(map_defun_op))
def testMapDefunWithVariantTensor(self):
@function.defun(
input_signature=[tensor_spec.TensorSpec([], dtypes.variant)])
def fn(x):
return x
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant)
serialized = array_ops.stack([serialized, serialized])
map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.variant],
[None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertSparseValuesEqual(expected, actual)
def testMapDefunWithVariantTensorAsCaptured(self):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.variant)
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def fn(x):
del x
return serialized
x = constant_op.constant([0, 0])
map_defun_op = map_defun.map_defun(fn, [x], [dtypes.variant], [None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertSparseValuesEqual(expected, actual)
def testMapDefunWithStrTensor(self):
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.string)])
def fn(x):
return x
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
serialized = sparse_ops.serialize_sparse_v2(st, out_type=dtypes.string)
serialized = array_ops.stack([serialized, serialized])
map_defun_op = map_defun.map_defun(fn, [serialized], [dtypes.string],
[None])[0]
deserialized = sparse_ops.deserialize_sparse(map_defun_op, dtypes.int32)
expected = sparse_tensor.SparseTensorValue(
indices=[[0, 0, 0], [0, 1, 2], [1, 0, 0], [1, 1, 2]],
values=[1, 2, 1, 2],
dense_shape=[2, 3, 4])
actual = self.evaluate(deserialized)
self.assertSparseValuesEqual(expected, actual)
if __name__ == "__main__":
test.main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image
Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.contrib.keras.python.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.applications.imagenet_utils import preprocess_input # pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.engine.topology import get_source_inputs
from tensorflow.contrib.keras.python.keras.layers import Conv2D
from tensorflow.contrib.keras.python.keras.layers import Dense
from tensorflow.contrib.keras.python.keras.layers import Flatten
from tensorflow.contrib.keras.python.keras.layers import GlobalAveragePooling2D
from tensorflow.contrib.keras.python.keras.layers import GlobalMaxPooling2D
from tensorflow.contrib.keras.python.keras.layers import Input
from tensorflow.contrib.keras.python.keras.layers import MaxPooling2D
from tensorflow.contrib.keras.python.keras.models import Model
from tensorflow.contrib.keras.python.keras.utils import layer_utils
from tensorflow.contrib.keras.python.keras.utils.data_utils import get_file
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
def VGG16(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the VGG16 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Arguments:
include_top: whether to include the 3 fully-connected
layers at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
    input_shape: optional shape tuple, only to be specified
      if `include_top` is False (otherwise the input shape
      has to be `(224, 224, 3)` (with `channels_last` data format)
      or `(3, 224, 224)` (with `channels_first` data format)).
      It should have exactly 3 input channels,
      and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=224,
min_size=48,
data_format=K.image_data_format(),
include_top=include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
# Block 1
x = Conv2D(
64, (3, 3), activation='relu', padding='same',
name='block1_conv1')(img_input)
x = Conv2D(
64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
# Block 2
x = Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(
128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
# Block 3
x = Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(
256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
# Block 4
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
# Block 5
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(
512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='vgg16')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models')
else:
weights_path = get_file(
'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='block5_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1')
layer_utils.convert_dense_weights_data_format(dense, shape,
'channels_first')
return model
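# A minimal usage sketch (not part of the original module; 'elephant.jpg' is a
# hypothetical local file, and the image helpers are assumed to live in the
# sibling `preprocessing.image` package, as in stock Keras):
#
#   import numpy as np
#   from tensorflow.contrib.keras.python.keras.preprocessing import image
#
#   model = VGG16(weights='imagenet')
#   img = image.load_img('elephant.jpg', target_size=(224, 224))
#   x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
#   preds = model.predict(x)
#   print(decode_predictions(preds, top=3)[0])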
|
|
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hashlib
import itertools
import os
import sys
# configuration: a list of either strings or 2-tuples of strings
# a single string represents a static grpc_mdstr
# a 2-tuple represents a static grpc_mdelem (and appropriate grpc_mdstrs will
# also be created)
CONFIG = [
'grpc-timeout',
'grpc-internal-encoding-request',
'grpc-payload-bin',
':path',
'grpc-encoding',
'grpc-accept-encoding',
'user-agent',
':authority',
'host',
'grpc-message',
'grpc-status',
'grpc-tracing-bin',
'grpc-stats-bin',
'',
('grpc-status', '0'),
('grpc-status', '1'),
('grpc-status', '2'),
('grpc-encoding', 'identity'),
('grpc-encoding', 'gzip'),
('grpc-encoding', 'deflate'),
('te', 'trailers'),
('content-type', 'application/grpc'),
(':method', 'POST'),
(':status', '200'),
(':status', '404'),
(':scheme', 'http'),
(':scheme', 'https'),
(':scheme', 'grpc'),
(':authority', ''),
(':method', 'GET'),
(':method', 'PUT'),
(':path', '/'),
(':path', '/index.html'),
(':status', '204'),
(':status', '206'),
(':status', '304'),
(':status', '400'),
(':status', '500'),
('accept-charset', ''),
('accept-encoding', ''),
('accept-encoding', 'gzip, deflate'),
('accept-language', ''),
('accept-ranges', ''),
('accept', ''),
('access-control-allow-origin', ''),
('age', ''),
('allow', ''),
('authorization', ''),
('cache-control', ''),
('content-disposition', ''),
('content-encoding', ''),
('content-language', ''),
('content-length', ''),
('content-location', ''),
('content-range', ''),
('content-type', ''),
('cookie', ''),
('date', ''),
('etag', ''),
('expect', ''),
('expires', ''),
('from', ''),
('host', ''),
('if-match', ''),
('if-modified-since', ''),
('if-none-match', ''),
('if-range', ''),
('if-unmodified-since', ''),
('last-modified', ''),
('load-reporting-initial', ''),
('load-reporting-trailing', ''),
('link', ''),
('location', ''),
('max-forwards', ''),
('proxy-authenticate', ''),
('proxy-authorization', ''),
('range', ''),
('referer', ''),
('refresh', ''),
('retry-after', ''),
('server', ''),
('set-cookie', ''),
('strict-transport-security', ''),
('transfer-encoding', ''),
('user-agent', ''),
('vary', ''),
('via', ''),
('www-authenticate', ''),
]
COMPRESSION_ALGORITHMS = [
'identity',
'deflate',
'gzip',
]
# utility: mangle the name of a config
def mangle(elem):
xl = {
'-': '_',
':': '',
'/': 'slash',
'.': 'dot',
',': 'comma',
' ': '_',
}
def m0(x):
if not x: return 'empty'
r = ''
for c in x:
put = xl.get(c, c.lower())
if not put: continue
last_is_underscore = r[-1] == '_' if r else True
if last_is_underscore and put == '_': continue
elif len(put) > 1:
if not last_is_underscore: r += '_'
r += put
r += '_'
else:
r += put
if r[-1] == '_': r = r[:-1]
return r
if isinstance(elem, tuple):
return 'grpc_mdelem_%s_%s' % (m0(elem[0]), m0(elem[1]))
else:
return 'grpc_mdstr_%s' % (m0(elem))
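# Worked examples, traced through m0() above:
#   mangle('grpc-timeout')     -> 'grpc_mdstr_grpc_timeout'
#   mangle('')                 -> 'grpc_mdstr_empty'
#   mangle((':status', '200')) -> 'grpc_mdelem_status_200'
#   mangle(('te', 'trailers')) -> 'grpc_mdelem_te_trailers'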
# utility: generate some hash value for a string
def fake_hash(elem):
return hashlib.md5(elem).hexdigest()[0:8]
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
print >>f, '/*'
for line in banner:
print >>f, ' * %s' % line
print >>f, ' */'
print >>f
# build a list of all the strings we need
all_strs = set()
all_elems = set()
static_userdata = {}
for elem in CONFIG:
if isinstance(elem, tuple):
all_strs.add(elem[0])
all_strs.add(elem[1])
all_elems.add(elem)
else:
all_strs.add(elem)
compression_elems = []
for mask in range(1, 1<<len(COMPRESSION_ALGORITHMS)):
val = ','.join(COMPRESSION_ALGORITHMS[alg]
for alg in range(0, len(COMPRESSION_ALGORITHMS))
if (1 << alg) & mask)
elem = ('grpc-accept-encoding', val)
all_strs.add(val)
all_elems.add(elem)
compression_elems.append(elem)
static_userdata[elem] = 1 + (mask | 1)
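# Worked example: for mask 0b101 (identity | gzip) the loop above yields
# val = 'identity,gzip', registers the element
# ('grpc-accept-encoding', 'identity,gzip'), and stores
# static_userdata[elem] = 1 + (0b101 | 1) = 6.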
all_strs = sorted(list(all_strs), key=mangle)
all_elems = sorted(list(all_elems), key=mangle)
# output configuration
args = sys.argv[1:]
H = None
C = None
D = None
if args:
if 'header' in args:
H = sys.stdout
else:
H = open('/dev/null', 'w')
if 'source' in args:
C = sys.stdout
else:
C = open('/dev/null', 'w')
if 'dictionary' in args:
D = sys.stdout
else:
D = open('/dev/null', 'w')
else:
H = open(os.path.join(
os.path.dirname(sys.argv[0]), '../../../src/core/lib/transport/static_metadata.h'), 'w')
C = open(os.path.join(
os.path.dirname(sys.argv[0]), '../../../src/core/lib/transport/static_metadata.c'), 'w')
D = open(os.path.join(
os.path.dirname(sys.argv[0]), '../../../test/core/end2end/fuzzers/hpack.dictionary'), 'w')
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != '#': break
for line in my_source:
if line[0] == '#':
copyright.append(line)
break
for line in my_source:
if line[0] != '#':
break
copyright.append(line)
put_banner([H,C], [line[2:].rstrip() for line in copyright])
hex_bytes = [ord(c) for c in "abcdefABCDEF0123456789"]
def esc_dict(line):
out = "\""
for c in line:
if 32 <= c < 127:
if c != ord('"'):
out += chr(c)
else:
out += "\\\""
else:
out += "\\x%02X" % c
return out + "\""
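# Worked example: esc_dict([0, ord('A'), ord('"')]) -> "\x00A\""
# i.e. a double-quoted C string literal with nonprintable bytes hex-escaped
# and embedded double quotes backslash-escaped.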
put_banner([H,C],
"""WARNING: Auto-generated code.
To make changes to this file, change
tools/codegen/core/gen_static_metadata.py, and then re-run it.
See metadata.h for an explanation of the interface here, and metadata.c for
an explanation of what's going on.
""".splitlines())
print >>H, '#ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
print >>H, '#define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
print >>H
print >>H, '#include "src/core/lib/transport/metadata.h"'
print >>H
print >>C, '#include "src/core/lib/transport/static_metadata.h"'
print >>C
print >>H, '#define GRPC_STATIC_MDSTR_COUNT %d' % len(all_strs)
print >>H, 'extern grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];'
for i, elem in enumerate(all_strs):
print >>H, '/* "%s" */' % elem
print >>H, '#define %s (&grpc_static_mdstr_table[%d])' % (mangle(elem).upper(), i)
print >>H
print >>C, 'grpc_mdstr grpc_static_mdstr_table[GRPC_STATIC_MDSTR_COUNT];'
print >>C
print >>D, '# hpack fuzzing dictionary'
for i, elem in enumerate(all_strs):
print >>D, '%s' % (esc_dict([len(elem)] + [ord(c) for c in elem]))
for i, elem in enumerate(all_elems):
print >>D, '%s' % (esc_dict([0, len(elem[0])] + [ord(c) for c in elem[0]] +
[len(elem[1])] + [ord(c) for c in elem[1]]))
print >>H, '#define GRPC_STATIC_MDELEM_COUNT %d' % len(all_elems)
print >>H, 'extern grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];'
print >>H, 'extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];'
for i, elem in enumerate(all_elems):
print >>H, '/* "%s": "%s" */' % elem
print >>H, '#define %s (&grpc_static_mdelem_table[%d])' % (mangle(elem).upper(), i)
print >>H
print >>C, 'grpc_mdelem grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];'
print >>C, 'uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {'
print >>C, ' %s' % ','.join('%d' % static_userdata.get(elem, 0) for elem in all_elems)
print >>C, '};'
print >>C
def str_idx(s):
for i, s2 in enumerate(all_strs):
if s == s2:
return i
def md_idx(m):
for i, m2 in enumerate(all_elems):
if m == m2:
return i
print >>H, 'extern const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT*2];'
print >>C, 'const uint8_t grpc_static_metadata_elem_indices[GRPC_STATIC_MDELEM_COUNT*2] = {'
print >>C, ','.join('%d' % str_idx(x) for x in itertools.chain.from_iterable([a,b] for a, b in all_elems))
print >>C, '};'
print >>C
print >>H, 'extern const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT];'
print >>C, 'const char *const grpc_static_metadata_strings[GRPC_STATIC_MDSTR_COUNT] = {'
print >>C, '%s' % ',\n'.join(' "%s"' % s for s in all_strs)
print >>C, '};'
print >>C
print >>H, 'extern const uint8_t grpc_static_accept_encoding_metadata[%d];' % (1 << len(COMPRESSION_ALGORITHMS))
print >>C, 'const uint8_t grpc_static_accept_encoding_metadata[%d] = {' % (1 << len(COMPRESSION_ALGORITHMS))
print >>C, '0,%s' % ','.join('%d' % md_idx(elem) for elem in compression_elems)
print >>C, '};'
print >>C
print >>H, '#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) (&grpc_static_mdelem_table[grpc_static_accept_encoding_metadata[(algs)]])'
print >>H, '#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */'
H.close()
C.close()
D.close()
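# Usage sketch (inferred from the argv handling above): run with no arguments
# to rewrite static_metadata.h/.c and the hpack fuzzing dictionary in-tree, or
# pass any of 'header', 'source', 'dictionary' to dump that artifact to stdout
# instead, e.g.:
#
#   python tools/codegen/core/gen_static_metadata.py header > static_metadata.h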
|
|
# -*- coding: utf-8 -*-
"""
file: model.py
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time, codecs
import itertools
from sem.storage import Coder
from .template import ListPattern
class Model(object):
    def __init__(self, constraints=None):  # constraints is currently unused
self._tagset = Coder()
self._templates = []
self._observations = Coder()
self._uoff = []
self._boff = []
self._weights = [] # list
self._max_col = 0
def __call__(self, x):
return self.tag_viterbi(x)
@classmethod
def from_wapiti_model(cls, filename, encoding="utf-8", verbose=True):
MODEL = 0
READER = 1
READ_TEMPLATE = 2
LABELS = 3
OBSERVATIONS = 4
FEATURES = 5
model = Model()
n_weights = -1
n_patterns = -1
n_labels = -1
n_observations = -1
current_feature = 0
state = MODEL
line_index = 0
if encoding is None:
fd = open(filename, "rU")
else:
fd = codecs.open(filename, "rU", encoding)
lines = [line.strip() for line in fd.readlines()]
n_weights = int(lines[line_index].split(u"#")[-1])
line_index += 1
n_patterns, max_col, other = lines[line_index].split(u"#")[-1].split(u"/")
n_patterns = int(n_patterns)
max_col = int(max_col)
model._max_col = max_col
line_index += 1
for line in lines[line_index : line_index+n_patterns]:
line = line.split(":",1)[1][:-1]
model._templates.append(ListPattern.from_string(line))
assert len(model.templates) == n_patterns
line_index += n_patterns
state = LABELS
s = time.time()
n_labels = int(lines[line_index].strip().split("#")[-1])
line_index += 1
for line in lines[line_index : line_index+n_labels]:
line = line.strip()
model.tagset.add(line[line.index(":")+1 : -1])
line_index += n_labels
assert len(model.tagset) == n_labels
state = OBSERVATIONS
s = time.time()
n_observations = int(lines[line_index].split("#")[-1])
line_index += 1
model._uoff = [-1]*n_observations
model._boff = [-1]*n_observations
for line in lines[line_index : line_index+n_observations]:
obs = line[line.index(":")+1 : -1]
n_feats = (n_labels if obs[0] in "u*" else 0)
n_feats += (n_labels**2 if obs[0] in "b*" else 0)
model.observations.add(obs)
if obs[0] in "u*":
model.uoff[len(model.observations)-1] = current_feature
if obs[0] in "b*":
model.boff[len(model.observations)-1] = current_feature + (n_labels if obs[0]==u"*" else 0)
current_feature += n_feats
line_index += n_observations
model._weights = [0.0]*current_feature
state = FEATURES
s = time.time()
for line in lines[-n_weights : ]:
index, weight = line.split("=")
            model.weights[int(index)] = float.fromhex(weight)  # weights are serialized in C99 hex-float notation
return model
@property
def tagset(self):
return self._tagset
@property
def templates(self):
return self._templates
@property
def observations(self):
return self._observations
@property
def uoff(self):
"""
Unigram OFFset
"""
return self._uoff
@property
def boff(self):
"""
Bigram OFFset
"""
return self._boff
@property
def weights(self):
return self._weights
    def tag_viterbi(self, sentence):
        """Viterbi decoding: return the most likely tag sequence for `sentence`
        as a (tags, per-position scores, best path score) triple.
        """
Y = len(self.tagset)
T = len(sentence)
range_Y = range(Y)
range_T = range(T)
psi = [[[0.0]*Y for _y1 in range_Y] for _t in range_T]
back = [[0]*Y for _t in range_T]
cur = [0.0]*Y
old = [0.0]*Y
psc = [0.0]*T
sc = -2**30
tag = [u"" for _t in range_T]
# avoiding dots
weights_ = self._weights
obs_encode = self._observations.encode
templates_ = self._templates
uoff_ = self._uoff
boff_ = self._boff
unigrams = []
bigrams = []
for t in range_T:
unigrams.append([])
bigrams.append([])
u_append = unigrams[-1].append
b_append = bigrams[-1].append
for template in templates_:
obs = template.instanciate(sentence, t)
o = obs_encode(obs)
if o != -1:
if obs[0] == 'u' and uoff_[o] != -1:
u_append(weights_[uoff_[o] : uoff_[o]+Y])
if obs[0] == 'b' and boff_[o] != -1:
b_append(weights_[boff_[o] : boff_[o]+Y*Y])
# compute scores in psi
for t in range_T:
unigrams_T = unigrams[t]
for y in range_Y:
sum_ = 0.0
for w in unigrams_T:
sum_ += w[y]
for yp in range_Y:
psi[t][yp][y] = sum_
for t in range(1,T):
bigrams_T = bigrams[t]
d = 0
for yp, y in itertools.product(range_Y, range_Y):
for w in bigrams_T:
psi[t][yp][y] += w[d]
d += 1
for y in range_Y:
cur[y] = psi[0][0][y]
for t in range(1,T):
for y in range_Y:
old[y] = cur[y]
for y in range_Y:
bst = -2**30
idx = 0
for yp in range_Y:
val = old[yp] + psi[t][yp][y]
if val > bst:
bst = val
idx = yp
back[t][y] = idx
cur[y] = bst
bst = 0
for y in range(1,Y):
if cur[y] > cur[bst]:
bst = y
sc = cur[bst]
for t in reversed(range_T):
yp = (back[t][bst] if t != 0 else 0)
y = bst
tag[t] = self._tagset.decode(y)
psc[t] = psi[t][yp][y]
bst = yp
return tag, psc, sc
def write(self, filename, encoding="utf-8"):
        with codecs.open(filename, "w", encoding) as O:
O.write("#mdl#2#{0}\n".format(len([w for w in self.weights if w !=0.0])))
O.write("#rdr#{0}/{1}/0\n".format(len(self._templates), self._max_col))
for pattern in self._templates:
uni_pattern = unicode(pattern)
O.write("{0}:{1},\n".format(len(uni_pattern), uni_pattern))
O.write(u"#qrk#{0}\n".format(len(self._tagset)))
for tag in self.tagset:
O.write(u"{0}:{1}\n".format(len(tag), tag))
# observations
O.write(u"#qrk#{0}\n".format(len(self._observations)))
for obs in self._observations:
O.write(u"{0}:{1}\n".format(len(obs), obs))
for index, w in enumerate(self.weights):
if w != 0.0:
O.write(u"{0}={1}\n".format(index, float.hex(w)))
def dump(self, filename):
ntags = len(self.tagset)
with codecs.open(filename, "w", "utf-8") as O:
for i in range(len(self.observations)):
o = self.observations.decode(i)
written = False
if o[0] == u"u":
off = self.uoff[i]
for y in range(ntags):
w = self.weights[off+y]
if w != 0:
O.write(u"{0}\t{1}\t{2}\t{3:.5f}\n".format(o, u'#', self.tagset.decode(y), w))
written = True
else:
off = self.boff[i]
d = 0
for yp in range(ntags):
for y in range(ntags):
w = self.weights[off+d]
if w != 0:
O.write(u"{0}\t{1}\t{2}\t{3:.5f}\n".format(o, self.tagset.decode(yp), self.tagset.decode(y), w))
written = True
d += 1
if written:
O.write(u"\n")
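# A minimal usage sketch (hypothetical paths; 'model.txt' stands in for a real
# Wapiti model file, and `sentence` must match the columns the templates
# expect):
#
#   model = Model.from_wapiti_model('model.txt')
#   tags, position_scores, path_score = model.tag_viterbi(sentence)
#   model.dump('weights.tsv')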
|
|
"""Private module full of compatibility hacks.
Primarily this is for downstream redistributions of requests that unvendor
urllib3 without providing a shim.
.. warning::
    This module is private. If you use it, and something breaks, you were
    warned.
"""
from collections import Mapping, MutableMapping
import sys
import requests
try:
from requests.packages.urllib3 import fields
from requests.packages.urllib3 import filepost
from requests.packages.urllib3 import poolmanager
except ImportError:
from urllib3 import fields
from urllib3 import filepost
from urllib3 import poolmanager
try:
from requests.packages.urllib3.connection import HTTPConnection
from requests.packages.urllib3 import connection
except ImportError:
try:
from urllib3.connection import HTTPConnection
from urllib3 import connection
except ImportError:
HTTPConnection = None
connection = None
if requests.__build__ < 0x020300:
timeout = None
else:
try:
from requests.packages.urllib3.util import timeout
except ImportError:
from urllib3.util import timeout
if requests.__build__ < 0x021000:
gaecontrib = None
else:
try:
from requests.packages.urllib3.contrib import appengine as gaecontrib
except ImportError:
from urllib3.contrib import appengine as gaecontrib
PY3 = sys.version_info > (3, 0)
if PY3:
import queue
from urllib.parse import urlencode, urljoin
else:
import Queue as queue
from urllib import urlencode
from urlparse import urljoin
try:
basestring = basestring
except NameError:
basestring = (str, bytes)
class HTTPHeaderDict(MutableMapping):
"""
:param headers:
An iterable of field-value pairs. Must not contain multiple field names
when compared case-insensitively.
:param kwargs:
Additional field-value pairs to pass in to ``dict.update``.
A ``dict`` like container for storing HTTP Headers.
Field names are stored and compared case-insensitively in compliance with
RFC 7230. Iteration provides the first case-sensitive key seen for each
case-insensitive pair.
Using ``__setitem__`` syntax overwrites fields that compare equal
case-insensitively in order to maintain ``dict``'s api. For fields that
compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
in a loop.
If multiple fields that are equal case-insensitively are passed to the
constructor or ``.update``, the behavior is undefined and some will be
lost.
>>> headers = HTTPHeaderDict()
>>> headers.add('Set-Cookie', 'foo=bar')
>>> headers.add('set-cookie', 'baz=quxx')
>>> headers['content-length'] = '7'
>>> headers['SET-cookie']
'foo=bar, baz=quxx'
>>> headers['Content-Length']
'7'
"""
def __init__(self, headers=None, **kwargs):
super(HTTPHeaderDict, self).__init__()
self._container = {}
if headers is not None:
if isinstance(headers, HTTPHeaderDict):
self._copy_from(headers)
else:
self.extend(headers)
if kwargs:
self.extend(kwargs)
def __setitem__(self, key, val):
self._container[key.lower()] = (key, val)
return self._container[key.lower()]
def __getitem__(self, key):
val = self._container[key.lower()]
return ', '.join(val[1:])
def __delitem__(self, key):
del self._container[key.lower()]
def __contains__(self, key):
return key.lower() in self._container
def __eq__(self, other):
if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
return False
if not isinstance(other, type(self)):
other = type(self)(other)
return (dict((k.lower(), v) for k, v in self.itermerged()) ==
dict((k.lower(), v) for k, v in other.itermerged()))
def __ne__(self, other):
return not self.__eq__(other)
if not PY3: # Python 2
iterkeys = MutableMapping.iterkeys
itervalues = MutableMapping.itervalues
__marker = object()
def __len__(self):
return len(self._container)
def __iter__(self):
# Only provide the originally cased names
for vals in self._container.values():
yield vals[0]
def pop(self, key, default=__marker):
"""D.pop(k[,d]) -> v, remove specified key and return its value.
If key is not found, d is returned if given, otherwise KeyError is
raised.
"""
# Using the MutableMapping function directly fails due to the private
# marker.
# Using ordinary dict.pop would expose the internal structures.
# So let's reinvent the wheel.
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def discard(self, key):
try:
del self[key]
except KeyError:
pass
def add(self, key, val):
"""Adds a (name, value) pair, doesn't overwrite the value if it already
exists.
>>> headers = HTTPHeaderDict(foo='bar')
>>> headers.add('Foo', 'baz')
>>> headers['foo']
'bar, baz'
"""
key_lower = key.lower()
new_vals = key, val
# Keep the common case aka no item present as fast as possible
vals = self._container.setdefault(key_lower, new_vals)
if new_vals is not vals:
# new_vals was not inserted, as there was a previous one
if isinstance(vals, list):
# If already several items got inserted, we have a list
vals.append(val)
else:
# vals should be a tuple then, i.e. only one item so far
# Need to convert the tuple to list for further extension
self._container[key_lower] = [vals[0], vals[1], val]
def extend(self, *args, **kwargs):
"""Generic import function for any type of header-like object.
Adapted version of MutableMapping.update in order to insert items
with self.add instead of self.__setitem__
"""
if len(args) > 1:
            raise TypeError("extend() takes at most 1 positional "
                            "argument ({} given)".format(len(args)))
other = args[0] if len(args) >= 1 else ()
if isinstance(other, HTTPHeaderDict):
for key, val in other.iteritems():
self.add(key, val)
elif isinstance(other, Mapping):
for key in other:
self.add(key, other[key])
elif hasattr(other, "keys"):
for key in other.keys():
self.add(key, other[key])
else:
for key, value in other:
self.add(key, value)
for key, value in kwargs.items():
self.add(key, value)
def getlist(self, key):
"""Returns a list of all the values for the named field. Returns an
empty list if the key doesn't exist."""
try:
vals = self._container[key.lower()]
except KeyError:
return []
else:
if isinstance(vals, tuple):
return [vals[1]]
else:
return vals[1:]
# Backwards compatibility for httplib
getheaders = getlist
getallmatchingheaders = getlist
iget = getlist
def __repr__(self):
return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
def _copy_from(self, other):
for key in other:
val = other.getlist(key)
if isinstance(val, list):
# Don't need to convert tuples
val = list(val)
self._container[key.lower()] = [key] + val
def copy(self):
clone = type(self)()
clone._copy_from(self)
return clone
def iteritems(self):
"""Iterate over all header lines, including duplicate ones."""
for key in self:
vals = self._container[key.lower()]
for val in vals[1:]:
yield vals[0], val
def itermerged(self):
"""Iterate over all headers, merging duplicate ones together."""
for key in self:
val = self._container[key.lower()]
yield val[0], ', '.join(val[1:])
def items(self):
return list(self.iteritems())
@classmethod
def from_httplib(cls, message): # Python 2
"""Read headers from a Python 2 httplib message object."""
# python2.7 does not expose a proper API for exporting multiheaders
# efficiently. This function re-reads raw lines from the message
# object and extracts the multiheaders properly.
headers = []
for line in message.headers:
if line.startswith((' ', '\t')):
key, value = headers[-1]
headers[-1] = (key, value + '\r\n' + line.rstrip())
continue
key, value = line.split(':', 1)
headers.append((key, value.strip()))
return cls(headers)
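# A minimal from_httplib sketch (Python 2 only; 'example.com' is a
# placeholder host):
#
#   import httplib
#   conn = httplib.HTTPConnection('example.com')
#   conn.request('GET', '/')
#   resp = conn.getresponse()
#   headers = HTTPHeaderDict.from_httplib(resp.msg)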
__all__ = (
'basestring',
'connection',
'fields',
'filepost',
'poolmanager',
'timeout',
'HTTPHeaderDict',
'queue',
'urlencode',
'gaecontrib',
'urljoin',
)
|
|
from __future__ import unicode_literals
from django.contrib import messages
from django.db import transaction
from django.shortcuts import get_object_or_404, redirect
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.utils.translation import pgettext_lazy
from django_prices.templatetags.prices_i18n import gross
from payments import PaymentStatus
from prices import Price
from ...core.utils import get_paginator_items
from ...order import OrderStatus
from ...order.models import Order, OrderedItem, OrderNote
from ...product.models import ProductVariant
from ...userprofile.i18n import AddressForm
from ..order.forms import OrderFilterForm
from ..views import staff_member_required
from .forms import (CancelGroupForm, CancelItemsForm, CancelOrderForm,
CapturePaymentForm, ChangeQuantityForm, MoveItemsForm,
OrderNoteForm, RefundPaymentForm, ReleasePaymentForm,
RemoveVoucherForm, ShipGroupForm)
@staff_member_required
def order_list(request):
orders_all = Order.objects.prefetch_related(
'groups', 'payments', 'groups__items', 'user').all()
active_status = request.GET.get('status')
if active_status:
orders = orders_all.filter(status=active_status)
else:
orders = orders_all
page = get_paginator_items(orders, 20, request.GET.get('page'))
form = OrderFilterForm(
request.POST or None, initial={'status': active_status or None})
ctx = {'object_list': page.object_list, 'orders_all': orders_all, 'page_obj': page,
'is_paginated': page.has_other_pages(), 'form': form}
return TemplateResponse(request, 'dashboard/order/list.html', ctx)
@staff_member_required
def order_details(request, order_pk):
qs = (Order.objects
.select_related('user', 'shipping_address', 'billing_address')
.prefetch_related('notes', 'payments', 'history',
'groups', 'groups__items'))
order = get_object_or_404(qs, pk=order_pk)
notes = order.notes.all()
all_payments = order.payments.exclude(status=PaymentStatus.INPUT)
payment = order.payments.last()
groups = list(order)
captured = preauthorized = Price(0, currency=order.get_total().currency)
balance = captured - order.get_total()
if payment:
can_capture = (
payment.status == PaymentStatus.PREAUTH and
order.status != OrderStatus.CANCELLED)
can_release = payment.status == PaymentStatus.PREAUTH
can_refund = payment.status == PaymentStatus.CONFIRMED
preauthorized = payment.get_total_price()
if payment.status == PaymentStatus.CONFIRMED:
captured = payment.get_captured_price()
balance = captured - order.get_total()
else:
can_capture = can_release = can_refund = False
ctx = {'order': order, 'all_payments': all_payments, 'payment': payment,
'notes': notes, 'groups': groups, 'captured': captured,
'preauthorized': preauthorized, 'can_capture': can_capture,
'can_release': can_release, 'can_refund': can_refund,
'balance': balance}
return TemplateResponse(request, 'dashboard/order/detail.html', ctx)
@staff_member_required
def order_add_note(request, order_pk):
order = get_object_or_404(Order, pk=order_pk)
note = OrderNote(order=order, user=request.user)
form = OrderNoteForm(request.POST or None, instance=note)
status = 200
if form.is_valid():
form.save()
msg = pgettext_lazy(
'Dashboard message related to an order',
'Added note')
order.create_history_entry(comment=msg, user=request.user)
messages.success(request, msg)
elif form.errors:
status = 400
ctx = {'order': order, 'form': form}
ctx.update(csrf(request))
template = 'dashboard/order/modal_add_note.html'
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
def capture_payment(request, order_pk, payment_pk):
order = get_object_or_404(Order, pk=order_pk)
payment = get_object_or_404(order.payments, pk=payment_pk)
amount = order.get_total().quantize('0.01').gross
form = CapturePaymentForm(request.POST or None, payment=payment,
initial={'amount': amount})
if form.is_valid() and form.capture():
amount = form.cleaned_data['amount']
msg = pgettext_lazy(
'Dashboard message related to a payment',
'Captured %(amount)s') % {'amount': gross(amount)}
payment.order.create_history_entry(comment=msg, user=request.user)
messages.success(request, msg)
return redirect('dashboard:order-details', order_pk=order.pk)
status = 400 if form.errors else 200
ctx = {'captured': payment.captured_amount, 'currency': payment.currency,
'form': form, 'order': order, 'payment': payment}
return TemplateResponse(request, 'dashboard/order/modal_capture.html', ctx,
status=status)
@staff_member_required
def refund_payment(request, order_pk, payment_pk):
order = get_object_or_404(Order, pk=order_pk)
payment = get_object_or_404(order.payments, pk=payment_pk)
amount = payment.captured_amount
form = RefundPaymentForm(request.POST or None, payment=payment,
initial={'amount': amount})
if form.is_valid() and form.refund():
amount = form.cleaned_data['amount']
msg = pgettext_lazy(
'Dashboard message related to a payment',
'Refunded %(amount)s') % {'amount': gross(amount)}
payment.order.create_history_entry(comment=msg, user=request.user)
messages.success(request, msg)
return redirect('dashboard:order-details', order_pk=order.pk)
status = 400 if form.errors else 200
ctx = {'captured': payment.captured_amount, 'currency': payment.currency,
'form': form, 'order': order, 'payment': payment}
return TemplateResponse(request, 'dashboard/order/modal_refund.html', ctx,
status=status)
@staff_member_required
def release_payment(request, order_pk, payment_pk):
order = get_object_or_404(Order, pk=order_pk)
payment = get_object_or_404(order.payments, pk=payment_pk)
form = ReleasePaymentForm(request.POST or None, payment=payment)
if form.is_valid() and form.release():
msg = pgettext_lazy('Dashboard message', 'Released payment')
payment.order.create_history_entry(comment=msg, user=request.user)
messages.success(request, msg)
return redirect('dashboard:order-details', order_pk=order.pk)
status = 400 if form.errors else 200
ctx = {'captured': payment.captured_amount, 'currency': payment.currency,
'form': form, 'order': order, 'payment': payment}
return TemplateResponse(request, 'dashboard/order/modal_release.html', ctx,
status=status)
@staff_member_required
def orderline_change_quantity(request, order_pk, line_pk):
order = get_object_or_404(Order, pk=order_pk)
item = get_object_or_404(OrderedItem.objects.filter(
delivery_group__order=order), pk=line_pk)
variant = get_object_or_404(
ProductVariant, sku=item.product_sku)
form = ChangeQuantityForm(
request.POST or None, instance=item, variant=variant)
status = 200
old_quantity = item.quantity
if form.is_valid():
with transaction.atomic():
form.save()
msg = pgettext_lazy(
'Dashboard message related to an order line',
'Changed quantity for product %(product)s from'
' %(old_quantity)s to %(new_quantity)s') % {
'product': item.product, 'old_quantity': old_quantity,
'new_quantity': item.quantity}
order.create_history_entry(comment=msg, user=request.user)
messages.success(request, msg)
return redirect('dashboard:order-details', order_pk=order.pk)
elif form.errors:
status = 400
ctx = {'order': order, 'object': item, 'form': form}
template = 'dashboard/order/modal_change_quantity.html'
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
def orderline_split(request, order_pk, line_pk):
order = get_object_or_404(Order, pk=order_pk)
item = get_object_or_404(OrderedItem.objects.filter(
delivery_group__order=order), pk=line_pk)
form = MoveItemsForm(request.POST or None, item=item)
line_pk = None
if item:
line_pk = item.pk
status = 200
if form.is_valid():
old_group = item.delivery_group
how_many = form.cleaned_data['quantity']
with transaction.atomic():
target_group = form.move_items()
if not old_group.pk:
old_group = pgettext_lazy(
'Dashboard message related to a delivery group',
'removed group')
msg = pgettext_lazy(
'Dashboard message related to delivery groups',
'Moved %(how_many)s items %(item)s from %(old_group)s'
' to %(new_group)s') % {
'how_many': how_many, 'item': item, 'old_group': old_group,
'new_group': target_group}
order.create_history_entry(comment=msg, user=request.user)
messages.success(request, msg)
return redirect('dashboard:order-details', order_pk=order.pk)
elif form.errors:
status = 400
ctx = {'order': order, 'object': item, 'form': form, 'line_pk': line_pk}
template = 'dashboard/order/modal_split_order_line.html'
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
def orderline_cancel(request, order_pk, line_pk):
order = get_object_or_404(Order, pk=order_pk)
item = get_object_or_404(OrderedItem.objects.filter(
delivery_group__order=order), pk=line_pk)
form = CancelItemsForm(data=request.POST or None, item=item)
status = 200
if form.is_valid():
msg = pgettext_lazy(
'Dashboard message related to an order line',
'Cancelled item %s') % item
with transaction.atomic():
form.cancel_item()
order.create_history_entry(comment=msg, user=request.user)
messages.success(request, msg)
return redirect('dashboard:order-details', order_pk=order.pk)
elif form.errors:
status = 400
ctx = {'order': order, 'item': item, 'form': form}
return TemplateResponse(
request, 'dashboard/order/modal_cancel_line.html',
ctx, status=status)
@staff_member_required
def ship_delivery_group(request, order_pk, group_pk):
order = get_object_or_404(Order, pk=order_pk)
group = get_object_or_404(order.groups.all(), pk=group_pk)
form = ShipGroupForm(request.POST or None, instance=group)
status = 200
if form.is_valid():
with transaction.atomic():
form.save()
msg = pgettext_lazy(
'Dashboard message related to a delivery group',
'Shipped %s') % group
messages.success(request, msg)
group.order.create_history_entry(comment=msg, user=request.user)
return redirect('dashboard:order-details', order_pk=order_pk)
elif form.errors:
status = 400
ctx = {'order': order, 'group': group, 'form': form}
template = 'dashboard/order/modal_ship_delivery_group.html'
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
def cancel_delivery_group(request, order_pk, group_pk):
order = get_object_or_404(Order, pk=order_pk)
group = get_object_or_404(order.groups.all(), pk=group_pk)
form = CancelGroupForm(request.POST or None, delivery_group=group)
status = 200
if form.is_valid():
with transaction.atomic():
form.cancel_group()
msg = pgettext_lazy(
'Dashboard message related to a delivery group',
'Cancelled %s') % group
messages.success(request, msg)
group.order.create_history_entry(comment=msg, user=request.user)
return redirect('dashboard:order-details', order_pk=order_pk)
elif form.errors:
status = 400
ctx = {'order': order, 'group': group}
template = 'dashboard/order/modal_cancel_delivery_group.html'
return TemplateResponse(request, template, ctx, status=status)
@staff_member_required
def address_view(request, order_pk, address_type):
order = Order.objects.get(pk=order_pk)
if address_type == 'shipping':
address = order.shipping_address
success_msg = pgettext_lazy(
'Dashboard message',
'Updated shipping address')
else:
address = order.billing_address
success_msg = pgettext_lazy(
'Dashboard message',
'Updated billing address')
form = AddressForm(request.POST or None, instance=address)
if form.is_valid():
form.save()
order.create_history_entry(comment=success_msg, user=request.user)
messages.success(request, success_msg)
return redirect('dashboard:order-details', order_pk=order_pk)
ctx = {'order': order, 'address_type': address_type, 'form': form}
return TemplateResponse(request, 'dashboard/order/address_form.html', ctx)
@staff_member_required
def cancel_order(request, order_pk):
status = 200
order = get_object_or_404(Order, pk=order_pk)
form = CancelOrderForm(request.POST or None, order=order)
if form.is_valid():
msg = pgettext_lazy('Dashboard message', 'Cancelled order')
with transaction.atomic():
form.cancel_order()
order.create_history_entry(comment=msg, user=request.user)
            messages.success(request, msg)
return redirect('dashboard:order-details', order_pk=order.pk)
# TODO: send status confirmation email
elif form.errors:
status = 400
ctx = {'order': order}
return TemplateResponse(request, 'dashboard/order/modal_cancel_order.html',
ctx, status=status)
@staff_member_required
def remove_order_voucher(request, order_pk):
status = 200
order = get_object_or_404(Order, pk=order_pk)
form = RemoveVoucherForm(request.POST or None, order=order)
if form.is_valid():
msg = pgettext_lazy('Dashboard message', 'Removed voucher from order')
with transaction.atomic():
form.remove_voucher()
order.create_history_entry(comment=msg, user=request.user)
messages.success(request, msg)
return redirect('dashboard:order-details', order_pk=order.pk)
elif form.errors:
status = 400
ctx = {'order': order}
return TemplateResponse(request,
'dashboard/order/modal_order_remove_voucher.html',
ctx, status=status)
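# A minimal URLconf sketch (hypothetical; only the 'dashboard:order-details'
# reverse name is taken from the redirects above, the paths are illustrative):
#
#   from django.conf.urls import url
#
#   urlpatterns = [
#       url(r'^$', order_list, name='orders'),
#       url(r'^(?P<order_pk>\d+)/$', order_details, name='order-details'),
#       url(r'^(?P<order_pk>\d+)/cancel/$', cancel_order, name='order-cancel'),
#   ]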
|
|
import smtk
# Common data structures & functions
ConfigData = type('ConfigData', (object,), dict())
def standard_section(attribute_type, title=None, group_name=None, comment=None):
config = ConfigData()
config.type = 'standard'
config.attribute_type = attribute_type
config.title = title
config.group_name = group_name
config.comment = comment
return config
ss = standard_section # shorthand
def boundary_condition_section(attribute_type, title=None):
config = ConfigData()
config.type = 'boundary_condition'
config.attribute_type = attribute_type
config.title = title
return config
bc = boundary_condition_section
def custom_section(section_name):
config = ConfigData()
config.type = 'custom'
config.section_name = section_name
return config
def item_format(item_name, keyword=None, item_format_list=None):
config = ConfigData()
config.name = item_name
config.keyword = item_name if keyword is None else keyword
config.item_format_list = item_format_list # conditional children
return config
fmt = item_format # shorthand
def group_format(group_name, item_format_list):
config = ConfigData()
config.group_name = group_name
    config.name = group_name # TODO revisit adding Config.type?
config.item_format_list = item_format_list
return config
groupfmt = group_format # shorthand
# Note to self: At some point, we may need to add an optional list to
# standard_section for the strings to write for Item instances
# represented as discrete values. So far haven't needed it.
# ==================================================
#
# List of output section config data
# Standard config is ss(attribute-type, [hydra-title, group-item-name, comment-line])
#
# ==================================================
section_table = [
ss('simulationtime'),
ss('solution_method', 'solution_method', 'SolutionMethod'),
ss('time_integration', 'time_integration', 'TimeIntegration'),
ss('LoadBalancer', 'load_balance'),
custom_section('output'),
ss('energy'),
custom_section('hydrostat'),
custom_section('turbulence'),
ss('Material', 'material', comment='Material model setup & assignment to sets'),
# TODO materialset, probably a custom_section (or part of a custom Material section)
custom_section('plotvar'),
custom_section('histvar'),
custom_section('plotstatvar'),
#ss('InitialConditions', 'initial', 'InitialConditions', comment='Simple IC\'s'),
custom_section('InitialConditions'),
custom_section('BodyForce'),
bc('distancebc', 'distance'),
bc('Pressure', 'pressure'),
#custom_section('distance'), # Wall and Penetration att types
bc('TurbulentViscosity', 'turbnu'),
bc('HeatFlux', 'heatflux'),
custom_section('velocity'), # 6 different att types
#vector_bc('velocity', [
# ('VelXBoundaryCondition', 'velx'),
# ('VelYBoundaryCondition', 'vely'),
# ('VelZBoundaryCondition', 'velz'),
# ],
#)
# TODO remaining boundary condition types
ss('ppesolver', 'ppesolver', 'PressurePoissonSolver'),
ss('momentumsolver', 'momentumsolver', 'MomentumSolver'),
ss('transportsolver', 'transportsolver', 'TransportSolver'),
]
# ==================================================
#
# Dictionary of format config data for items contained in attributes
# Format is fmt(item-name, hydra-keyword-if-different)
# The group_format (groupfmt) identifier should *only* be used for custom sections
#
# ==================================================
format_table = {
'simulationtime': [
fmt('nsteps'),
fmt('deltat'),
fmt('term')
],
'energy': [
fmt('energy')
],
'solution_method': [
fmt('strategy', 'strategy', [
fmt('error_norm'),
fmt('nvec')]
),
fmt('itmax'),
fmt('eps'),
fmt('eps_dist'),
fmt('eps_p0'),
fmt('subcycle'),
fmt('timestep_control'),
fmt('convergence'),
fmt('diagnostics')
],
'time_integration': [
fmt('type'),
fmt('CFLinit'),
fmt('CFLmax'),
fmt('dtmax'),
fmt('dtscale'),
fmt('thetaa'),
fmt('thetaK', 'thetak'),
fmt('thetaf'),
fmt('trimlast')
],
'LoadBalancer': [
fmt('Method', 'method'),
fmt('Load Balance Diagnostics', 'diagnostics')
],
'Output': [
fmt('type', 'filetype'),
groupfmt('FieldOutput', [
fmt('type', 'pltype'),
fmt('frequency', 'plti')
]),
groupfmt('RestartOutput', [
fmt('frequency', 'dump')
])
],
'StatusInformation': [
fmt('minmaxfrequency', 'ttyi'),
fmt('tifrequency', 'thti'),
fmt('PrintLevel', 'prtlev', [
fmt('hcfrequency', 'prti')
]),
],
'BasicTurbulenceModel': [
fmt('Method', 'turbulence', [
fmt('timescale_limiter'),
fmt('c_s'),
fmt('c_w'),
fmt('prandtl'),
fmt('schmidt'),
]),
],
'Material': [
fmt('Density', 'rho'),
fmt('mu')
],
'InitialConditions': [
fmt('Velocity', ['velx', 'vely', 'velz']),
fmt('tv', 'turbnu'),
fmt('tke'),
fmt('itdr', 'eps'),
fmt('temperature')
],
'ppesolver': [
fmt('ppetype', 'type'),
fmt('itmax'),
fmt('itchk'),
fmt('diagnostics'),
fmt('convergence'),
fmt('eps'),
fmt('pivot', 'zeropivot'),
fmt('ppetype', 'type', [
fmt('preconditioner', 'amgpc', [
fmt('hypre_coarsen_type'),
fmt('hypre_smoother'),
fmt('hypre_smoother_dn'),
fmt('hypre_smoother_up'),
fmt('hypre_smoother_co'),
fmt('interp_type'),
fmt('trunc_factor'),
fmt('pmax_elements'),
fmt('agg_num_levels'),
fmt('strong_threshold'),
fmt('max_rowsum'),
fmt('smoother'),
fmt('cycle'),
fmt('solver'),
fmt('pre_smooth'),
fmt('post_smooth'),
fmt('coarse_size'),
fmt('levels'),
])
])
],
'momentumsolver': [
fmt('type'),
fmt('restart'),
fmt('itmax'),
fmt('itchk'),
fmt('diagnostics'),
fmt('convergence'),
fmt('eps'),
],
'transportsolver': [
fmt('type'),
fmt('restart'),
fmt('itmax'),
fmt('itchk'),
fmt('diagnostics'),
fmt('convergence'),
fmt('eps'),
],
}
# Instantiate global dicationary for load curve functions
lcid_dictionary = dict()
# Entry point (main export function)
def ExportCMB(spec):
'''
Entry function, called by CMB to write export file
'''
manager = spec.getSimulationAttributes()
export_manager = spec.getExportAttributes()
#analysis_name = spec.getAnalysisNames()[0] # deprecated
#output_file_name = spec.getOutputPath() # deprecated
ok = True
if manager is None:
print 'No attribute manager found - no output generated'
return False
if export_manager is None:
print 'No export attributes found - no output generated'
return False
att_list = export_manager.findAttributes('ExportSpec')
if len(att_list) < 1:
print 'ERROR - missing ExportSpec attribute'
return False
elif len(att_list) > 1:
print 'ERROR - multiple ExportSpec attributes'
return False
spec_att = att_list[0]
item = spec_att.find('AnalysisTypes')
if item is None:
print 'ERROR - ExportSpec attribute missing AnalysisTypes item'
return False
types_item = smtk.attribute.to_concrete(item)
analysis_type = 'Default'
if types_item.numberOfValues() < 1:
print 'Warning: No analysis type specified'
else:
analysis_type = types_item.value(0)
if types_item.numberOfValues() > 1:
print 'More than 1 Analysis Type specified: using 1st one'
print 'Exporting analysis type', analysis_type
output_file_name = 'output.txt' # default
item = spec_att.find('OutputFile')
if item is not None:
output_item = smtk.attribute.to_concrete(item)
if output_item.isSet(0):
value = output_item.value(0)
if value != '':
output_file_name = value
print 'Writing output file', output_file_name
analysis_dict = {
'Incompressible Navier-Stokes Analysis': 'cc_navierstokes',
'NS and Energy Equation Analysis': 'cc_navierstokes'
}
if analysis_type not in analysis_dict:
        print 'Unsupported analysis type "%s" - no output generated' % \
            analysis_type
return False
categories = list(manager.analysisCategories(analysis_type))
print 'categories', categories
if not categories:
print 'WARNING: No categories found for analysis \"%s\"' % \
analysis_type
#return False
# Instantiate output file and write contents
with open(output_file_name, 'w') as out:
out.write('title\n')
out.write('Hydra-TH control file generated by Kitware CMB\n')
out.write('\n')
title = analysis_dict.get(analysis_type, 'unknown_analysis')
out.write(title)
out.write('\n')
# Process elements in section_table
for section_config in section_table:
ok = write_section(manager, section_config, categories, out)
# Write load curves last, since ids are assigned when writing atts
write_load_curves(manager, out)
out.write('\n')
out.write('end\n')
out.write('\n')
out.write('exit\n')
print 'Export ok status: %s' % ok
return ok
def get_id_from_name(name):
'''
A hack by acbauer to get the sideset or cell block id from
the model entity's name. it assumes that the last token
in the string is the proper id to be used. This will be
replaced with GridInfo when we have time to do it properly.
'''
# Domain sets are named DomainSetX
domainset_prefix = 'DomainSet'
if name.startswith(domainset_prefix):
l = len(domainset_prefix)
return name[l:]
tokens = name.split()
if tokens: # checks if tokens is empty
return tokens[-1]
return "BAD_VALUE"
def write_output_section(manager, categories, out):
'''
Writes output section, which is "custom" because spans multiple attributes
'''
out.write('\n')
out.write(' # Output options\n')
# This is awkward - must put keyword as last item in the list, instead
# of Item name, because format_table[] is set up that way
# TODO Redo format table to put Item name first?
write_item(manager, categories, out, 'Output', 'FieldOutput', 'type') # pltype
#write_item(manager, categories, out, 'Output', 'RestartOutput', 'type') # filetype
write_item(manager, categories, out, 'Output', 'type') # filetype
write_item(manager, categories, out, 'Output', 'FieldOutput', 'frequency') # plti
write_item(manager, categories, out, 'StatusInformation', 'minmaxfrequency') # ttyi
write_item(manager, categories, out, 'StatusInformation', 'tifrequency') # thti
#write_item(manager, categories, out, 'StatusInformation', 'PrintLevel') # prtlev
# Because PrintLevel has conditional children, use write_item_tree() method
# Suggests some better refactoring of write_item() and write_item_tree()
item = find_item(manager, 'StatusInformation', 'PrintLevel')
if item.isMemberOf(categories):
item_config = find_item_config('StatusInformation', 'PrintLevel')
format_string = ' %s %s\n'
write_item_tree(item, item_config, format_string, out)
write_item(manager, categories, out, 'Output', 'RestartOutput', 'frequency') # dump
return True
def write_turbulence_section(manager, categories, out):
'''
Writes turbulence section for AdvancedTurbulenceModel attribute
Hydra-TH format is slightly nonstandard
'''
att_type = 'BasicTurbulenceModel'
turb_att_list = manager.findAttributes(att_type)
if len(turb_att_list) < 1:
return True
item_format_list = format_table.get(att_type)
if item_format_list is None:
print 'WARNING: No format info for', att_type
return False
attribute = turb_att_list[0] # there should only be a single instance of this attribute
if not attribute.isMemberOf(categories):
return True
item = attribute.find("Method")
if item is None:
return False
if not item.isEnabled():
return True
item = smtk.attribute.to_concrete(item)
out.write('\n')
if item.value(0) in ["WALE", "rng_ke", "smagorinsky"]:
format_string = ' %s %s\n'
for turb_att in turb_att_list:
out.write('\n')
for item_config in item_format_list:
item = turb_att.find(item_config.name)
if item is None:
continue
write_item_tree(item, item_config, format_string, out, indent=' ')
out.write(' end\n')
else:
out.write(' tmodel %s\n' % item.value(0))
return True
def write_plotvar_section(manager, categories, out, name):
'''
Writes plotvar section for [Node/Elem/SideSet]PlotVarOutput attributes
'''
config = {
'plotvar': ('NodePlotVarOutput', 'ElemPlotVarOutput', 'SideSetPlotVarOutput'),
'plotstatvar': ('NodeTempStatVarOutput', 'ElemTempStatVarOutput', 'SideSetTempStatVarOutput'),
}
node_att_list = manager.findAttributes(config[name][0])
elem_att_list = manager.findAttributes(config[name][1])
ss_att_list = manager.findAttributes(config[name][2])
if len(node_att_list) + len(elem_att_list) + len(ss_att_list) < 1:
return True
if name == 'plotstatvar':
out.write('\n')
out.write(' statistics\n')
plotstatvaratt = manager.findAttributes('TempStatVarStatistics')[0]
itemlabels = ['starttime', 'endtime', 'plotwinsize']
groupitem = plotstatvaratt.find('TemporalStatistics')
var_groupitem = smtk.attribute.to_concrete(groupitem)
for i in range(len(itemlabels)):
item = var_groupitem.item(i)
var_item = smtk.attribute.to_concrete(item)
out.write(' %s %s\n' % (itemlabels[i],var_item.value(0)) )
out.write(' end\n')
out.write('\n')
out.write(' %s\n' % name)
types = [ 'node', 'elem']
lists = [ node_att_list, elem_att_list]
# Create list of (type, varname) tuples
ne_tlist = list()
for i in range(len(lists)):
plot_type = types[i]
current_list = lists[i]
for att in current_list:
item = att.find('varname')
var_item = smtk.attribute.to_concrete(item)
t = (plot_type, var_item.value(0))
ne_tlist.append(t)
ne_tlist.sort()
for t in ne_tlist:
out.write(' %s %s\n' % t)
# Create list of (type, sideset id, varname) tuples
ss_tlist = list()
for att in ss_att_list:
item = att.find('varname')
var_item = smtk.attribute.to_concrete(item)
entities = att.associatedEntities()
for entity in entities:
#out.write(' block %s\n' % get_id_from_name(entity.name()))
t = ('side ', get_id_from_name(entity.name()), var_item.value(0))
ss_tlist.append(t)
ss_tlist.sort()
for t in ss_tlist:
out.write(' %s %s %s\n' % t)
out.write(' end\n')
return True
def write_histvar_section(manager, categories, out):
'''
Writes histvar section for [Node/Elem/SideSet]HistVarOutput attributes
'''
node_att_list = manager.findAttributes('NodeHistVarOutput')
elem_att_list = manager.findAttributes('ElemHistVarOutput')
ss_att_list = manager.findAttributes('SideSetHistVarOutput')
if len(node_att_list) + len(elem_att_list) + len(ss_att_list) < 1:
return True
out.write('\n')
out.write(' histvar\n')
types = [ 'node', 'elem']
lists = [ node_att_list, elem_att_list]
# Create list of (type, varname) tuples
ne_tlist = list()
for i in range(len(lists)):
plot_type = types[i]
for att in lists[i]:
item = att.find('varname')
var_item = smtk.attribute.to_concrete(item)
item = att.find('Id')
id_item = smtk.attribute.to_concrete(item)
t = (plot_type, id_item.value(0), var_item.value(0))
ne_tlist.append(t)
ne_tlist.sort()
for t in ne_tlist:
out.write(' %s %s %s\n' % t)
# Create list of (type, sideset id, varname) tuples
ss_tlist = list()
for att in ss_att_list:
item = att.find('varname')
var_item = smtk.attribute.to_concrete(item)
entities = att.associatedEntities()
for entity in entities:
#out.write(' block %s\n' % get_id_from_name(entity.name()))
t = ('side ', get_id_from_name(entity.name()), var_item.value(0))
ss_tlist.append(t)
ss_tlist.sort()
for t in ss_tlist:
out.write(' %s %s %s\n' % t)
out.write(' end\n')
return True
def write_hydrostat_section(manager, categories, out):
'''
Writes hydrostat section
'''
att_list = manager.findAttributes('hydrostat')
if not att_list:
print 'WARNING - expected hydrostat attribute'
return False
att = att_list[0]
if not att.isMemberOf(categories):
return True
item = att.find('Hydrostat')
if item is None:
return False
if not item.isEnabled():
return True
out.write('\n')
out.write(' # Hydrostatic pressure\n')
out.write(' hydrostat\n')
group_item = smtk.attribute.to_concrete(item)
item = group_item.find('NodesetId')
nsid_item = smtk.attribute.to_concrete(item)
nsid = nsid_item.value(0)
lcid = -1
item = group_item.find('Value')
if item.isEnabled():
lcid_item = smtk.attribute.to_concrete(item)
lcid = get_loadcurve_id(lcid_item)
item = group_item.find('Scale')
scale_item = smtk.attribute.to_concrete(item)
scale = scale_item.value(0)
output = ' nodesetid %d %d %s\n' % (nsid, lcid, scale)
out.write(output)
    out.write(' end\n')
    return True
def write_bc_section(manager, section_config, categories, out):
'''
Writes boundary condition section
Most have the same general format
'''
att_list = manager.findAttributes(section_config.attribute_type)
if len(att_list) < 1:
return True
out.write('\n')
out.write(' %s\n' % section_config.title)
for att in att_list:
if not att.isMemberOf(categories):
continue
ent_set = att.associatedEntities()
# TODO sort by sideset number (is this a UserData thing?)
for ent in ent_set:
sideset = get_id_from_name(ent.name())
item = att.find('LoadCurve')
lcid = get_loadcurve_id(item)
if lcid is None:
lcid = -1
# TODO use format table? Only for non-standard form
item = att.find('Scale')
scale_item = smtk.attribute.to_concrete(item)
scale = get_item_value(scale_item)
out.write(' sideset %s %d %s\n' % (sideset, lcid, scale))
out.write(' end\n')
return True
def write_distance_section(manager, categories, out):
'''
Writes distance section using both Wall and Penetration attributes
'''
wlist = manager.findAttributes('Wall')
plist = manager.findAttributes('Penetration')
if (len(wlist) + len(plist)) < 1:
return True
out.write('\n')
out.write(' distance\n')
# Write walls first (no load curve, zero scale)
for att in wlist:
if not att.isMemberOf(categories):
continue
ent_set = att.associatedEntities()
# TODO sort by sideset number
for ent in ent_set:
sideset = get_id_from_name(ent.name())
out.write(' sideset %s -1 0.0\n' % sideset)
# Then write penetration atts
for att in plist:
ent_set = att.associatedEntities()
# TODO sort by sideset number
for ent in ent_set:
sideset = get_id_from_name(ent.name())
item = att.find('LoadCurve')
lcid = get_loadcurve_id(item)
if lcid is None:
lcid = -1
item = att.find('Scale')
scale_item = smtk.attribute.to_concrete(item)
scale = get_item_value(scale_item)
out.write(' sideset %s %d %s\n' % (sideset, lcid, scale))
out.write(' end\n')
return True
def write_velocity_section(manager, categories, out):
'''
Writes velocity boundary conditions as a custom section
Calls generic write_vector_bc_section() function
TODO - migrate to section_table
'''
config = ConfigData()
config.section_title = 'velocity'
config.attribute_types_labels = (
('VelXBoundaryCondition', 'velx'),
('VelYBoundaryCondition', 'vely'),
('VelZBoundaryCondition', 'velz')
)
return write_vector_bc_section(manager, config, categories, out)
def write_vector_bc_section(manager, config, categories, out):
'''
Internal method for writing multiple/labeled BCs in same section
'''
# Traverse all att types to generate dictionary of association info
bc_dict = dict() # key = sset number, value = list(att1, att2, ...)
for att_type, label in config.attribute_types_labels:
att_list = manager.findAttributes(att_type)
for att in att_list:
if not att.isMemberOf(categories):
continue
ent_set = att.associatedEntities()
for ent in ent_set:
sideset = get_id_from_name(ent.name())
ent_att_list = bc_dict.get(sideset)
if ent_att_list is None:
ent_att_list = list()
bc_dict[sideset] = ent_att_list
ent_att_list.append(att)
# Check that at least one attribute was found
if len(bc_dict) < 1:
return True
# Create dictionary of <attribute type, label>
label_dict = dict()
for att_type, label in config.attribute_types_labels:
label_dict[att_type] = label
out.write('\n')
out.write(' %s\n' % config.section_title)
# Traverse bc_dict to write output, sorted by sideset number
sideset_list = sorted(bc_dict.keys())
for sideset in sideset_list:
ent_att_list = bc_dict.get(sideset)
for att in ent_att_list:
label = label_dict.get(att.type())
item = att.find('LoadCurve')
lcid = get_loadcurve_id(item)
if lcid is None:
lcid = -1
item = att.find('Scale')
double_item = smtk.attribute.to_concrete(item)
scale = get_item_value(double_item)
out.write(' %s sideset %s %d %s\n' % \
(label, sideset, lcid, scale))
out.write(' end\n')
return True
def write_initial_conditions_section(manager, categories, out):
'''
Writes initial conditions section
Has custom logic for different turbulence models
'''
att_type = 'InitialConditions'
att_list = manager.findAttributes(att_type)
att = att_list[0]
if not att.isMemberOf(categories):
        return True
item = att.find('InitialConditions')
group_item = smtk.attribute.to_concrete(item)
# Get the turbulence model
turb_att_list = manager.findAttributes('BasicTurbulenceModel')
turb_att = turb_att_list[0]
item = turb_att.find('Method')
turb_method_item = smtk.attribute.to_concrete(item)
if turb_method_item.isEnabled():
turb_method = turb_method_item.value(0)
else:
turb_method = None
out.write('\n')
out.write(' # Initial Conditions\n')
out.write(' initial\n')
format_string = ' %s %s\n' # for individual items
# Traverse items in format table
format_list = format_table.get(att.type())
if format_list is None:
print 'WARNING: empty format list for %s' % att.type()
return False
for item_config in format_list:
item = group_item.find(item_config.name)
if item is None:
#print 'WARNING: No %s item found' % item_config.name
continue
if not item.isMemberOf(categories):
continue
# Filter turbulence ICs based on turb_method
if item_config.name == 'tke' and turb_method not in ['rng_ke', 'sst_kw']:
continue
elif item_config.name == 'itdr' and turb_method != 'rng_ke':
continue
elif item_config.name == 'itds' and turb_method != 'sst_kw':
continue
elif item_config.name == 'tv' and turb_method not in ['spalart_allmaras', 'spalart_allmaras_des']:
continue
concrete_item = smtk.attribute.to_concrete(item)
if isinstance(item_config.keyword, (list, tuple)):
# TODO Check that Item has enough values
for i in range(len(item_config.keyword)):
value = get_item_value(concrete_item, i)
out.write(format_string % (item_config.keyword[i], value))
else:
value = get_item_value(concrete_item)
out.write(format_string % (item_config.keyword, value))
out.write(' end\n')
return True
def write_body_force_section(manager, categories, out):
'''
Write the body_force (gravity source), heat_source, boussinesqforce and porous_drag
section of the cntl file. If no domain set is associated with a body force
then it is associated with all domain sets with a -1 for the set id. If there
are other attributes of the same type that are associated with a domain set then
the unassociated domain set is not written out.
'''
att_types = ['GravityForce', 'BoussinesqForce', 'porous_drag', 'HeatSource']
for att_type in att_types:
att_list = manager.findAttributes(att_type)
if not att_list:
continue
# Traverse once to find any unassociated attributes
# These become "default" value for domain sets
unassociated_att = None
for att in att_list:
if not att.isMemberOf(categories):
continue
if 0 == att.numberOfAssociatedEntities():
            if unassociated_att:
                msg = 'WARNING: more than one unassociated %s attribute.' % \
                    att_type
                msg += ' Using \"%s\" and ignoring \"%s\"' % \
                    (unassociated_att.name(), att.name())
                print msg
                continue  # keep the first one, as the warning message says
            unassociated_att = att
# Traverse again to actually write the output.
# Keep track of which domains get output.
have_associated_entities = False
for att in att_list:
entities = att.associatedEntities()
for entity in entities:
write_body_force(att, entity, out)
have_associated_entities = True
# Write default values for unassociated domain sets
if unassociated_att is not None:
if have_associated_entities:
msg = 'WARNING: Cannot write body force %s for unassociated attribute %s.' % \
(unassociated_att.type(), unassociated_att.name())
msg += ' This is because there exists an associated body force attribute that would conflict.'
print msg
else:
                write_body_force(unassociated_att, None, out)
    return True
def write_body_force(att, entity, out):
'''Writes body force for one attribute-entity pair.
'''
#print 'Writing', att.type(), 'for', entity.name()
att_keywords = {'GravityForce' : ['fx', 'fy', 'fz'],
'BoussinesqForce' : ['gx', 'gy', 'gz'],
'porous_drag' : ['amp'],
'HeatSource' : ['Q'] }
att_cards = {'GravityForce' : 'body_force',
'BoussinesqForce' : 'boussinesqforce',
'porous_drag' : 'porous_drag',
'HeatSource' : 'heat_source' }
att_type = att.type()
out.write('\n')
out.write(' %s\n' % att_cards[att_type])
if entity:
out.write(' set %s\n' % get_id_from_name(entity.name()))
else:
out.write(' set -1\n') # all entities
# Load curve item has same name as attribute (our policy)
item = att.find(att_type)
loadcurve_id = get_loadcurve_id(item)
if loadcurve_id is not None:
out.write(' lcid %d\n' % loadcurve_id)
# Value is 'Scale' item
keywords = att_keywords[att_type]
item = att.find('Scale')
double_item = smtk.attribute.to_concrete(item)
for i, keyword in enumerate(keywords):
value = double_item.value(i)
out.write(' %s %f\n' % (keyword, value))
out.write(' end\n')
def write_item(manager, categories, out, attribute_type, *item_names):
'''
Used for custom sections, retrieves and writes one item
The code traverses the manager and format_table in lockstep
'''
item = find_item(manager, attribute_type, *item_names)
if item is None:
print 'Item %s:%s not found' % (attribute_type, '/'.join(item_names))
return False
# Check categories
if not item.isMemberOf(categories):
return
item_config = find_item_config(attribute_type, *item_names)
if item_config is None:
print 'Format for Item %s:%s not found' % \
(attribute_type, '/'.join(item_names))
return False
    out.write(' %s %s\n' % (item_config.keyword, item.value(0)))
    return True
def write_item_tree(item, item_config, format_string, out, indent=None):
'''
Writes item plus any conditional children (recursively)
'''
item = smtk.attribute.to_concrete(item)
# Keyword may be single string *or* list of strings
if isinstance(item_config.keyword, (list, tuple)):
# TODO Check that Item has enough values
for i in range(len(item_config.keyword)):
            value = get_item_value(item, i)
out.write(format_string % (item_config.keyword[i], value))
else:
value = get_item_value(item)
out.write(format_string % (item_config.keyword, value))
# Process any conditional children
if item_config.item_format_list is not None:
if indent is not None:
format_string = '%s%s' % (indent, format_string)
# Construct dictionary of subitems by name
subitem_dict = dict()
num_subitems = item.numberOfActiveChildrenItems()
for i in range(num_subitems):
subitem = item.activeChildItem(i)
subitem_dict[subitem.name()] = subitem
# Traverse subitem config instances
for subitem_config in item_config.item_format_list:
subitem = subitem_dict.get(subitem_config.name)
if subitem is None:
continue
write_item_tree(subitem, subitem_config, format_string, out)
materialCounter = 1 # acbauer -- global counter for materials
materialSetCounter = 1 # acbauer -- global counter for material sets
def write_section(manager, section_config, categories, out):
'''
Writes one section of output file
Returns boolean reflecting success/fail
'''
if section_config.type == 'custom':
custom_dict = {
#'distance': write_distance_section,
'output': write_output_section,
'turbulence': write_turbulence_section,
'histvar': write_histvar_section,
'hydrostat': write_hydrostat_section,
'velocity': write_velocity_section,
'InitialConditions': write_initial_conditions_section,
'BodyForce': write_body_force_section
}
f = custom_dict.get(section_config.section_name)
if f:
return f(manager, categories, out)
plotvar_dict = {
'plotvar': write_plotvar_section,
'plotstatvar': write_plotvar_section
}
f = plotvar_dict.get(section_config.section_name)
if f is None:
print 'WARNING - Cannot find custom function for %s' % \
section_config.section_name
return False
return f(manager, categories, out, section_config.section_name)
elif section_config.type == 'boundary_condition':
return write_bc_section(manager, section_config, categories, out)
att_list = manager.findAttributes(section_config.attribute_type)
if not att_list:
print 'WARNING - NO %s attribute found' % section_config.attribute_type
return False
elif len(att_list) > 1:
print 'WARNING - Found %d attributes of type %s - using first one' % \
(len(att_list), section_config.attribute_type)
att = att_list[0]
# If not in categories, don't write the section
if not att.isMemberOf(categories):
return True
parent = att
if section_config.group_name is not None:
group = att.find(section_config.group_name)
if group is None:
print 'WARNING - NO %s group item found' % section_config.group_name
return False
parent = smtk.attribute.to_concrete(group)
out.write('\n')
if section_config.comment is not None:
out.write(' # %s\n' % section_config.comment)
format_string = ' %s %s\n'
if section_config.title is not None:
out.write(' %s\n' % section_config.title)
format_string = ' ' + format_string
# Special logic for material models (id)
if att.type() == 'Material':
global materialCounter
matstring = ' id ' + str(materialCounter) + '\n'
att.materialId = materialCounter
materialCounter = materialCounter + 1
out.write(matstring)
format_list = format_table.get(section_config.attribute_type)
if format_list is None:
print 'WARNING: empty format list for %s' % att.type()
return False
for item_config in format_list:
item = parent.find(item_config.name)
if item is None:
print 'WARNING: No %s item found' % item_config.name
continue
        # Check item categories
        if not item.isMemberOf(categories):
            continue
write_item_tree(item, item_config, format_string, out)
if section_config.title is not None:
out.write(' end\n')
# Special logic for materialset -- hacked by acbauer
if att.type() == 'Material':
global materialSetCounter
out.write('\n materialset\n')
out.write(' id %i\n' % materialSetCounter)
materialSetCounter = materialSetCounter+1
out.write(' material %i\n' % att.materialId)
entities = att.associatedEntities()
for entity in entities:
out.write(' block %s\n' % get_id_from_name(entity.name()))
out.write(' end\n')
return True
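# A hypothetical sketch of a section_table entry as consumed by write_section()
# above. The field names come from the accesses in this file; the values are
# purely illustrative:
#   cfg = ConfigData()
#   cfg.type = 'standard'   # anything other than 'custom'/'boundary_condition'
#   cfg.attribute_type = 'Material'
#   cfg.group_name = None
#   cfg.comment = 'Material properties'
#   cfg.title = 'material'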
def get_item_value(item, index=0):
'''
Returns Item value, handling VoidItem as a special case
'''
value = None
if item.type() == smtk.attribute.Item.VOID:
value = 'on' if item.isEnabled() else 'off'
else:
value = item.value(index)
return value
def find_item(manager, attribute_type, *item_names):
'''
Finds and returns Item specified by attribute and list (path) of ancestor Items
'''
# Get attribute
att_list = manager.findAttributes(attribute_type)
if not att_list:
return None
elif len(att_list) > 1:
print 'WARNING - Found %d attributes of type %s - using first one' % \
(len(att_list), attribute_type)
att = att_list[0]
parent = att
# Get items in sequence
for item_name in item_names:
item = parent.find(item_name)
if item is None:
return None
parent = smtk.attribute.to_concrete(item)
# Need to check that parent is Group?
return parent
def find_item_config(attribute_type, *item_names):
'''
Finds and returns Item config from format_table
'''
#print 'attribute_type', attribute_type, 'item_names', item_names
config_list = format_table.get(attribute_type)
if config_list is None:
return None
#print 'config_list', config_list
# Traverse item_names in sequence
matching_config = None
for item_name in item_names:
# Traverse format_table[] for current item_name
matching_config = None
for config in config_list:
#print 'config', config.__dict__
if config.name == item_name:
matching_config = config
break
if matching_config is None:
return None
elif hasattr(matching_config, 'item_format_list'):
config_list = matching_config.item_format_list
return matching_config
def get_loadcurve_id(item):
'''Returns load curve id for smtk.attribute.DoubleItem
Uses global lcid_dictionary.
'''
if item is None or not item.isEnabled():
return None
double_item = smtk.attribute.to_concrete(item)
if double_item is None:
print 'ERROR: %s item is not DoubleItem' % item.name()
return None
if not double_item.isExpression(0):
print 'WARNING: %s item is not expression' % item.name()
return None
if not double_item.isSet(0):
return None
exp_ref = double_item.expressionReference(0)
exp_att = exp_ref.value(0)
name = exp_att.name()
global lcid_dictionary
lcid = lcid_dictionary.get(name)
if lcid is None:
lcid = len(lcid_dictionary) + 1
print 'Assign lcid %d to function \"%s\"' % (lcid, name)
lcid_dictionary[name] = lcid
return lcid
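# Sketch of the id assignment above: ids are handed out on first lookup and
# cached in lcid_dictionary, so every item referencing the same expression
# attribute shares one load curve id (item names are illustrative):
#   get_loadcurve_id(scale_item_a)   # expression "inlet_ramp" -> assigns 1
#   get_loadcurve_id(scale_item_b)   # same expression         -> returns 1
#   get_loadcurve_id(scale_item_c)   # expression "heat_pulse" -> assigns 2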
def write_load_curves(manager, out):
'''Writes all load curves in the lcid_dictionary.
'''
# Sort by id (dictionary value)
lc_tuples = sorted(lcid_dictionary.items(), key=lambda t:t[1])
for name, lcid in lc_tuples:
att = manager.findAttribute(name)
out.write('\n')
out.write(' # Load Curve \"%s\"\n' % name)
out.write(' load_curve\n')
out.write(' id %s\n' % lcid)
val_pairs_item = att.find('ValuePairs')
val_pairs_group = smtk.attribute.GroupItem.CastTo(val_pairs_item)
x_item = val_pairs_group.find('X')
x_item = smtk.attribute.DoubleItem.CastTo(x_item)
val_item = val_pairs_group.find('Value')
val_item = smtk.attribute.DoubleItem.CastTo(val_item)
num_vals = x_item.numberOfValues()
for i in range(num_vals):
out.write(' %.10e %.10e\n' % (x_item.value(i), val_item.value(i)))
out.write(' end\n')
|
|
import urllib2
import re
from bs4 import BeautifulSoup
from urlparse import urljoin
from pysqlite2 import dbapi2 as sqlite
import nn
mynet = nn.searchnet('nn.db')
# Create a list of words to ignore
ignorewords = {'the': 1, 'of': 1, 'to': 1,
'and': 1, 'a': 1, 'in': 1, 'is': 1, 'it': 1}
class crawler:
# Initialize the crawler with the name of database
def __init__(self, dbname):
self.con = sqlite.connect(dbname, timeout=10)
def __del__(self):
self.con.close()
def dbcommit(self):
self.con.commit()
    # Auxiliary function for getting an entry id and adding
# it if it's not present
def getentryid(self, table, field, value, createnew=True):
cur = self.con.execute(
"select rowid from %s where %s='%s'" % (table, field, value))
res = cur.fetchone()
if res == None:
cur = self.con.execute(
"insert into %s (%s) values ('%s')" % (table, field, value))
return cur.lastrowid
else:
return res[0]
# Index an individual page
def addtoindex(self, url, soup):
if self.isindexed(url):
return
print 'Indexing ' + url
# Get the individual words
text = self.gettextonly(soup)
words = self.separatewords(text)
# Get the URL id
urlid = self.getentryid('urllist', 'url', url)
# Link each word to this url
for i in xrange(len(words)):
word = words[i]
if word in ignorewords:
continue
wordid = self.getentryid('wordlist', 'word', word)
self.con.execute(
"insert into wordlocation(urlid,wordid,location) values (%d,%d,%d)" % (urlid, wordid, i))
# Extract the text from an HTML page (no tags)
def gettextonly(self, soup):
v = soup.string
if v == None:
c = soup.contents
resulttext = ''
for t in c:
subtext = self.gettextonly(t)
resulttext += subtext + '\n'
return resulttext
else:
return v.strip()
    # Separate the words by any non-word (non-alphanumeric) character
def separatewords(self, text):
splitter = re.compile('\\W*')
return [s.lower() for s in splitter.split(text) if s != '']
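    # For example, the regex splits on runs of non-word characters and the
    # comprehension lowercases the pieces and drops empty strings:
    #   separatewords('Functional programming, in Python!')
    #   -> ['functional', 'programming', 'in', 'python']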
# Return true if this url is already indexed
def isindexed(self, url):
u = self.con.execute(
"select rowid from urllist where url='%s'" % url).fetchone()
if u != None:
v = self.con.execute(
"select * from wordlocation where urlid=%d" % u[0]).fetchone()
if v != None:
return True
return False
# Add a link between two pages
def addlinkref(self, urlFrom, urlTo, linkText):
words = self.separatewords(linkText)
fromid = self.getentryid('urllist', 'url', urlFrom)
toid = self.getentryid('urllist', 'url', urlTo)
if fromid == toid:
return
cur = self.con.execute(
"insert into link(fromid,toid) values (%d,%d)" % (fromid, toid))
linkid = cur.lastrowid
for word in words:
if word in ignorewords:
continue
wordid = self.getentryid('wordlist', 'word', word)
self.con.execute(
"insert into linkwords(linkid,wordid) values (%d,%d)" % (linkid, wordid))
# Starting with a list of pages, do a breadth
# first search to the given depth, indexing pages
# as we go
def crawl(self, pages, depth=2):
for i in xrange(depth):
newpages = set()
for page in pages:
try:
c = urllib2.urlopen(page)
except:
print "Could not open %s" % page
continue
try:
soup = BeautifulSoup(c.read())
self.addtoindex(page, soup)
links = soup('a')
for link in links:
if ('href' in dict(link.attrs)):
url = urljoin(page, link['href'])
if url.find("'") != -1:
continue
url = url.split('#')[0] # remove location portion
if url[0:4] == 'http' and not self.isindexed(url):
newpages.add(url)
linkText = self.gettextonly(link)
self.addlinkref(page, url, linkText)
self.dbcommit()
                except Exception as e:
                    print "Could not parse page %s: %s" % (page, e)
pages = newpages
# Create the database tables
def createindextables(self):
self.con.execute('create table urllist(url)')
self.con.execute('create table wordlist(word)')
self.con.execute('create table wordlocation(urlid,wordid,location)')
self.con.execute('create table link(fromid integer,toid integer)')
self.con.execute('create table linkwords(wordid,linkid)')
self.con.execute('create index wordidx on wordlist(word)')
self.con.execute('create index urlidx on urllist(url)')
self.con.execute('create index wordurlidx on wordlocation(wordid)')
self.con.execute('create index urltoidx on link(toid)')
self.con.execute('create index urlfromidx on link(fromid)')
self.dbcommit()
def calculatepagerank(self, iterations=20):
# clear out the current page rank tables
self.con.execute('drop table if exists pagerank')
self.con.execute('create table pagerank(urlid primary key,score)')
# initialize every url with a page rank of 1
self.con.execute('insert into pagerank select rowid, 1.0 from urllist')
self.dbcommit()
for i in xrange(iterations):
print "Iteration %d" % (i)
for (urlid,) in self.con.execute('select rowid from urllist'):
pr = 0.15
# Loop through all the pages that link to this one
for (linker,) in self.con.execute(
'select distinct fromid from link where toid=%d' % urlid):
# Get the page rank of the linker
linkingpr = self.con.execute(
'select score from pagerank where urlid=%d' % linker).fetchone()[0]
# Get the total number of links from the linker
linkingcount = self.con.execute(
'select count(*) from link where fromid=%d' % linker).fetchone()[0]
pr += 0.85 * (linkingpr / linkingcount)
self.con.execute(
'update pagerank set score=%f where urlid=%d' % (pr, urlid))
self.dbcommit()
class searcher:
def __init__(self, dbname):
self.con = sqlite.connect(dbname)
def __del__(self):
self.con.close()
def getmatchrows(self, q):
# Strings to build the query
fieldlist = 'w0.urlid'
tablelist = ''
clauselist = ''
wordids = []
# Split the words by spaces
words = q.split(' ')
tablenumber = 0
for word in words:
# Get the word ID
wordrow = self.con.execute(
"select rowid from wordlist where word='%s'" % word).fetchone()
if wordrow != None:
wordid = wordrow[0]
wordids.append(wordid)
if tablenumber > 0:
tablelist += ','
clauselist += ' and '
clauselist += 'w%d.urlid=w%d.urlid and ' % (
tablenumber - 1, tablenumber)
fieldlist += ',w%d.location' % tablenumber
tablelist += 'wordlocation w%d' % tablenumber
clauselist += 'w%d.wordid=%d' % (tablenumber, wordid)
tablenumber += 1
# Create the query from the separate parts
fullquery = 'select %s from %s where %s' % (
fieldlist, tablelist, clauselist)
print fullquery
cur = self.con.execute(fullquery)
rows = [row for row in cur]
return rows, wordids
def getscoredlist(self, rows, wordids):
totalscores = dict([(row[0], 0) for row in rows])
# This is where we'll put our scoring functions
weights = [(1.0, self.locationscore(rows)),
(1.0, self.frequencyscore(rows)),
(1.0, self.pagerankscore(rows)),
(1.0, self.linktextscore(rows, wordids)),
(5.0, self.nnscore(rows, wordids))]
for (weight, scores) in weights:
for url in totalscores:
totalscores[url] += weight * scores[url]
return totalscores
def geturlname(self, id):
return self.con.execute(
"select url from urllist where rowid=%d" % id).fetchone()[0]
def query(self, q):
rows, wordids = self.getmatchrows(q)
scores = self.getscoredlist(rows, wordids)
rankedscores = [(score, url) for (url, score) in scores.items()]
rankedscores.sort()
rankedscores.reverse()
for (score, urlid) in rankedscores[0:10]:
print '%f\t%s' % (score, self.geturlname(urlid))
return wordids, [r[1] for r in rankedscores[0:10]]
def normalizescores(self, scores, smallIsBetter=0):
vsmall = 0.00001 # Avoid division by zero errors
if smallIsBetter:
minscore = min(scores.values())
return dict([(u, float(minscore) / max(vsmall, l)) for (u, l) in scores.items()])
else:
maxscore = max(scores.values())
if maxscore == 0:
maxscore = vsmall
return dict([(u, float(c) / maxscore) for (u, c) in scores.items()])
def frequencyscore(self, rows):
counts = dict([(row[0], 0) for row in rows])
for row in rows:
counts[row[0]] += 1
return self.normalizescores(counts)
def locationscore(self, rows):
locations = dict([(row[0], 1000000) for row in rows])
for row in rows:
loc = sum(row[1:])
if loc < locations[row[0]]:
locations[row[0]] = loc
return self.normalizescores(locations, smallIsBetter=1)
def distancescore(self, rows):
# If there's only one word, everyone wins!
if len(rows[0]) <= 2:
return dict([(row[0], 1.0) for row in rows])
# Initialize the dictionary with large values
mindistance = dict([(row[0], 1000000) for row in rows])
for row in rows:
dist = sum([abs(row[i] - row[i - 1]) for i in xrange(2, len(row))])
if dist < mindistance[row[0]]:
mindistance[row[0]] = dist
return self.normalizescores(mindistance, smallIsBetter=1)
def inboundlinkscore(self, rows):
uniqueurls = dict([(row[0], 1) for row in rows])
inboundcount = dict([(u, self.con.execute(
'select count(*) from link where toid=%d' % u).fetchone()[0]) for u in uniqueurls])
return self.normalizescores(inboundcount)
def linktextscore(self, rows, wordids):
linkscores = dict([(row[0], 0) for row in rows])
for wordid in wordids:
cur = self.con.execute(
'select link.fromid,link.toid from linkwords,link where wordid=%d and linkwords.linkid=link.rowid' % wordid)
for (fromid, toid) in cur:
if toid in linkscores:
pr = self.con.execute(
'select score from pagerank where urlid=%d' % fromid).fetchone()[0]
linkscores[toid] += pr
        maxscore = max(max(linkscores.values()), 0.00001)  # avoid division by zero
normalizedscores = dict([(u, float(l) / maxscore)
for (u, l) in linkscores.items()])
return normalizedscores
def pagerankscore(self, rows):
pageranks = dict([(row[0], self.con.execute(
'select score from pagerank where urlid=%d' % row[0]).fetchone()[0]) for row in rows])
maxrank = max(pageranks.values())
normalizedscores = dict([(u, float(l) / maxrank)
for (u, l) in pageranks.items()])
return normalizedscores
def nnscore(self, rows, wordids):
# Get unique URL IDs as an ordered list
urlids = [urlid for urlid in dict([(row[0], 1) for row in rows])]
nnres = mynet.getresult(wordids, urlids)
scores = dict([(urlids[i], nnres[i]) for i in xrange(len(urlids))])
return self.normalizescores(scores)
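# A minimal usage sketch, assuming a fresh database file, a reachable seed URL,
# and a prepared nn.db for the neural-network scorer (all names below are
# illustrative). createindextables() must be called only once per database:
if __name__ == '__main__':
    c = crawler('searchindex.db')
    c.createindextables()
    c.crawl(['http://example.com/'], depth=1)
    c.calculatepagerank()
    s = searcher('searchindex.db')
    s.query('example domain')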
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to add support for magnitude-based model pruning.
# Adds variables and ops to the graph to enable
# elementwise masking of weights
apply_mask(weights)
# Returns a list containing the sparsity of each of the weight tensors
get_weight_sparsity()
# Returns a list of all the masked weight tensorflow variables
get_masked_weights()
# Returns a list of all the mask tensorflow variables
get_masks()
# Returns a list of all the thresholds
get_thresholds()
# Returns a list of all the weight tensors that have been masked
get_weights()
The Pruning class uses a proto (defined in pruning.proto) to set up the
parameters for a pruning specification. Here's a typical usage:
# Initialize a pruning spec from a proto
pruning_spec = '/tmp/pruning.pb'
p = Pruning(pruning_spec)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Add the summaries
p.add_pruning_summaries()
# Run the op
session.run(mask_update_op)
  # A Pruning object also accepts an externally defined sparsity variable:
sparsity = tf.Variable(0.5, name = "ConstantSparsity")
pruning_spec = '/tmp/pruning.pb'
p = Pruning(pruning_spec, sparsity=sparsity)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_MASK_COLLECTION = core.MASK_COLLECTION
_THRESHOLD_COLLECTION = core.THRESHOLD_COLLECTION
_MASKED_WEIGHT_COLLECTION = core.MASKED_WEIGHT_COLLECTION
_WEIGHT_COLLECTION = core.WEIGHT_COLLECTION
_MASKED_WEIGHT_NAME = core.MASKED_WEIGHT_NAME
def _weight_mask_variable(var, scope):
"""Create a mask for the weights.
This function adds a variable 'mask' to the graph.
Args:
var: the weight variable that needs to be masked
scope: The variable scope of the variable var
Returns:
the mask variable of the same size and shape as var, initialized to all 1s.
"""
with variable_scope.variable_scope(scope):
mask = variable_scope.get_variable(
'mask',
var.get_shape(),
initializer=init_ops.ones_initializer(),
trainable=False,
dtype=var.dtype)
return mask
def _weight_threshold_variable(var, scope):
"""Create a scalar threshold for the weights.
This function adds a variable
'threshold' to the graph.
Args:
var: The weight variable that needs to be masked
scope: The variable scope of the variable var
Returns:
a scalar threshold variable initialized to 0.
"""
with variable_scope.variable_scope(scope):
threshold = variable_scope.get_variable(
'threshold', [],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=var.dtype)
return threshold
def _histogram(values, value_range, nbins=100, dtype=np.int32, name=None):
"""Return histogram of values.
Given the tensor `values`, this operation returns a rank 1 histogram counting
the number of entries in `values` that fell into every bin. The bins are
equal width and determined by the arguments `value_range` and `nbins`.
Args:
values: Numeric `Tensor`.
value_range: Shape [2] `Tensor` of same `dtype` as `values`.
values <= value_range[0] will be mapped to hist[0],
values >= value_range[1] will be mapped to hist[-1].
nbins: Scalar `int32 Tensor`. Number of histogram bins.
dtype: dtype for returned histogram.
name: A name for this operation (defaults to 'histogram').
Returns:
A 1-D `Tensor` holding histogram of values.
"""
with ops.name_scope(name, 'histogram', [values, value_range, nbins]) as scope:
values = ops.convert_to_tensor(values, name='values')
values = gen_array_ops.reshape(values, [-1])
value_range = ops.convert_to_tensor(value_range, name='value_range')
nbins = ops.convert_to_tensor(nbins, dtype=np.int32, name='nbins')
nbins_float = math_ops.cast(nbins, values.dtype)
# Map tensor values that fall within value_range to [0, 1].
scaled_values = math_ops.truediv(
values - value_range[0],
value_range[1] - value_range[0],
name='scaled_values')
# map tensor values within the open interval value_range to {0,.., nbins-1},
# values outside the open interval will be zero or less, or nbins or more.
indices = math_ops.floor(nbins_float * scaled_values, name='indices')
# Clip edge cases (e.g. value = value_range[1]) or "outliers."
indices = math_ops.cast(
clip_ops.clip_by_value(indices, 0, nbins_float - 1), np.int32)
return math_ops.unsorted_segment_sum(
array_ops.ones_like(indices, dtype=dtype), indices, nbins, name=scope)
def _determine_partitioned_axis(partitioned_variable):
partitioned_axis = 0
concatenated_variable_shape = partitioned_variable.get_shape()
for partition in partitioned_variable:
partition_shape = partition.get_shape()
maybe_partitioned_axis = np.less(partition_shape,
concatenated_variable_shape)
# Sanity check: make sure number of partitioned axis == 1
if np.count_nonzero(maybe_partitioned_axis) != 1:
raise ValueError('Number of partitioned axes %s not equal to 1' %
np.count_nonzero(maybe_partitioned_axis))
partitioned_axis = np.where(maybe_partitioned_axis)[0][0]
return partitioned_axis
def _variable_assign(var, new_value):
return state_ops.assign(var, new_value, name=var.op.name + '_assign')
def _partitioned_variable_assign(partitioned_var, new_value):
"""Assign op for partitioned variables.
Args:
    partitioned_var: A partitioned tensorflow variable
new_value: Value to be assigned to the variable var
Returns:
A tensorflow op that groups the assign ops for each of the variable slices
"""
# Determine which axis was used to partition the variable. Currently
# tensorflow allows partitioning variable only along 1 axis.
axis = 0 if len(partitioned_var) == 1 else _determine_partitioned_axis(
partitioned_var)
partition_sizes = np.array(
[partition.get_shape()[axis] for partition in partitioned_var])
new_partitioned_values = array_ops.split(
new_value,
ops.convert_to_tensor(partition_sizes, dtype=np.int32),
axis=axis)
op_list = []
for partition in partitioned_var:
op_list.append(
_variable_assign(partition, new_partitioned_values[len(op_list)]))
return control_flow_ops.group(
*op_list, name=partitioned_var.name + '_group_assign')
def apply_mask(x, scope=''):
"""Apply mask to a given weight tensor.
Args:
x: Input weight tensor
scope: The current variable scope. Defaults to ""
Returns:
Tensor representing masked_weights
"""
mask = _weight_mask_variable(x, scope)
threshold = _weight_threshold_variable(x, scope)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
masked_weights = math_ops.multiply(mask, x, _MASKED_WEIGHT_NAME)
  # Make sure the mask for a given variable is not added multiple times to the
# collection. This is particularly important when applying mask to RNN's
# weight variables
if mask not in ops.get_collection_ref(_MASK_COLLECTION):
ops.add_to_collection(_THRESHOLD_COLLECTION, threshold)
ops.add_to_collection(_MASK_COLLECTION, mask)
ops.add_to_collection(_MASKED_WEIGHT_COLLECTION, masked_weights)
ops.add_to_collection(_WEIGHT_COLLECTION, x)
return masked_weights
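# A minimal sketch of masking a single layer's weights (all names below are
# illustrative, not part of this module's API):
#   with variable_scope.variable_scope('fc1') as scope:
#     weights = variable_scope.get_variable('weights', [784, 10])
#     masked_weights = apply_mask(weights, scope)
#     logits = math_ops.matmul(inputs, masked_weights)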
def get_masked_weights():
return ops.get_collection(_MASKED_WEIGHT_COLLECTION)
def get_masks():
return ops.get_collection(_MASK_COLLECTION)
def get_thresholds():
return ops.get_collection(_THRESHOLD_COLLECTION)
def get_weights():
return ops.get_collection(_WEIGHT_COLLECTION)
def get_weight_sparsity():
"""Get sparsity of the weights.
Args:
None
Returns:
A list containing the sparsity of each of the weight tensors
"""
masks = get_masks()
return [nn_impl.zero_fraction(mask) for mask in masks]
def get_pruning_hparams():
"""Get a tf.HParams object with the default values for the hyperparameters.
name: string
name of the pruning specification. Used for adding summaries and ops under
a common tensorflow name_scope
begin_pruning_step: integer
the global step at which to begin pruning
end_pruning_step: integer
the global step at which to terminate pruning. Defaults to -1 implying
that pruning continues till the training stops
do_not_prune: list of strings
list of layers that are not pruned
threshold_decay: float
the decay factor to use for exponential decay of the thresholds
pruning_frequency: integer
How often should the masks be updated? (in # of global_steps)
nbins: integer
number of bins to use for histogram computation
initial_sparsity: float
initial sparsity value
target_sparsity: float
target sparsity value
sparsity_function_begin_step: integer
      the global step at which the gradual sparsity function begins to
take effect
sparsity_function_end_step: integer
the global step used as the end point for the gradual sparsity function
sparsity_function_exponent: float
      exponent = 1 varies sparsity linearly between the initial and final values.
exponent > 1 varies more slowly towards the end than the beginning
We use the following sparsity function:
num_steps = (sparsity_function_end_step -
sparsity_function_begin_step)/pruning_frequency
sparsity(step) = (initial_sparsity - target_sparsity)*
[1-step/(num_steps -1)]**exponent + target_sparsity
Args:
None
Returns:
tf.HParams object initialized to default values
"""
return hparam.HParams(
name='model_pruning',
begin_pruning_step=0,
end_pruning_step=-1,
do_not_prune=[''],
threshold_decay=0.9,
pruning_frequency=10,
nbins=255,
initial_sparsity=0,
target_sparsity=0.5,
sparsity_function_begin_step=0,
sparsity_function_end_step=100,
sparsity_function_exponent=3)
class Pruning(object):
def __init__(self, spec=None, global_step=None, sparsity=None):
"""Set up the specification for model pruning.
If a spec is provided, the sparsity is set up based on the sparsity_function
in the spec. The effect of sparsity_function is overridden if the sparsity
variable is passed to the constructor. This enables setting up arbitrary
    sparsity profiles externally and passing them to the pruning functions.
Args:
spec: Pruning spec as defined in pruning.proto
global_step: A tensorflow variable that is used while setting up the
sparsity function
sparsity: A tensorflow scalar variable storing the sparsity
"""
# Pruning specification
self._spec = spec if spec else get_pruning_hparams()
# A tensorflow variable that tracks the sparsity function.
# If not provided as input, the graph must already contain the global_step
# variable before calling this constructor.
self._global_step = self._setup_global_step(global_step)
# Stores the tensorflow sparsity variable.
# Built using self._setup_sparsity() or provided externally
self._sparsity = sparsity if sparsity else self._setup_sparsity()
# List of tensorflow assignments ops for new masks and thresholds
self._assign_ops = []
# Tensorflow variable keeping track of the last global step when the masks
# were updated
self._last_update_step = self._setup_last_update_step()
def _setup_global_step(self, global_step):
graph_global_step = global_step
if graph_global_step is None:
graph_global_step = training_util.get_global_step()
return math_ops.cast(graph_global_step, np.int32)
def _setup_sparsity(self):
begin_step = self._spec.sparsity_function_begin_step
end_step = self._spec.sparsity_function_end_step
initial_sparsity = self._spec.initial_sparsity
target_sparsity = self._spec.target_sparsity
exponent = self._spec.sparsity_function_exponent
if begin_step >= end_step:
raise ValueError(
'Pruning must begin before it can end. begin_step=%d, end_step=%d' %
(begin_step, end_step))
with ops.name_scope(self._spec.name):
p = math_ops.minimum(1.0,
math_ops.maximum(
0.0,
math_ops.div(
math_ops.cast(self._global_step - begin_step,
np.float32),
end_step - begin_step)))
sparsity = math_ops.add(
math_ops.multiply(initial_sparsity - target_sparsity,
math_ops.pow(1 - p, exponent)),
target_sparsity,
name='sparsity')
return sparsity
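  # Worked example of the schedule above with the default hparams
  # (initial_sparsity=0, target_sparsity=0.5, begin_step=0, end_step=100,
  # exponent=3): at global step 50, p = 0.5 and
  #   sparsity = (0 - 0.5) * (1 - 0.5)**3 + 0.5 = 0.4375,
  # approaching 0.5 as p reaches 1.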
def _setup_last_update_step(self):
with variable_scope.variable_scope(self._spec.name) as scope:
try:
last_update_step = variable_scope.get_variable(
'last_mask_update_step', [],
initializer=init_ops.zeros_initializer(),
trainable=False,
dtype=np.int32)
except ValueError:
scope.reuse_variables()
last_update_step = variable_scope.get_variable(
'last_mask_update_step', dtype=np.int32)
return last_update_step
def _exists_in_do_not_prune_list(self, tensor_name):
do_not_prune_list = self._spec.do_not_prune
if not do_not_prune_list[0]:
return False
for layer_name in do_not_prune_list:
if tensor_name.find(layer_name) != -1:
return True
return False
def _update_mask(self, weights, threshold):
"""Updates the mask for a given weight tensor.
This functions first computes the cdf of the weight tensor, and estimates
the threshold value such that 'desired_sparsity' fraction of weights
have magnitude less than the threshold.
Args:
weights: The weight tensor that needs to be masked.
threshold: The current threshold value. The function will compute a new
threshold and return the exponential moving average using the current
value of threshold
Returns:
new_threshold: The new value of the threshold based on weights, and
desired_sparsity
new_mask: A n-D numpy array containing 0 or 1 to indicate which of the
values in weights falls below the threshold
Raises:
ValueError: if sparsity is not defined
"""
if self._sparsity is None:
raise ValueError('Sparsity variable undefined')
with ops.name_scope(weights.op.name + '_pruning_ops'):
abs_weights = math_ops.abs(weights)
max_value = math_ops.reduce_max(abs_weights)
histogram = _histogram(
abs_weights, [0.0, max_value],
nbins=self._spec.nbins,
dtype=np.float32)
cdf = math_ops.cumsum(histogram)
norm_cdf = math_ops.div(cdf, math_ops.reduce_sum(histogram))
current_threshold = math_ops.multiply(
math_ops.div(
math_ops.reduce_sum(
math_ops.cast(
math_ops.less(norm_cdf, self._sparsity), np.float32)),
float(self._spec.nbins)), max_value)
smoothed_threshold = math_ops.add_n([
math_ops.multiply(current_threshold, 1 - self._spec.threshold_decay),
math_ops.multiply(threshold, self._spec.threshold_decay)
])
new_mask = math_ops.cast(
math_ops.greater(abs_weights, smoothed_threshold), np.float32)
return smoothed_threshold, new_mask
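  # Sketch of the estimate above: with nbins=4 and norm_cdf = [0.2, 0.5, 0.8,
  # 1.0], a desired sparsity of 0.6 leaves two bins below the target, so
  # current_threshold = (2 / 4.0) * max_value; that estimate is then blended
  # with the previous threshold using threshold_decay.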
def _get_mask_assign_ops(self):
# Make sure the assignment ops have not already been added to the list
if self._assign_ops:
raise ValueError(
'Assign op list not empty. _get_mask_assign_ops() called twice?')
masks = get_masks()
weights = get_weights()
thresholds = get_thresholds()
if len(masks) != len(thresholds):
raise ValueError(
'Number of masks %s and number of thresholds %s mismatch' %
(len(masks), len(thresholds)))
for index, mask in enumerate(masks):
threshold = thresholds[index]
weight = weights[index]
is_partitioned = isinstance(weight, variables.PartitionedVariable)
if is_partitioned:
weight = weight.as_tensor()
if self._spec.do_not_prune:
if self._exists_in_do_not_prune_list(mask.name):
continue
new_threshold, new_mask = self._update_mask(weight, threshold)
self._assign_ops.append(_variable_assign(threshold, new_threshold))
self._assign_ops.append(
_partitioned_variable_assign(mask, new_mask)
if is_partitioned else _variable_assign(mask, new_mask))
def mask_update_op(self):
with ops.name_scope(self._spec.name):
if not self._assign_ops:
self._get_mask_assign_ops()
with ops.control_dependencies([
state_ops.assign(
self._last_update_step,
self._global_step,
name='last_mask_update_step_assign')
]):
with ops.control_dependencies(self._assign_ops):
logging.info('Updating masks.')
return control_flow_ops.no_op('mask_update')
def conditional_mask_update_op(self):
def maybe_update_masks():
with ops.name_scope(self._spec.name):
is_step_within_pruning_range = math_ops.logical_and(
math_ops.greater_equal(self._global_step,
self._spec.begin_pruning_step),
# If end_pruning_step is negative, keep pruning forever!
math_ops.logical_or(
math_ops.less_equal(self._global_step,
self._spec.end_pruning_step),
math_ops.less(self._spec.end_pruning_step, 0)))
is_pruning_step = math_ops.less_equal(
math_ops.add(self._last_update_step, self._spec.pruning_frequency),
self._global_step)
return math_ops.logical_and(is_step_within_pruning_range,
is_pruning_step)
def mask_update_op():
return self.mask_update_op()
def no_update_op():
return control_flow_ops.no_op()
return control_flow_ops.cond(maybe_update_masks(), mask_update_op,
no_update_op)
def add_pruning_summaries(self):
"""Adds summaries for this pruning spec.
Args: none
Returns: none
"""
with ops.name_scope(self._spec.name + '_summaries'):
summary.scalar('sparsity', self._sparsity)
summary.scalar('last_mask_update_step', self._last_update_step)
masks = get_masks()
thresholds = get_thresholds()
for index, mask in enumerate(masks):
if not self._exists_in_do_not_prune_list(mask.name):
summary.scalar(mask.name + '/sparsity', nn_impl.zero_fraction(mask))
summary.scalar(thresholds[index].op.name + '/threshold',
thresholds[index])
def print_hparams(self):
logging.info(self._spec.to_json())
|
|
#!/usr/bin/env python
#
# Copyright 2010 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# This is a utility for plotting charts based on GC traces produced by V8 when
# run with flags --trace-gc --trace-gc-nvp. Relies on gnuplot for actual
# plotting.
#
# Usage: gc-nvp-trace-processor.py <GC-trace-filename>
#
from __future__ import with_statement
import sys, types, subprocess, math
import gc_nvp_common
def flatten(l):
flat = []
for i in l: flat.extend(i)
return flat
def gnuplot(script):
gnuplot = subprocess.Popen(["gnuplot"], stdin=subprocess.PIPE)
gnuplot.stdin.write(script)
gnuplot.stdin.close()
gnuplot.wait()
x1y1 = 'x1y1'
x1y2 = 'x1y2'
x2y1 = 'x2y1'
x2y2 = 'x2y2'
class Item(object):
def __init__(self, title, field, axis = x1y1, **keywords):
self.title = title
self.axis = axis
self.props = keywords
if type(field) is types.ListType:
self.field = field
else:
self.field = [field]
def fieldrefs(self):
return self.field
def to_gnuplot(self, context):
args = ['"%s"' % context.datafile,
'using %s' % context.format_fieldref(self.field),
'title "%s"' % self.title,
'axis %s' % self.axis]
if 'style' in self.props:
args.append('with %s' % self.props['style'])
if 'lc' in self.props:
args.append('lc rgb "%s"' % self.props['lc'])
if 'fs' in self.props:
args.append('fs %s' % self.props['fs'])
return ' '.join(args)
class Plot(object):
def __init__(self, *items):
self.items = items
def fieldrefs(self):
return flatten([item.fieldrefs() for item in self.items])
def to_gnuplot(self, ctx):
return 'plot ' + ', '.join([item.to_gnuplot(ctx) for item in self.items])
class Set(object):
def __init__(self, value):
self.value = value
def to_gnuplot(self, ctx):
return 'set ' + self.value
def fieldrefs(self):
return []
class Context(object):
def __init__(self, datafile, field_to_index):
self.datafile = datafile
self.field_to_index = field_to_index
def format_fieldref(self, fieldref):
return ':'.join([str(self.field_to_index[field]) for field in fieldref])
def collect_fields(plot):
field_to_index = {}
fields = []
def add_field(field):
if field not in field_to_index:
fields.append(field)
field_to_index[field] = len(fields)
for field in flatten([item.fieldrefs() for item in plot]):
add_field(field)
return (fields, field_to_index)
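# Sketch: gnuplot data columns are 1-based, which is why field_to_index starts
# at 1. For a plot whose items reference the fields ['i', 'pause'],
# collect_fields() yields field_to_index == {'i': 1, 'pause': 2}, and
# Context.format_fieldref(['i', 'pause']) renders the column spec '1:2'.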
def is_y2_used(plot):
for subplot in plot:
if isinstance(subplot, Plot):
for item in subplot.items:
if item.axis == x1y2 or item.axis == x2y2:
return True
return False
def get_field(trace_line, field):
t = type(field)
if t is types.StringType:
return trace_line[field]
elif t is types.FunctionType:
return field(trace_line)
def generate_datafile(datafile_name, trace, fields):
with open(datafile_name, 'w') as datafile:
for line in trace:
data_line = [str(get_field(line, field)) for field in fields]
datafile.write('\t'.join(data_line))
datafile.write('\n')
def generate_script_and_datafile(plot, trace, datafile, output):
(fields, field_to_index) = collect_fields(plot)
generate_datafile(datafile, trace, fields)
script = [
'set terminal png',
'set output "%s"' % output,
'set autoscale',
'set ytics nomirror',
'set xtics nomirror',
'set key below'
]
if is_y2_used(plot):
script.append('set autoscale y2')
script.append('set y2tics')
context = Context(datafile, field_to_index)
for item in plot:
script.append(item.to_gnuplot(context))
return '\n'.join(script)
def plot_all(plots, trace, prefix):
charts = []
for plot in plots:
outfilename = "%s_%d.png" % (prefix, len(charts))
charts.append(outfilename)
script = generate_script_and_datafile(plot, trace, '~datafile', outfilename)
print 'Plotting %s...' % outfilename
gnuplot(script)
return charts
def reclaimed_bytes(row):
return row['total_size_before'] - row['total_size_after']
def other_scope(r):
if r['gc'] == 's':
# there is no 'other' scope for scavenging collections.
return 0
return r['pause'] - r['mark'] - r['sweep'] - r['external']
def scavenge_scope(r):
if r['gc'] == 's':
return r['pause'] - r['external']
return 0
def real_mutator(r):
return r['mutator'] - r['steps_took']
plots = [
[
Set('style fill solid 0.5 noborder'),
Set('style histogram rowstacked'),
Set('style data histograms'),
Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
Item('Marking', 'mark', lc = 'purple'),
Item('Sweep', 'sweep', lc = 'blue'),
Item('External', 'external', lc = '#489D43'),
Item('Other', other_scope, lc = 'grey'),
Item('IGC Steps', 'steps_took', lc = '#FF6347'))
],
[
Set('style fill solid 0.5 noborder'),
Set('style histogram rowstacked'),
Set('style data histograms'),
Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
Item('Marking', 'mark', lc = 'purple'),
Item('Sweep', 'sweep', lc = 'blue'),
Item('External', 'external', lc = '#489D43'),
         Item('Other', other_scope, lc = '#ADD8E6'))
],
[
Plot(Item('Mutator', real_mutator, lc = 'black', style = 'lines'))
],
[
Set('style histogram rowstacked'),
Set('style data histograms'),
Plot(Item('Heap Size (before GC)', 'total_size_before', x1y2,
fs = 'solid 0.4 noborder',
lc = 'green'),
Item('Total holes (after GC)', 'holes_size_before', x1y2,
fs = 'solid 0.4 noborder',
lc = 'red'),
Item('GC Time', ['i', 'pause'], style = 'lines', lc = 'red'))
],
[
Set('style histogram rowstacked'),
Set('style data histograms'),
Plot(Item('Heap Size (after GC)', 'total_size_after', x1y2,
fs = 'solid 0.4 noborder',
lc = 'green'),
Item('Total holes (after GC)', 'holes_size_after', x1y2,
fs = 'solid 0.4 noborder',
lc = 'red'),
Item('GC Time', ['i', 'pause'],
style = 'lines',
lc = 'red'))
],
[
Set('style fill solid 0.5 noborder'),
Set('style data histograms'),
Plot(Item('Allocated', 'allocated'),
Item('Reclaimed', reclaimed_bytes),
Item('Promoted', 'promoted', style = 'lines', lc = 'black'))
],
]
def freduce(f, field, trace, init):
return reduce(lambda t,r: f(t, r[field]), trace, init)
def calc_total(trace, field):
return freduce(lambda t,v: t + long(v), field, trace, long(0))
def calc_max(trace, field):
return freduce(lambda t,r: max(t, r), field, trace, 0)
def count_nonzero(trace, field):
return freduce(lambda t,r: t if r == 0 else t + 1, field, trace, 0)
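# Quick sanity check of the fold helpers above on a hand-made trace:
#   _toy = [{'pause': 2}, {'pause': 0}, {'pause': 5}]
#   calc_total(_toy, 'pause')    == 7
#   calc_max(_toy, 'pause')      == 5
#   count_nonzero(_toy, 'pause') == 2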
def process_trace(filename):
trace = gc_nvp_common.parse_gc_trace(filename)
marksweeps = filter(lambda r: r['gc'] == 'ms', trace)
scavenges = filter(lambda r: r['gc'] == 's', trace)
globalgcs = filter(lambda r: r['gc'] != 's', trace)
charts = plot_all(plots, trace, filename)
def stats(out, prefix, trace, field):
n = len(trace)
total = calc_total(trace, field)
max = calc_max(trace, field)
if n > 0:
avg = total / n
else:
avg = 0
if n > 1:
dev = math.sqrt(freduce(lambda t,r: t + (r - avg) ** 2, field, trace, 0) /
(n - 1))
else:
dev = 0
out.write('<tr><td>%s</td><td>%d</td><td>%d</td>'
'<td>%d</td><td>%d [dev %f]</td></tr>' %
(prefix, n, total, max, avg, dev))
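  # e.g. for pause times [2, 4, 6]: n = 3, total = 12, max = 6, avg = 4 and
  # dev = sqrt(((2-4)**2 + (4-4)**2 + (6-4)**2) / 2) = 2.0.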
  def HumanReadable(size):
    suffixes = ['bytes', 'kB', 'MB', 'GB']
    power = 1
    for i in range(len(suffixes)):
      if size < power*1024:
        return "%.1f" % (float(size) / power) + " " + suffixes[i]
      power *= 1024
    # A size of one terabyte or more falls through the loop; report it in GB
    # rather than silently returning None.
    return "%.1f" % (float(size) / (1024 ** 3)) + " " + suffixes[-1]
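  # e.g. HumanReadable(512) == '512.0 bytes', HumanReadable(5242880) == '5.0 MB'.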
def throughput(name, trace):
total_live_after = calc_total(trace, 'total_size_after')
total_live_before = calc_total(trace, 'total_size_before')
total_gc = calc_total(trace, 'pause')
if total_gc == 0:
return
out.write('GC %s Throughput (after): %s / %s ms = %s/ms<br/>' %
(name,
HumanReadable(total_live_after),
total_gc,
HumanReadable(total_live_after / total_gc)))
out.write('GC %s Throughput (before): %s / %s ms = %s/ms<br/>' %
(name,
HumanReadable(total_live_before),
total_gc,
HumanReadable(total_live_before / total_gc)))
with open(filename + '.html', 'w') as out:
out.write('<html><body>')
out.write('<table>')
out.write('<tr><td>Phase</td><td>Count</td><td>Time (ms)</td>')
out.write('<td>Max</td><td>Avg</td></tr>')
stats(out, 'Total in GC', trace, 'pause')
stats(out, 'Scavenge', scavenges, 'pause')
stats(out, 'MarkSweep', marksweeps, 'pause')
stats(out, 'Mark', filter(lambda r: r['mark'] != 0, trace), 'mark')
stats(out, 'Sweep', filter(lambda r: r['sweep'] != 0, trace), 'sweep')
stats(out,
'External',
filter(lambda r: r['external'] != 0, trace),
'external')
out.write('</table>')
throughput('TOTAL', trace)
throughput('MS', marksweeps)
throughput('OLDSPACE', globalgcs)
out.write('<br/>')
for chart in charts:
out.write('<img src="%s">' % chart)
out.write('</body></html>')
print "%s generated." % (filename + '.html')
if len(sys.argv) != 2:
print "Usage: %s <GC-trace-filename>" % sys.argv[0]
sys.exit(1)
process_trace(sys.argv[1])
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appfwpolicylabel_policybinding_binding(base_resource) :
""" Binding class showing the policybinding that can be bound to appfwpolicylabel.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._invoke = False
self._labeltype = ""
self._invoke_labelname = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
ur"""Positive integer specifying the priority of the policy. A lower number specifies a higher priority. Must be unique within a group of policies that are bound to the same bind point or label. Policies are evaluated in the order of their priority numbers.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
ur"""Positive integer specifying the priority of the policy. A lower number specifies a higher priority. Must be unique within a group of policies that are bound to the same bind point or label. Policies are evaluated in the order of their priority numbers.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
ur"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
ur"""Name of the application firewall policy to bind to the policy label.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
ur"""Name of the application firewall policy to bind to the policy label.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labeltype(self) :
ur"""Type of policy label to invoke if the current policy evaluates to TRUE and the invoke parameter is set. Available settings function as follows:
* reqvserver. Invoke the unnamed policy label associated with the specified request virtual server.
* policylabel. Invoke the specified user-defined policy label.<br/>Possible values = reqvserver, policylabel.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
ur"""Type of policy label to invoke if the current policy evaluates to TRUE and the invoke parameter is set. Available settings function as follows:
* reqvserver. Invoke the unnamed policy label associated with the specified request virtual server.
* policylabel. Invoke the specified user-defined policy label.<br/>Possible values = reqvserver, policylabel
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
@property
def labelname(self) :
ur"""Name of the application firewall policy label.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
ur"""Name of the application firewall policy label.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def invoke_labelname(self) :
ur"""Name of the policy label to invoke if the current policy evaluates to TRUE, the invoke parameter is set, and Label Type is set to Policy Label.
"""
try :
return self._invoke_labelname
except Exception as e:
raise e
@invoke_labelname.setter
def invoke_labelname(self, invoke_labelname) :
ur"""Name of the policy label to invoke if the current policy evaluates to TRUE, the invoke parameter is set, and Label Type is set to Policy Label.
"""
try :
self._invoke_labelname = invoke_labelname
except Exception as e:
raise e
@property
def invoke(self) :
ur"""If the current policy evaluates to TRUE, terminate evaluation of policies bound to the current policy label, and then forward the request to the specified virtual server or evaluate the specified policy label.
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
ur"""If the current policy evaluates to TRUE, terminate evaluation of policies bound to the current policy label, and then forward the request to the specified virtual server or evaluate the specified policy label.
"""
try :
self._invoke = invoke
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appfwpolicylabel_policybinding_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appfwpolicylabel_policybinding_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.labelname is not None :
return str(self.labelname)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, labelname) :
ur""" Use this API to fetch appfwpolicylabel_policybinding_binding resources.
"""
try :
obj = appfwpolicylabel_policybinding_binding()
obj.labelname = labelname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, labelname, filter_) :
ur""" Use this API to fetch filtered set of appfwpolicylabel_policybinding_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = appfwpolicylabel_policybinding_binding()
obj.labelname = labelname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, labelname) :
ur""" Use this API to count appfwpolicylabel_policybinding_binding resources configued on NetScaler.
"""
try :
obj = appfwpolicylabel_policybinding_binding()
obj.labelname = labelname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, labelname, filter_) :
ur""" Use this API to count the filtered set of appfwpolicylabel_policybinding_binding resources.
		Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = appfwpolicylabel_policybinding_binding()
obj.labelname = labelname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Labeltype:
reqvserver = "reqvserver"
policylabel = "policylabel"
class appfwpolicylabel_policybinding_binding_response(base_response) :
def __init__(self, length=1) :
self.appfwpolicylabel_policybinding_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appfwpolicylabel_policybinding_binding = [appfwpolicylabel_policybinding_binding() for _ in range(length)]
|
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from netaddr import core as net_exc
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import rpc_compat
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import rpc
from neutron.plugins.common import constants
from neutron.services.vpn.common import topics
from neutron.services.vpn import service_drivers
from neutron.services.vpn.service_drivers import cisco_csr_db as csr_id_map
LOG = logging.getLogger(__name__)
IPSEC = 'ipsec'
BASE_IPSEC_VERSION = '1.0'
LIFETIME_LIMITS = {'IKE Policy': {'min': 60, 'max': 86400},
'IPSec Policy': {'min': 120, 'max': 2592000}}
MIN_CSR_MTU = 1500
MAX_CSR_MTU = 9192
class CsrValidationFailure(exceptions.BadRequest):
message = _("Cisco CSR does not support %(resource)s attribute %(key)s "
"with value '%(value)s'")
class CiscoCsrIPsecVpnDriverCallBack(rpc_compat.RpcCallback):
"""Handler for agent to plugin RPC messaging."""
# history
# 1.0 Initial version
RPC_API_VERSION = BASE_IPSEC_VERSION
def __init__(self, driver):
super(CiscoCsrIPsecVpnDriverCallBack, self).__init__()
self.driver = driver
def create_rpc_dispatcher(self):
return n_rpc.PluginRpcDispatcher([self])
def get_vpn_services_on_host(self, context, host=None):
"""Retuns info on the vpnservices on the host."""
plugin = self.driver.service_plugin
vpnservices = plugin._get_agent_hosting_vpn_services(
context, host)
return [self.driver._make_vpnservice_dict(vpnservice, context)
for vpnservice in vpnservices]
def update_status(self, context, status):
"""Update status of all vpnservices."""
plugin = self.driver.service_plugin
plugin.update_status_by_agent(context, status)
class CiscoCsrIPsecVpnAgentApi(service_drivers.BaseIPsecVpnAgentApi,
rpc_compat.RpcCallback):
"""API and handler for Cisco IPSec plugin to agent RPC messaging."""
RPC_API_VERSION = BASE_IPSEC_VERSION
def __init__(self, topic, default_version):
super(CiscoCsrIPsecVpnAgentApi, self).__init__(
topics.CISCO_IPSEC_AGENT_TOPIC, topic, default_version)
class CiscoCsrIPsecVPNDriver(service_drivers.VpnDriver):
"""Cisco CSR VPN Service Driver class for IPsec."""
def __init__(self, service_plugin):
super(CiscoCsrIPsecVPNDriver, self).__init__(service_plugin)
self.callbacks = CiscoCsrIPsecVpnDriverCallBack(self)
self.conn = rpc.create_connection(new=True)
self.conn.create_consumer(
topics.CISCO_IPSEC_DRIVER_TOPIC,
self.callbacks.create_rpc_dispatcher(),
fanout=False)
self.conn.consume_in_thread()
self.agent_rpc = CiscoCsrIPsecVpnAgentApi(
topics.CISCO_IPSEC_AGENT_TOPIC, BASE_IPSEC_VERSION)
@property
def service_type(self):
return IPSEC
def validate_lifetime(self, for_policy, policy_info):
"""Ensure lifetime in secs and value is supported, based on policy."""
units = policy_info['lifetime']['units']
if units != 'seconds':
raise CsrValidationFailure(resource=for_policy,
key='lifetime:units',
value=units)
value = policy_info['lifetime']['value']
if (value < LIFETIME_LIMITS[for_policy]['min'] or
value > LIFETIME_LIMITS[for_policy]['max']):
raise CsrValidationFailure(resource=for_policy,
key='lifetime:value',
value=value)
def validate_ike_version(self, policy_info):
"""Ensure IKE policy is v1 for current REST API."""
version = policy_info['ike_version']
if version != 'v1':
raise CsrValidationFailure(resource='IKE Policy',
key='ike_version',
value=version)
def validate_mtu(self, conn_info):
"""Ensure the MTU value is supported."""
mtu = conn_info['mtu']
if mtu < MIN_CSR_MTU or mtu > MAX_CSR_MTU:
raise CsrValidationFailure(resource='IPSec Connection',
key='mtu',
value=mtu)
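    # e.g. conn_info with mtu=100 fails here and surfaces as
    # CsrValidationFailure: "Cisco CSR does not support IPSec Connection
    # attribute mtu with value '100'".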
def validate_public_ip_present(self, vpn_service):
"""Ensure there is one gateway IP specified for the router used."""
gw_port = vpn_service.router.gw_port
if not gw_port or len(gw_port.fixed_ips) != 1:
raise CsrValidationFailure(resource='IPSec Connection',
key='router:gw_port:ip_address',
value='missing')
def validate_peer_id(self, ipsec_conn):
"""Ensure that an IP address is specified for peer ID."""
# TODO(pcm) Should we check peer_address too?
peer_id = ipsec_conn['peer_id']
try:
netaddr.IPAddress(peer_id)
except net_exc.AddrFormatError:
raise CsrValidationFailure(resource='IPSec Connection',
key='peer_id', value=peer_id)
def validate_ipsec_connection(self, context, ipsec_conn, vpn_service):
"""Validate attributes w.r.t. Cisco CSR capabilities."""
ike_policy = self.service_plugin.get_ikepolicy(
context, ipsec_conn['ikepolicy_id'])
ipsec_policy = self.service_plugin.get_ipsecpolicy(
context, ipsec_conn['ipsecpolicy_id'])
self.validate_lifetime('IKE Policy', ike_policy)
self.validate_lifetime('IPSec Policy', ipsec_policy)
self.validate_ike_version(ike_policy)
self.validate_mtu(ipsec_conn)
self.validate_public_ip_present(vpn_service)
self.validate_peer_id(ipsec_conn)
LOG.debug(_("IPSec connection %s validated for Cisco CSR"),
ipsec_conn['id'])
def create_ipsec_site_connection(self, context, ipsec_site_connection):
vpnservice = self.service_plugin._get_vpnservice(
context, ipsec_site_connection['vpnservice_id'])
try:
self.validate_ipsec_connection(context, ipsec_site_connection,
vpnservice)
except CsrValidationFailure:
with excutils.save_and_reraise_exception():
self.service_plugin.update_ipsec_site_conn_status(
context, ipsec_site_connection['id'], constants.ERROR)
csr_id_map.create_tunnel_mapping(context, ipsec_site_connection)
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
reason='ipsec-conn-create')
def update_ipsec_site_connection(
self, context, old_ipsec_site_connection, ipsec_site_connection):
vpnservice = self.service_plugin._get_vpnservice(
context, ipsec_site_connection['vpnservice_id'])
self.agent_rpc.vpnservice_updated(
context, vpnservice['router_id'],
reason='ipsec-conn-update')
def delete_ipsec_site_connection(self, context, ipsec_site_connection):
vpnservice = self.service_plugin._get_vpnservice(
context, ipsec_site_connection['vpnservice_id'])
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
reason='ipsec-conn-delete')
def create_ikepolicy(self, context, ikepolicy):
pass
def delete_ikepolicy(self, context, ikepolicy):
pass
def update_ikepolicy(self, context, old_ikepolicy, ikepolicy):
pass
def create_ipsecpolicy(self, context, ipsecpolicy):
pass
def delete_ipsecpolicy(self, context, ipsecpolicy):
pass
def update_ipsecpolicy(self, context, old_ipsec_policy, ipsecpolicy):
pass
def create_vpnservice(self, context, vpnservice):
pass
def update_vpnservice(self, context, old_vpnservice, vpnservice):
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
reason='vpn-service-update')
def delete_vpnservice(self, context, vpnservice):
self.agent_rpc.vpnservice_updated(context, vpnservice['router_id'],
reason='vpn-service-delete')
def get_cisco_connection_mappings(self, conn_id, context):
"""Obtain persisted mappings for IDs related to connection."""
tunnel_id, ike_id, ipsec_id = csr_id_map.get_tunnel_mapping_for(
conn_id, context.session)
return {'site_conn_id': u'Tunnel%d' % tunnel_id,
'ike_policy_id': u'%d' % ike_id,
'ipsec_policy_id': u'%s' % ipsec_id}
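    # e.g. a persisted mapping of (tunnel_id=0, ike_id=1, ipsec_id=2) comes
    # back as {'site_conn_id': u'Tunnel0', 'ike_policy_id': u'1',
    #          'ipsec_policy_id': u'2'}.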
def _make_vpnservice_dict(self, vpnservice, context):
"""Collect all info on service, including Cisco info per IPSec conn."""
vpnservice_dict = dict(vpnservice)
vpnservice_dict['ipsec_conns'] = []
vpnservice_dict['subnet'] = dict(
vpnservice.subnet)
vpnservice_dict['external_ip'] = vpnservice.router.gw_port[
'fixed_ips'][0]['ip_address']
for ipsec_conn in vpnservice.ipsec_site_connections:
ipsec_conn_dict = dict(ipsec_conn)
ipsec_conn_dict['ike_policy'] = dict(ipsec_conn.ikepolicy)
ipsec_conn_dict['ipsec_policy'] = dict(ipsec_conn.ipsecpolicy)
ipsec_conn_dict['peer_cidrs'] = [
peer_cidr.cidr for peer_cidr in ipsec_conn.peer_cidrs]
ipsec_conn_dict['cisco'] = self.get_cisco_connection_mappings(
ipsec_conn['id'], context)
vpnservice_dict['ipsec_conns'].append(ipsec_conn_dict)
return vpnservice_dict
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira, Inc
#
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from sqlalchemy.orm import scoped_session
from quantum.db import model_base
from quantum.db import models_v2
from quantum.extensions import securitygroup as ext_sg
from quantum.openstack.common import cfg
from quantum.openstack.common import uuidutils
class SecurityGroup(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represents a v2 quantum security group."""
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
external_id = sa.Column(sa.Integer, unique=True)
class SecurityGroupPortBinding(model_base.BASEV2):
"""Represents binding between quantum ports and security profiles"""
port_id = sa.Column(sa.String(36), sa.ForeignKey("ports.id"),
primary_key=True)
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id"),
primary_key=True)
class SecurityGroupRule(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
"""Represents a v2 quantum security group rule."""
external_id = sa.Column(sa.Integer)
security_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=False)
source_group_id = sa.Column(sa.String(36),
sa.ForeignKey("securitygroups.id",
ondelete="CASCADE"),
nullable=True)
direction = sa.Column(sa.Enum('ingress', 'egress'))
ethertype = sa.Column(sa.String(40))
protocol = sa.Column(sa.String(40))
port_range_min = sa.Column(sa.Integer)
port_range_max = sa.Column(sa.Integer)
source_ip_prefix = sa.Column(sa.String(255))
security_group = orm.relationship(
SecurityGroup,
backref=orm.backref('rules', cascade='all,delete'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.security_group_id")
source_group = orm.relationship(
SecurityGroup,
backref=orm.backref('source_rules', cascade='all,delete'),
primaryjoin="SecurityGroup.id==SecurityGroupRule.source_group_id")
class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase):
"""Mixin class to add security group to db_plugin_base_v2."""
__native_bulk_support = True
    def create_security_group_bulk(self, context, security_group):
        return self._create_bulk('security_group', context,
                                 security_group)
def create_security_group(self, context, security_group, default_sg=False):
"""Create security group.
If default_sg is true that means we are a default security group for
a given tenant if it does not exist.
"""
s = security_group['security_group']
if (cfg.CONF.SECURITYGROUP.proxy_mode and not context.is_admin):
raise ext_sg.SecurityGroupProxyModeNotAdmin()
if (cfg.CONF.SECURITYGROUP.proxy_mode and not s.get('external_id')):
raise ext_sg.SecurityGroupProxyMode()
if not cfg.CONF.SECURITYGROUP.proxy_mode and s.get('external_id'):
raise ext_sg.SecurityGroupNotProxyMode()
tenant_id = self._get_tenant_id_for_create(context, s)
# if in proxy mode a default security group will be created by source
if not default_sg and not cfg.CONF.SECURITYGROUP.proxy_mode:
self._ensure_default_security_group(context, tenant_id,
security_group)
if s.get('external_id'):
try:
# Check if security group already exists
sg = self.get_security_group(context, s.get('external_id'))
if sg:
raise ext_sg.SecurityGroupAlreadyExists(
name=sg.get('name', ''),
external_id=s.get('external_id'))
except ext_sg.SecurityGroupNotFound:
pass
with context.session.begin(subtransactions=True):
security_group_db = SecurityGroup(id=s.get('id') or (
uuidutils.generate_uuid()),
description=s['description'],
tenant_id=tenant_id,
name=s['name'],
external_id=s.get('external_id'))
context.session.add(security_group_db)
if s.get('name') == 'default':
for ethertype in ext_sg.sg_supported_ethertypes:
# Allow intercommunication
db = SecurityGroupRule(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
security_group=security_group_db,
direction='ingress',
ethertype=ethertype,
source_group=security_group_db)
context.session.add(db)
return self._make_security_group_dict(security_group_db)
def get_security_groups(self, context, filters=None, fields=None):
return self._get_collection(context, SecurityGroup,
self._make_security_group_dict,
filters=filters, fields=fields)
def get_security_groups_count(self, context, filters=None):
return self._get_collection_count(context, SecurityGroup,
filters=filters)
def get_security_group(self, context, id, fields=None, tenant_id=None):
"""Tenant id is given to handle the case when we
are creating a security group or security group rule on behalf of
another use.
"""
if tenant_id:
tmp_context_tenant_id = context.tenant_id
context.tenant_id = tenant_id
try:
ret = self._make_security_group_dict(self._get_security_group(
context, id), fields)
finally:
if tenant_id:
context.tenant_id = tmp_context_tenant_id
return ret
def _get_security_group(self, context, id):
try:
query = self._model_query(context, SecurityGroup)
if uuidutils.is_uuid_like(id):
sg = query.filter(SecurityGroup.id == id).one()
else:
sg = query.filter(SecurityGroup.external_id == id).one()
except exc.NoResultFound:
raise ext_sg.SecurityGroupNotFound(id=id)
return sg
def delete_security_group(self, context, id):
if (cfg.CONF.SECURITYGROUP.proxy_mode and not context.is_admin):
raise ext_sg.SecurityGroupProxyModeNotAdmin()
filters = {'security_group_id': [id]}
ports = self._get_port_security_group_bindings(context, filters)
if ports:
raise ext_sg.SecurityGroupInUse(id=id)
# confirm security group exists
sg = self._get_security_group(context, id)
if sg['name'] == 'default':
raise ext_sg.SecurityGroupCannotRemoveDefault()
with context.session.begin(subtransactions=True):
context.session.delete(sg)
def _make_security_group_dict(self, security_group, fields=None):
res = {'id': security_group['id'],
'name': security_group['name'],
'tenant_id': security_group['tenant_id'],
'description': security_group['description']}
if security_group.get('external_id'):
res['external_id'] = security_group['external_id']
return self._fields(res, fields)
def _make_security_group_binding_dict(self, security_group, fields=None):
res = {'port_id': security_group['port_id'],
'security_group_id': security_group['security_group_id']}
return self._fields(res, fields)
def _create_port_security_group_binding(self, context, port_id,
security_group_id):
with context.session.begin(subtransactions=True):
db = SecurityGroupPortBinding(port_id=port_id,
security_group_id=security_group_id)
context.session.add(db)
def _get_port_security_group_bindings(self, context,
filters=None, fields=None):
return self._get_collection(context, SecurityGroupPortBinding,
self._make_security_group_binding_dict,
filters=filters, fields=fields)
def _delete_port_security_group_bindings(self, context, port_id):
query = self._model_query(context, SecurityGroupPortBinding)
bindings = query.filter(
SecurityGroupPortBinding.port_id == port_id)
with context.session.begin(subtransactions=True):
for binding in bindings:
context.session.delete(binding)
def create_security_group_rule_bulk(self, context, security_group_rule):
return self._create_bulk('security_group_rule', context,
security_group_rule)
def create_security_group_rule_bulk_native(self, context,
security_group_rule):
r = security_group_rule['security_group_rules']
scoped_session(context.session)
security_group_id = self._validate_security_group_rules(
context, security_group_rule)
with context.session.begin(subtransactions=True):
if not self.get_security_group(context, security_group_id):
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
self._check_for_duplicate_rules(context, r)
ret = []
for rule_dict in r:
rule = rule_dict['security_group_rule']
tenant_id = self._get_tenant_id_for_create(context, rule)
db = SecurityGroupRule(
id=uuidutils.generate_uuid(), tenant_id=tenant_id,
security_group_id=rule['security_group_id'],
direction=rule['direction'],
external_id=rule.get('external_id'),
source_group_id=rule.get('source_group_id'),
ethertype=rule['ethertype'],
protocol=rule['protocol'],
port_range_min=rule['port_range_min'],
port_range_max=rule['port_range_max'],
source_ip_prefix=rule.get('source_ip_prefix'))
context.session.add(db)
ret.append(self._make_security_group_rule_dict(db))
return ret
def create_security_group_rule(self, context, security_group_rule):
bulk_rule = {'security_group_rules': [security_group_rule]}
return self.create_security_group_rule_bulk_native(context,
bulk_rule)[0]
def _validate_security_group_rules(self, context, security_group_rule):
"""Check that rules being installed all belong to the same security
group, source_group_id/security_group_id belong to the same tenant,
and rules are valid.
"""
if (cfg.CONF.SECURITYGROUP.proxy_mode and not context.is_admin):
raise ext_sg.SecurityGroupProxyModeNotAdmin()
new_rules = set()
tenant_ids = set()
for rules in security_group_rule['security_group_rules']:
rule = rules.get('security_group_rule')
new_rules.add(rule['security_group_id'])
if (cfg.CONF.SECURITYGROUP.proxy_mode and
not rule.get('external_id')):
raise ext_sg.SecurityGroupProxyMode()
if (not cfg.CONF.SECURITYGROUP.proxy_mode and
rule.get('external_id')):
raise ext_sg.SecurityGroupNotProxyMode()
protocol = rule.get('protocol')
ethertype = rule.get('ethertype')
# Check that port_range's are valid
if (rule['port_range_min'] is None and
rule['port_range_max'] is None):
pass
elif (rule['port_range_min'] is not None and
rule['port_range_min'] <= rule['port_range_max']):
if not rule['protocol']:
raise ext_sg.SecurityGroupProtocolRequiredWithPorts()
else:
raise ext_sg.SecurityGroupInvalidPortRange()
if rule['source_ip_prefix'] and rule['source_group_id']:
raise ext_sg.SecurityGroupSourceGroupAndIpPrefix()
if rule['tenant_id'] not in tenant_ids:
tenant_ids.add(rule['tenant_id'])
source_group_id = rule.get('source_group_id')
# Check that source_group_id exists for tenant
if source_group_id:
self.get_security_group(context, source_group_id,
tenant_id=rule['tenant_id'])
if len(new_rules) > 1:
raise ext_sg.SecurityGroupNotSingleGroupRules()
security_group_id = new_rules.pop()
# Confirm single tenant and that the tenant has permission
# to add rules to this security group.
if len(tenant_ids) > 1:
raise ext_sg.SecurityGroupRulesNotSingleTenant()
for tenant_id in tenant_ids:
self.get_security_group(context, security_group_id,
tenant_id=tenant_id)
return security_group_id
def _make_security_group_rule_dict(self, security_group_rule, fields=None):
res = {'id': security_group_rule['id'],
'tenant_id': security_group_rule['tenant_id'],
'security_group_id': security_group_rule['security_group_id'],
'ethertype': security_group_rule['ethertype'],
'direction': security_group_rule['direction'],
'protocol': security_group_rule['protocol'],
'port_range_min': security_group_rule['port_range_min'],
'port_range_max': security_group_rule['port_range_max'],
'source_ip_prefix': security_group_rule['source_ip_prefix'],
'source_group_id': security_group_rule['source_group_id'],
'external_id': security_group_rule['external_id']}
return self._fields(res, fields)
def _make_security_group_rule_filter_dict(self, security_group_rule):
sgr = security_group_rule['security_group_rule']
res = {'tenant_id': [sgr['tenant_id']],
'security_group_id': [sgr['security_group_id']],
'direction': [sgr['direction']]}
include_if_present = ['protocol', 'port_range_max', 'port_range_min',
'ethertype', 'source_ip_prefix',
'source_group_id', 'external_id']
for key in include_if_present:
value = sgr.get(key)
if value:
res[key] = [value]
return res
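    # e.g. an ingress TCP rule on port 80 produces a filter such as
    # {'tenant_id': [...], 'security_group_id': [...],
    #  'direction': ['ingress'], 'protocol': ['tcp'],
    #  'port_range_min': [80], 'port_range_max': [80]}; keys from
    # include_if_present appear only when the rule actually sets them.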
def _check_for_duplicate_rules(self, context, security_group_rules):
for i in security_group_rules:
found_self = False
for j in security_group_rules:
if i['security_group_rule'] == j['security_group_rule']:
if found_self:
raise ext_sg.DuplicateSecurityGroupRuleInPost(rule=i)
found_self = True
# Check in database if rule exists
filters = self._make_security_group_rule_filter_dict(i)
rules = self.get_security_group_rules(context, filters)
if rules:
raise ext_sg.SecurityGroupRuleExists(id=str(rules[0]['id']))
def get_security_group_rules(self, context, filters=None, fields=None):
return self._get_collection(context, SecurityGroupRule,
self._make_security_group_rule_dict,
filters=filters, fields=fields)
def get_security_group_rules_count(self, context, filters=None):
return self._get_collection_count(context, SecurityGroupRule,
filters=filters)
def get_security_group_rule(self, context, id, fields=None):
security_group_rule = self._get_security_group_rule(context, id)
return self._make_security_group_rule_dict(security_group_rule, fields)
def _get_security_group_rule(self, context, id):
try:
if uuidutils.is_uuid_like(id):
query = self._model_query(context, SecurityGroupRule)
sgr = query.filter(SecurityGroupRule.id == id).one()
else:
query = self._model_query(context, SecurityGroupRule)
sgr = query.filter(SecurityGroupRule.external_id == id).one()
except exc.NoResultFound:
raise ext_sg.SecurityGroupRuleNotFound(id=id)
return sgr
def delete_security_group_rule(self, context, sgrid):
if (cfg.CONF.SECURITYGROUP.proxy_mode and not context.is_admin):
raise ext_sg.SecurityGroupProxyModeNotAdmin()
with context.session.begin(subtransactions=True):
rule = self._get_security_group_rule(context, sgrid)
context.session.delete(rule)
    def _extend_port_dict_security_group(self, context, port):
        filters = {'port_id': [port['id']]}
        fields = {'security_group_id': None}
        port[ext_sg.SECURITYGROUP] = []
        bindings = self._get_port_security_group_bindings(
            context, filters, fields)
        for binding in bindings:
            port[ext_sg.SECURITYGROUP].append(
                binding['security_group_id'])
        return port
    def _process_port_create_security_group(self, context, port_id,
                                            security_group_ids):
        if not security_group_ids:
            return
        for security_group_id in security_group_ids:
            self._create_port_security_group_binding(context, port_id,
                                                     security_group_id)
def _ensure_default_security_group(self, context, tenant_id,
security_group=None):
"""Create a default security group if one doesn't exist.
:returns: the default security group id.
"""
# if in proxy mode a default security group will be created by source
if not security_group and cfg.CONF.SECURITYGROUP.proxy_mode:
return
filters = {'name': ['default'], 'tenant_id': [tenant_id]}
default_group = self.get_security_groups(context, filters)
if not default_group:
security_group = {'security_group': {'name': 'default',
'tenant_id': tenant_id,
'description': 'default'}}
if security_group:
security_group['security_group']['external_id'] = (
security_group['security_group'].get('external_id'))
ret = self.create_security_group(context, security_group, True)
return ret['id']
else:
return default_group[0]['id']
def _validate_security_groups_on_port(self, context, port):
p = port['port']
if not p.get(ext_sg.SECURITYGROUP):
return
valid_groups = self.get_security_groups(context, fields={'id': None})
valid_groups_set = set([x['id'] for x in valid_groups])
req_sg_set = set(p[ext_sg.SECURITYGROUP])
invalid_sg_set = req_sg_set - valid_groups_set
if invalid_sg_set:
msg = ' '.join(str(x) for x in invalid_sg_set)
raise ext_sg.SecurityGroupNotFound(id=msg)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import mxnet as mx
import numpy as np
import pickle as pkl
def _np_reduce(dat, axis, keepdims, numpy_reduce_func):
if isinstance(axis, int):
axis = [axis]
else:
axis = list(axis) if axis is not None else range(len(dat.shape))
ret = dat
for i in reversed(sorted(axis)):
ret = numpy_reduce_func(ret, axis=i)
if keepdims:
keepdims_shape = list(dat.shape)
for i in axis:
keepdims_shape[i] = 1
ret = ret.reshape(tuple(keepdims_shape))
return ret
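# e.g. _np_reduce(np.ones((2, 3)), 0, True, np.sum) gives a (1, 3) array of
# 2.0, matching np.sum(np.ones((2, 3)), axis=0, keepdims=True).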
def reldiff(a, b):
diff = np.abs(a - b)
norm = np.abs(a)
reldiff = np.max(diff / (norm + 1e-7))
return reldiff
def same(a, b):
return np.sum(a != b) == 0
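# reldiff is the largest elementwise relative error, e.g.
# reldiff(np.array([1., 2.]), np.array([1., 2.2])) is roughly 0.1, while
# same() checks exact elementwise equality.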
def check_with_uniform(uf, arg_shapes, dim=None, npuf=None, rmin=-10, type_list=[np.float32]):
"""check function consistency with uniform random numbers"""
if isinstance(arg_shapes, int):
assert dim
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
arg_shapes = [shape] * arg_shapes
for dtype in type_list:
ndarray_arg = []
numpy_arg = []
for s in arg_shapes:
npy = np.random.uniform(rmin, 10, s).astype(dtype)
narr = mx.nd.array(npy, dtype=dtype)
ndarray_arg.append(narr)
numpy_arg.append(npy)
out1 = uf(*ndarray_arg)
if npuf is None:
out2 = uf(*numpy_arg).astype(dtype)
else:
out2 = npuf(*numpy_arg).astype(dtype)
assert out1.shape == out2.shape
if isinstance(out1, mx.nd.NDArray):
out1 = out1.asnumpy()
if dtype == np.float16:
assert reldiff(out1, out2) < 2e-3
else:
assert reldiff(out1, out2) < 1e-6
def random_ndarray(dim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
data = mx.nd.array(np.random.uniform(-10, 10, shape))
return data
def test_ndarray_elementwise():
np.random.seed(0)
nrepeat = 10
maxdim = 4
all_type = [np.float32, np.float64, np.float16, np.uint8, np.int32]
real_type = [np.float32, np.float64, np.float16]
for repeat in range(nrepeat):
for dim in range(1, maxdim):
check_with_uniform(lambda x, y: x + y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x - y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x * y, 2, dim, type_list=all_type)
check_with_uniform(lambda x, y: x / y, 2, dim, type_list=real_type)
check_with_uniform(lambda x, y: x / y, 2, dim, rmin=1, type_list=all_type)
check_with_uniform(mx.nd.sqrt, 1, dim, np.sqrt, rmin=0)
check_with_uniform(mx.nd.square, 1, dim, np.square, rmin=0)
check_with_uniform(lambda x: mx.nd.norm(x).asscalar(), 1, dim, np.linalg.norm)
def test_ndarray_negate():
npy = np.random.uniform(-10, 10, (2,3,4))
arr = mx.nd.array(npy)
assert reldiff(npy, arr.asnumpy()) < 1e-6
assert reldiff(-npy, (-arr).asnumpy()) < 1e-6
# a final check to make sure the negation (-) is not implemented
# as inplace operation, so the contents of arr does not change after
# we compute (-arr)
assert reldiff(npy, arr.asnumpy()) < 1e-6
def test_ndarray_choose():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
assert same(npy[np.arange(shape[0]), indices],
mx.nd.choose_element_0index(arr, mx.nd.array(indices)).asnumpy())
def test_ndarray_fill():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
new_npy = npy.copy()
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
val = np.random.randint(shape[1], size=shape[0])
new_npy[:] = npy
new_npy[np.arange(shape[0]), indices] = val
assert same(new_npy,
mx.nd.fill_element_0index(arr, mx.nd.array(val), mx.nd.array(indices)).asnumpy())
def test_ndarray_onehot():
shape = (100, 20)
npy = np.arange(np.prod(shape)).reshape(shape)
arr = mx.nd.array(npy)
nrepeat = 3
for repeat in range(nrepeat):
indices = np.random.randint(shape[1], size=shape[0])
npy[:] = 0.0
npy[np.arange(shape[0]), indices] = 1.0
mx.nd.onehot_encode(mx.nd.array(indices), out=arr)
assert same(npy, arr.asnumpy())
def test_ndarray_copy():
c = mx.nd.array(np.random.uniform(-10, 10, (10, 10)))
d = c.copyto(mx.Context('cpu', 0))
assert np.sum(np.abs(c.asnumpy() != d.asnumpy())) == 0.0
def test_ndarray_scalar():
c = mx.nd.empty((10,10))
d = mx.nd.empty((10,10))
c[:] = 0.5
d[:] = 1.0
d -= c * 2 / 3 * 6.0
c += 0.5
    assert(abs(np.sum(c.asnumpy()) - 100) < 1e-5)
    assert(abs(np.sum(d.asnumpy()) + 100) < 1e-5)
    c[:] = 2
    assert(abs(np.sum(c.asnumpy()) - 200) < 1e-5)
    d = -c + 2
    assert(abs(np.sum(d.asnumpy())) < 1e-5)
def test_ndarray_pickle():
np.random.seed(0)
maxdim = 5
nrepeat = 10
for repeat in range(nrepeat):
for dim in range(1, maxdim):
a = random_ndarray(dim)
b = mx.nd.empty(a.shape)
a[:] = np.random.uniform(-10, 10, a.shape)
b[:] = np.random.uniform(-10, 10, a.shape)
a = a + b
data = pkl.dumps(a)
a2 = pkl.loads(data)
assert np.sum(a.asnumpy() != a2.asnumpy()) == 0
def test_ndarray_saveload():
np.random.seed(0)
maxdim = 5
nrepeat = 10
fname = 'tmp_list.bin'
for repeat in range(nrepeat):
data = []
for i in range(10):
data.append(random_ndarray(np.random.randint(1, 5)))
mx.nd.save(fname, data)
data2 = mx.nd.load(fname)
assert len(data) == len(data2)
for x, y in zip(data, data2):
assert np.sum(x.asnumpy() != y.asnumpy()) == 0
dmap = {'ndarray xx %s' % i : x for i, x in enumerate(data)}
mx.nd.save(fname, dmap)
dmap2 = mx.nd.load(fname)
assert len(dmap2) == len(dmap)
for k, x in dmap.items():
y = dmap2[k]
assert np.sum(x.asnumpy() != y.asnumpy()) == 0
os.remove(fname)
def test_ndarray_slice():
shape = (10,)
A = mx.nd.array(np.random.uniform(-10, 10, shape))
A2 = A.asnumpy()
assert same(A[3:8].asnumpy(), A2[3:8])
    A2[3:8] *= 10
A[3:8] = A2[3:8]
assert same(A[3:8].asnumpy(), A2[3:8])
def test_ndarray_slice_along_axis():
arr = mx.nd.array(np.random.uniform(-10, 10, (3, 4, 2, 3)))
sub_arr = arr.slice(begin=(None, 1), end=(None, 3))
# test we sliced correctly
assert same(arr.asnumpy()[:, 1:3, :, :], sub_arr.asnumpy())
# test that slice is copy, instead of shared memory
sub_arr[:] = 0
assert not same(arr.asnumpy()[:, 1:3, :, :], sub_arr.asnumpy())
def test_clip():
shape = (10,)
A = mx.random.uniform(-10, 10, shape)
B = mx.nd.clip(A, -2, 2)
B1 = B.asnumpy()
for i in range(shape[0]):
assert B1[i] >= -2
assert B1[i] <= 2
def test_dot():
a = np.random.uniform(-3, 3, (3, 4))
b = np.random.uniform(-3, 3, (4, 5))
c = np.dot(a, b)
A = mx.nd.array(a)
B = mx.nd.array(b)
C = mx.nd.dot(A, B)
assert reldiff(c, C.asnumpy()) < 1e-5
def test_reduce():
sample_num = 200
def test_reduce_inner(numpy_reduce_func, nd_reduce_func):
for i in range(sample_num):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 11, size=ndim)
axis_flags = np.random.randint(0, 2, size=ndim)
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
keepdims = np.random.randint(0, 2)
dat = np.random.rand(*shape) - 0.5
if 0 == len(axes):
axes = tuple(range(ndim))
else:
axes = tuple(axes)
numpy_ret = numpy_reduce_func(dat, axis=axes, keepdims=keepdims)
ndarray_ret = nd_reduce_func(mx.nd.array(dat), axis=axes, keepdims=keepdims)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == numpy_ret.shape) or \
(ndarray_ret.shape == (1,) and numpy_ret.shape == ()), "nd:%s, numpy:%s" \
%(ndarray_ret.shape, numpy_ret.shape)
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-4
test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.sum),
mx.nd.sum)
test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.max),
mx.nd.max)
test_reduce_inner(lambda data, axis, keepdims:_np_reduce(data, axis, keepdims, np.min),
mx.nd.min)
def test_broadcast():
sample_num = 1000
def test_broadcast_to():
for i in range(sample_num):
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 11, size=ndim)
shape = target_shape.copy()
axis_flags = np.random.randint(0, 2, size=ndim)
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
shape[axis] = 1
dat = np.random.rand(*shape) - 0.5
numpy_ret = dat
ndarray_ret = mx.nd.array(dat).broadcast_to(shape=target_shape)
if type(ndarray_ret) is mx.ndarray.NDArray:
ndarray_ret = ndarray_ret.asnumpy()
assert (ndarray_ret.shape == target_shape).all()
err = np.square(ndarray_ret - numpy_ret).mean()
assert err < 1E-8
test_broadcast_to()
if __name__ == '__main__':
mx.profiler.profiler_set_config(mode='all', filename='profile_ndarray.json')
mx.profiler.profiler_set_state('run')
test_ndarray_slice_along_axis()
test_broadcast()
test_ndarray_elementwise()
test_ndarray_slice()
test_ndarray_pickle()
test_ndarray_saveload()
test_ndarray_copy()
test_ndarray_negate()
test_ndarray_scalar()
test_clip()
test_dot()
test_ndarray_choose()
test_ndarray_onehot()
test_ndarray_fill()
test_reduce()
mx.profiler.profiler_set_state('stop')
|