repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
lcweb/node-oauth
|
node_modules/grunt/node_modules/gzip-js/node_modules/deflate-js/test/deflate.py
|
177
|
2329
|
import os
from colorama import Fore
from helpers import deflate, inflate, run_cmd
# Default directory into which generated .deflate output files are written.
outDirDefault = 'test-outs'
# Default directory containing the input files to run the tests against.
testDirDefault = 'test-files'
"""
Run a single test
@param tFile- required; the full path to the file to run
@param level- optional (default: all); the compression level [1-9]
@param delete- optional (default: True); whether to delete the gzipped files
@return True if all tests passed; False if at least one test failed
"""
def runTest(tFile, level=None, delete=True, outDir=outDirDefault):
passed = True
if level == None:
for x in range(1, 10):
if runTest(tFile, x, delete) == False:
passed = False
return passed
# make the test-outs directory
try:
os.mkdir(outDir)
except:
pass
out1 = os.path.join(outDir, '%(file)s.%(level)d.deflate' % {'file': os.path.basename(tFile), 'level' : level})
out2 = os.path.join(outDir, '%(file)s.%(level)d.out.deflate' % {'file': os.path.basename(tFile), 'level' : level})
outData = deflate(tFile, outfile=out1, level=level)
run_cmd('../bin/deflate.js --level %(level)d --file %(file)s --output %(output)s' % {'level' : level, 'file' : tFile, 'output' : out2})
result = run_cmd('diff %(file1)s %(file2)s' % {'file1' : out1, 'file2' : out2})
if result['returncode'] == 0:
status = Fore.GREEN + 'PASSED' + Fore.RESET
else:
passed = False
status = Fore.RED + 'FAILED' + Fore.RESET
print 'Level %(level)d: %(status)s' % {'level' : level, 'status' : status}
if delete == True:
os.remove(out1)
os.remove(out2)
return passed
"""
Runs all tests on the given level. This iterates throuth the testDir directory defined above.
@param level- The level to run on [1-9] (default: None, runs on all levels all)
@param delete- Whether to delete output files after the test is run
@return True if all levels passed, False if at least one failed
"""
def runAll(level=None, delete=True, testDir=testDirDefault, outDir=outDirDefault):
# make the test-outs directory
try:
os.mkdir(outDir)
except:
pass
passed = True
for tFile in os.listdir(testDir):
fullPath = os.path.join(testDir, tFile)
print Fore.YELLOW + tFile + Fore.RESET
if runTest(fullPath, level, delete) == False:
passed = False
print ''
# if we deletede all the files that were created, delete the directory
if delete == True:
os.rmdir(outDir)
return passed
|
mit
|
pombredanne/numba
|
numba/cuda/tests/cudadrv/test_cuda_array_slicing.py
|
6
|
2372
|
from __future__ import print_function
import numpy as np
from numba import cuda
from numba.cuda.testing import unittest
class CudaArrayIndexing(unittest.TestCase):
    """Verify that scalar indexing of a device array matches the host array."""

    def test_index_1d(self):
        host = np.arange(10)
        device = cuda.to_device(host)
        for pos in range(host.size):
            self.assertEqual(host[pos], device[pos])

    def test_index_2d(self):
        host = np.arange(9).reshape(3, 3)
        device = cuda.to_device(host)
        rows, cols = host.shape
        for r in range(rows):
            for c in range(cols):
                self.assertEqual(host[r, c], device[r, c])

    def test_index_3d(self):
        host = np.arange(3 ** 3).reshape(3, 3, 3)
        device = cuda.to_device(host)
        d0, d1, d2 = host.shape
        for i in range(d0):
            for j in range(d1):
                for k in range(d2):
                    self.assertEqual(host[i, j, k], device[i, j, k])
class CudaArraySlicing(unittest.TestCase):
    """Verify that slicing views of a device array agree with host-side slices."""

    def test_prefix_1d(self):
        host = np.arange(5)
        device = cuda.to_device(host)
        for start in range(host.size):
            expected = host[start:]
            actual = device[start:].copy_to_host()
            self.assertTrue(np.all(expected == actual))

    def test_prefix_2d(self):
        host = np.arange(3 ** 2).reshape(3, 3)
        device = cuda.to_device(host)
        rows, cols = host.shape
        for r in range(rows):
            for c in range(cols):
                expected = host[r:, c:]
                view = device[r:, c:]
                # The device view must mirror the host slice's layout exactly.
                self.assertEqual(expected.shape, view.shape)
                self.assertEqual(expected.strides, view.strides)
                actual = view.copy_to_host()
                self.assertTrue(np.all(expected == actual))

    def test_select_column(self):
        host = np.arange(25).reshape(5, 5, order='F')
        device = cuda.to_device(host)
        for col in range(host.shape[1]):
            self.assertTrue(np.all(device[:, col].copy_to_host() == host[:, col]))

    def test_select_row(self):
        host = np.arange(25).reshape(5, 5, order='C')
        device = cuda.to_device(host)
        for row in range(host.shape[0]):
            self.assertTrue(np.all(device[row, :].copy_to_host() == host[row, :]))

    def test_prefix_select(self):
        host = np.arange(5 ** 2).reshape(5, 5, order='F')
        device = cuda.to_device(host)
        self.assertTrue(np.all(device[:1, 1].copy_to_host() == host[:1, 1]))
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
bsd-2-clause
|
gangadhar-kadam/mic-erpnext
|
accounts/report/accounts_receivable/accounts_receivable.py
|
5
|
5040
|
from __future__ import unicode_literals
import webnotes
from webnotes import msgprint, _
from webnotes.utils import getdate, nowdate, flt, cstr
def execute(filters=None):
    """Build the Accounts Receivable report.

    @param filters: dict; observed keys are company, account, report_date and
        ageing_based_on (TODO confirm full set against the report definition)
    @return (columns, data) where data has one row per customer GL entry
        still outstanding as of filters["report_date"]
    """
    if not filters: filters = {}
    columns = get_columns()
    entries = get_gl_entries(filters)
    # [voucher_type, voucher_no] pairs of GL entries posted AFTER the report
    # date; used below and by get_payment_amount to classify entries.
    entries_after_report_date = [[gle.voucher_type, gle.voucher_no]
        for gle in get_gl_entries(filters, upto_report_date=False)]
    account_territory_map = get_account_territory_map()
    si_due_date_map = get_si_due_date_map()
    # Age of the invoice on this date: cap at today if report_date is future.
    age_on = getdate(filters.get("report_date")) > getdate(nowdate()) \
        and nowdate() or filters.get("report_date")
    data = []
    for gle in entries:
        # Report an entry when it is self-referencing, not knocked off against
        # any voucher, or knocked off only by a post-report-date voucher.
        if cstr(gle.against_voucher) == gle.voucher_no or not gle.against_voucher \
                or [gle.against_voucher_type, gle.against_voucher] in entries_after_report_date:
            # Due date is only tracked for Sales Invoices; blank otherwise.
            due_date = (gle.voucher_type == "Sales Invoice") \
                and si_due_date_map.get(gle.voucher_no) or ""
            invoiced_amount = gle.debit > 0 and gle.debit or 0
            payment_amount = get_payment_amount(gle, filters.get("report_date") or nowdate(),
                entries_after_report_date)
            outstanding_amount = invoiced_amount - payment_amount
            # Skip fully settled rows; 0.01 tolerates float rounding noise.
            if abs(flt(outstanding_amount)) > 0.01:
                row = [gle.posting_date, gle.account, gle.voucher_type, gle.voucher_no,
                    gle.remarks, due_date, account_territory_map.get(gle.account),
                    invoiced_amount, payment_amount, outstanding_amount]
                # Ageing buckets are appended after the base columns.
                if filters.get("ageing_based_on") == "Due Date":
                    ageing_based_on_date = due_date
                else:
                    ageing_based_on_date = gle.posting_date
                row += get_ageing_data(ageing_based_on_date, age_on, outstanding_amount)
                data.append(row)
    return columns, data
def get_columns():
    """Return the report column definitions ("Label:Fieldtype:Width")."""
    labels = (
        "Posting Date:Date:80",
        "Account:Link/Account:150",
        "Voucher Type::110",
        "Voucher No::120",
        "Remarks::150",
        "Due Date:Date:80",
        "Territory:Link/Territory:80",
        "Invoiced Amount:Currency:100",
        "Payment Received:Currency:100",
        "Outstanding Amount:Currency:100",
        "Age:Int:50",
        "0-30:Currency:100",
        "30-60:Currency:100",
        "60-90:Currency:100",
        "90-Above:Currency:100",
    )
    return list(labels)
def get_gl_entries(filters, upto_report_date=True):
    """Fetch the non-cancelled customer GL entries for the report.

    @param filters: report filters, forwarded to get_conditions()
    @param upto_report_date: if True, entries on/before report_date;
        otherwise entries strictly after it
    @return list of row dicts (as_dict=1), ordered by posting_date, account
    """
    conditions, customer_accounts = get_conditions(filters, upto_report_date)
    # `conditions` is pre-built SQL text; customer_accounts supplies values
    # for the "account in (%s, ...)" placeholders it contains.
    return webnotes.conn.sql("""select * from `tabGL Entry`
        where ifnull(is_cancelled, 'No') = 'No' %s order by posting_date, account""" %
        (conditions), tuple(customer_accounts), as_dict=1)
def get_conditions(filters, upto_report_date=True):
    """Build the SQL condition string and customer-account parameter list.

    @return (conditions, customer_accounts): conditions is raw SQL appended
        to the GL Entry query; customer_accounts fills the "account in (...)"
        placeholders embedded in it.
    @raises webnotes exception via msgprint when no customer accounts exist.
    """
    conditions = ""
    if filters.get("company"):
        # SECURITY NOTE(review): company and report_date are interpolated
        # directly into SQL below — a crafted filter value could inject SQL.
        # Parameterizing would change the contract shared with callers;
        # flagging rather than changing here.
        conditions += " and company='%s'" % filters["company"]
    customer_accounts = []
    if filters.get("account"):
        customer_accounts = [filters["account"]]
    else:
        # Customer accounts are identified by master_type on the Account record.
        customer_accounts = webnotes.conn.sql_list("""select name from `tabAccount`
            where ifnull(master_type, '') = 'Customer' and docstatus < 2 %s""" %
            conditions, filters)
    if customer_accounts:
        # One %s placeholder per account; values are bound by the caller.
        conditions += " and account in (%s)" % (", ".join(['%s']*len(customer_accounts)))
    else:
        msgprint(_("No Customer Accounts found. Customer Accounts are identified based on \
'Master Type' value in account record."), raise_exception=1)
    if filters.get("report_date"):
        if upto_report_date:
            conditions += " and posting_date<='%s'" % filters["report_date"]
        else:
            conditions += " and posting_date>'%s'" % filters["report_date"]
    return conditions, customer_accounts
def get_account_territory_map():
    """Map each customer account name to its customer's territory."""
    rows = webnotes.conn.sql("""select t2.name, t1.territory from `tabCustomer` t1,
        `tabAccount` t2 where t1.name = t2.master_name""")
    return {account: territory for account, territory in rows}
def get_si_due_date_map():
    """Map each Sales Invoice name to its due_date."""
    rows = webnotes.conn.sql("""select name, due_date from `tabSales Invoice`""")
    return {name: due_date for name, due_date in rows}
def get_payment_amount(gle, report_date, entries_after_report_date):
    """Compute the payment received against a GL entry as of report_date.

    @param gle: the GL entry row (attribute-style dict) being reported
    @param report_date: cut-off date for counting payments
    @param entries_after_report_date: [voucher_type, voucher_no] pairs of
        entries posted after the report date
    @return the payment amount as a float
    """
    payment_amount = 0
    # A credit entry counts as a payment in itself when it is not knocked off
    # against another voucher (or that voucher falls after the report date).
    if flt(gle.credit) > 0 and (not gle.against_voucher or
            [gle.against_voucher_type, gle.against_voucher] in entries_after_report_date):
        payment_amount = gle.credit
    elif flt(gle.debit) > 0:
        # For an invoice (debit) entry, sum all payments recorded against it
        # up to the report date, excluding cancelled entries and itself.
        payment_amount = webnotes.conn.sql("""
            select sum(ifnull(credit, 0)) - sum(ifnull(debit, 0)) from `tabGL Entry`
            where account = %s and posting_date <= %s and against_voucher_type = %s
            and against_voucher = %s and name != %s and ifnull(is_cancelled, 'No') = 'No'""",
            (gle.account, report_date, gle.voucher_type, gle.voucher_no, gle.name))[0][0]
    return flt(payment_amount)
def get_ageing_data(ageing_based_on_date, age_on, outstanding_amount):
    """Return the ageing columns [age_days, 0-30, 30-60, 60-90, 90-Above].

    The outstanding amount lands in exactly one bucket according to the
    number of days between ageing_based_on_date and age_on; the other
    buckets are zero. Missing dates yield an age of zero.
    """
    if age_on and ageing_based_on_date:
        age_days = (getdate(age_on) - getdate(ageing_based_on_date)).days
    else:
        age_days = 0
    buckets = [0, 0, 0, 0]
    if age_days <= 30:
        buckets[0] = outstanding_amount
    elif age_days <= 60:
        buckets[1] = outstanding_amount
    elif age_days <= 90:
        buckets[2] = outstanding_amount
    else:
        buckets[3] = outstanding_amount
    return [age_days] + buckets
|
agpl-3.0
|
VikramShetty/google-diff-match-patch
|
python2/diff_match_patch.py
|
337
|
67934
|
#!/usr/bin/python2.4
from __future__ import division
"""Diff Match and Patch
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Functions for diff, match and patch.
Computes the difference between two texts to create a patch.
Applies the patch onto another text, allowing for errors.
"""
__author__ = 'fraser@google.com (Neil Fraser)'
import math
import re
import sys
import time
import urllib
class diff_match_patch:
"""Class containing the diff, match and patch methods.
Also contains the behaviour settings.
"""
def __init__(self):
  """Inits a diff_match_patch object with default settings.
  Redefine these in your program to override the defaults.
  """
  # Number of seconds to map a diff before giving up (0 for infinity).
  self.Diff_Timeout = 1.0
  # Cost of an empty edit operation in terms of edit characters.
  self.Diff_EditCost = 4
  # At what point is no match declared (0.0 = perfection, 1.0 = very loose).
  self.Match_Threshold = 0.5
  # How far to search for a match (0 = exact location, 1000+ = broad match).
  # A match this many characters away from the expected location will add
  # 1.0 to the score (0.0 is a perfect match).
  self.Match_Distance = 1000
  # When deleting a large block of text (over ~64 characters), how close do
  # the contents have to be to match the expected contents. (0.0 = perfection,
  # 1.0 = very loose). Note that Match_Threshold controls how closely the
  # end points of a delete need to match.
  self.Patch_DeleteThreshold = 0.5
  # Chunk size for context length.
  self.Patch_Margin = 4
  # The number of bits in an int.
  # Python has no maximum, thus to disable patch splitting set to 0.
  # However to avoid long patches in certain pathological cases, use 32.
  # Multiple short patches (using native ints) are much faster than long ones.
  self.Match_MaxBits = 32
# DIFF FUNCTIONS

# The data structure representing a diff is an array of tuples:
# [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")]
# which means: delete "Hello", add "Goodbye" and keep " world."
DIFF_DELETE = -1
DIFF_INSERT = 1
DIFF_EQUAL = 0
def diff_main(self, text1, text2, checklines=True, deadline=None):
  """Find the differences between two texts.  Simplifies the problem by
  stripping any common prefix or suffix off the texts before diffing.

  Args:
    text1: Old string to be diffed.
    text2: New string to be diffed.
    checklines: Optional speedup flag.  If present and false, then don't run
      a line-level diff first to identify the changed areas.
      Defaults to true, which does a faster, slightly less optimal diff.
    deadline: Optional time when the diff should be complete by.  Used
      internally for recursive calls.  Users should set DiffTimeout instead.

  Returns:
    Array of changes.

  Raises:
    ValueError: if either input is None.
  """
  # Set a deadline by which time the diff must be complete.
  if deadline == None:
    # Unlike in most languages, Python counts time in seconds.
    if self.Diff_Timeout <= 0:
      deadline = sys.maxint  # NOTE: Python 2; effectively "no deadline".
    else:
      deadline = time.time() + self.Diff_Timeout
  # Check for null inputs.
  if text1 == None or text2 == None:
    raise ValueError("Null inputs. (diff_main)")
  # Check for equality (speedup).
  if text1 == text2:
    if text1:
      return [(self.DIFF_EQUAL, text1)]
    return []
  # Trim off common prefix (speedup).
  commonlength = self.diff_commonPrefix(text1, text2)
  commonprefix = text1[:commonlength]
  text1 = text1[commonlength:]
  text2 = text2[commonlength:]
  # Trim off common suffix (speedup).
  commonlength = self.diff_commonSuffix(text1, text2)
  if commonlength == 0:
    commonsuffix = ''
  else:
    commonsuffix = text1[-commonlength:]
    text1 = text1[:-commonlength]
    text2 = text2[:-commonlength]
  # Compute the diff on the middle block.
  diffs = self.diff_compute(text1, text2, checklines, deadline)
  # Restore the prefix and suffix.
  if commonprefix:
    diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]
  if commonsuffix:
    diffs.append((self.DIFF_EQUAL, commonsuffix))
  # Merge adjacent edits into canonical form before returning.
  self.diff_cleanupMerge(diffs)
  return diffs
def diff_compute(self, text1, text2, checklines, deadline):
  """Find the differences between two texts.  Assumes that the texts do not
  have any common prefix or suffix.

  Args:
    text1: Old string to be diffed.
    text2: New string to be diffed.
    checklines: Speedup flag.  If false, then don't run a line-level diff
      first to identify the changed areas.
      If true, then run a faster, slightly less optimal diff.
    deadline: Time when the diff should be complete by.

  Returns:
    Array of changes.
  """
  if not text1:
    # Just add some text (speedup).
    return [(self.DIFF_INSERT, text2)]
  if not text2:
    # Just delete some text (speedup).
    return [(self.DIFF_DELETE, text1)]
  if len(text1) > len(text2):
    (longtext, shorttext) = (text1, text2)
  else:
    (shorttext, longtext) = (text1, text2)
  i = longtext.find(shorttext)
  if i != -1:
    # Shorter text is inside the longer text (speedup).
    diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),
             (self.DIFF_INSERT, longtext[i + len(shorttext):])]
    # Swap insertions for deletions if diff is reversed.
    if len(text1) > len(text2):
      diffs[0] = (self.DIFF_DELETE, diffs[0][1])
      diffs[2] = (self.DIFF_DELETE, diffs[2][1])
    return diffs
  if len(shorttext) == 1:
    # Single character string.
    # After the previous speedup, the character can't be an equality.
    return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
  # Check to see if the problem can be split in two.
  hm = self.diff_halfMatch(text1, text2)
  if hm:
    # A half-match was found, sort out the return data.
    (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
    # Send both pairs off for separate processing.
    diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
    diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
    # Merge the results.
    return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b
  # Only use the line-mode speedup on large inputs where it pays off.
  if checklines and len(text1) > 100 and len(text2) > 100:
    return self.diff_lineMode(text1, text2, deadline)
  return self.diff_bisect(text1, text2, deadline)
def diff_lineMode(self, text1, text2, deadline):
  """Do a quick line-level diff on both strings, then rediff the parts for
  greater accuracy.
  This speedup can produce non-minimal diffs.

  Args:
    text1: Old string to be diffed.
    text2: New string to be diffed.
    deadline: Time when the diff should be complete by.

  Returns:
    Array of changes.
  """
  # Scan the text on a line-by-line basis first.
  (text1, text2, linearray) = self.diff_linesToChars(text1, text2)
  diffs = self.diff_main(text1, text2, False, deadline)
  # Convert the diff back to original text.
  self.diff_charsToLines(diffs, linearray)
  # Eliminate freak matches (e.g. blank lines)
  self.diff_cleanupSemantic(diffs)
  # Rediff any replacement blocks, this time character-by-character.
  # Add a dummy entry at the end so the loop below flushes the last run.
  diffs.append((self.DIFF_EQUAL, ''))
  pointer = 0
  count_delete = 0
  count_insert = 0
  text_delete = ''
  text_insert = ''
  while pointer < len(diffs):
    if diffs[pointer][0] == self.DIFF_INSERT:
      count_insert += 1
      text_insert += diffs[pointer][1]
    elif diffs[pointer][0] == self.DIFF_DELETE:
      count_delete += 1
      text_delete += diffs[pointer][1]
    elif diffs[pointer][0] == self.DIFF_EQUAL:
      # Upon reaching an equality, check for prior redundancies.
      if count_delete >= 1 and count_insert >= 1:
        # Delete the offending records and add the merged ones.
        a = self.diff_main(text_delete, text_insert, False, deadline)
        diffs[pointer - count_delete - count_insert : pointer] = a
        # Resume scanning just after the spliced-in sub-diff.
        pointer = pointer - count_delete - count_insert + len(a)
      count_insert = 0
      count_delete = 0
      text_delete = ''
      text_insert = ''
    pointer += 1
  diffs.pop()  # Remove the dummy entry at the end.
  return diffs
def diff_bisect(self, text1, text2, deadline):
  """Find the 'middle snake' of a diff, split the problem in two
  and return the recursively constructed diff.
  See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.

  Args:
    text1: Old string to be diffed.
    text2: New string to be diffed.
    deadline: Time at which to bail if not yet complete.

  Returns:
    Array of diff tuples.
  """

  # Cache the text lengths to prevent multiple calls.
  text1_length = len(text1)
  text2_length = len(text2)
  max_d = (text1_length + text2_length + 1) // 2
  v_offset = max_d
  v_length = 2 * max_d
  # v1/v2 hold the furthest-reaching x for each diagonal k (offset by
  # v_offset so negative k values index safely); -1 marks "unvisited".
  v1 = [-1] * v_length
  v1[v_offset + 1] = 0
  v2 = v1[:]
  delta = text1_length - text2_length
  # If the total number of characters is odd, then the front path will
  # collide with the reverse path.
  front = (delta % 2 != 0)
  # Offsets for start and end of k loop.
  # Prevents mapping of space beyond the grid.
  k1start = 0
  k1end = 0
  k2start = 0
  k2end = 0
  for d in xrange(max_d):
    # Bail out if deadline is reached.
    if time.time() > deadline:
      break

    # Walk the front path one step.
    for k1 in xrange(-d + k1start, d + 1 - k1end, 2):
      k1_offset = v_offset + k1
      if k1 == -d or (k1 != d and
          v1[k1_offset - 1] < v1[k1_offset + 1]):
        x1 = v1[k1_offset + 1]
      else:
        x1 = v1[k1_offset - 1] + 1
      y1 = x1 - k1
      # Follow the diagonal (the "snake") while characters match.
      while (x1 < text1_length and y1 < text2_length and
             text1[x1] == text2[y1]):
        x1 += 1
        y1 += 1
      v1[k1_offset] = x1
      if x1 > text1_length:
        # Ran off the right of the graph.
        k1end += 2
      elif y1 > text2_length:
        # Ran off the bottom of the graph.
        k1start += 2
      elif front:
        k2_offset = v_offset + delta - k1
        if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
          # Mirror x2 onto top-left coordinate system.
          x2 = text1_length - v2[k2_offset]
          if x1 >= x2:
            # Overlap detected.
            return self.diff_bisectSplit(text1, text2, x1, y1, deadline)

    # Walk the reverse path one step.
    for k2 in xrange(-d + k2start, d + 1 - k2end, 2):
      k2_offset = v_offset + k2
      if k2 == -d or (k2 != d and
          v2[k2_offset - 1] < v2[k2_offset + 1]):
        x2 = v2[k2_offset + 1]
      else:
        x2 = v2[k2_offset - 1] + 1
      y2 = x2 - k2
      # Follow the snake backwards from the ends of both texts.
      while (x2 < text1_length and y2 < text2_length and
             text1[-x2 - 1] == text2[-y2 - 1]):
        x2 += 1
        y2 += 1
      v2[k2_offset] = x2
      if x2 > text1_length:
        # Ran off the left of the graph.
        k2end += 2
      elif y2 > text2_length:
        # Ran off the top of the graph.
        k2start += 2
      elif not front:
        k1_offset = v_offset + delta - k2
        if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
          x1 = v1[k1_offset]
          y1 = v_offset + x1 - k1_offset
          # Mirror x2 onto top-left coordinate system.
          x2 = text1_length - x2
          if x1 >= x2:
            # Overlap detected.
            return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
  # Diff took too long and hit the deadline or
  # number of diffs equals number of characters, no commonality at all.
  return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
def diff_bisectSplit(self, text1, text2, x, y, deadline):
  """Given the location of the 'middle snake', split the diff in two parts
  and recurse.

  Args:
    text1: Old string to be diffed.
    text2: New string to be diffed.
    x: Index of split point in text1.
    y: Index of split point in text2.
    deadline: Time at which to bail if not yet complete.

  Returns:
    Array of diff tuples.
  """
  # Split each text at the snake's coordinates...
  head1, tail1 = text1[:x], text1[x:]
  head2, tail2 = text2[:y], text2[y:]
  # ...then diff the two halves serially and concatenate the results.
  return (self.diff_main(head1, head2, False, deadline) +
          self.diff_main(tail1, tail2, False, deadline))
def diff_linesToChars(self, text1, text2):
  """Split two texts into an array of strings.  Reduce the texts to a string
  of hashes where each Unicode character represents one line.

  Args:
    text1: First string.
    text2: Second string.

  Returns:
    Three element tuple, containing the encoded text1, the encoded text2 and
    the array of unique strings.  The zeroth element of the array of unique
    strings is intentionally blank.
  """
  lineArray = []  # e.g. lineArray[4] == "Hello\n"
  lineHash = {}   # e.g. lineHash["Hello\n"] == 4

  # "\x00" is a valid character, but various debuggers don't like it.
  # So we'll insert a junk entry to avoid generating a null character.
  lineArray.append('')

  def diff_linesToCharsMunge(text):
    """Split a text into an array of strings.  Reduce the texts to a string
    of hashes where each Unicode character represents one line.
    Modifies linearray and linehash through being a closure.

    Args:
      text: String to encode.

    Returns:
      Encoded string.
    """
    chars = []
    # Walk the text, pulling out a substring for each line.
    # text.split('\n') would would temporarily double our memory footprint.
    # Modifying text would create many large strings to garbage collect.
    lineStart = 0
    lineEnd = -1
    while lineEnd < len(text) - 1:
      lineEnd = text.find('\n', lineStart)
      if lineEnd == -1:
        # Final line lacks a trailing newline; take the remainder.
        lineEnd = len(text) - 1
      line = text[lineStart:lineEnd + 1]
      lineStart = lineEnd + 1

      if line in lineHash:
        chars.append(unichr(lineHash[line]))
      else:
        # New unique line: register it and encode its index as a character.
        lineArray.append(line)
        lineHash[line] = len(lineArray) - 1
        chars.append(unichr(len(lineArray) - 1))
    return "".join(chars)

  chars1 = diff_linesToCharsMunge(text1)
  chars2 = diff_linesToCharsMunge(text2)
  return (chars1, chars2, lineArray)
def diff_charsToLines(self, diffs, lineArray):
  """Rehydrate the text in a diff from a string of line hashes to real lines
  of text.  Mutates `diffs` in place.

  Args:
    diffs: Array of diff tuples.
    lineArray: Array of unique strings.
  """
  for idx, (op, encoded) in enumerate(diffs):
    # Each character of the encoded text is an index into lineArray.
    diffs[idx] = (op, "".join(lineArray[ord(ch)] for ch in encoded))
def diff_commonPrefix(self, text1, text2):
  """Determine the common prefix of two strings.

  Args:
    text1: First string.
    text2: Second string.

  Returns:
    The number of characters common to the start of each string.
  """
  # Quick check for common null cases.
  if not text1 or not text2 or text1[0] != text2[0]:
    return 0
  # Binary search over the prefix length.
  # Performance analysis: http://neil.fraser.name/news/2007/10/09/
  lo = 0
  hi = min(len(text1), len(text2))
  mid = hi
  start = 0
  while lo < mid:
    if text1[start:mid] == text2[start:mid]:
      # The probed slice matches: everything up to mid is common.
      lo = mid
      start = lo
    else:
      hi = mid
    mid = (hi - lo) // 2 + lo
  return mid
def diff_commonSuffix(self, text1, text2):
  """Determine the common suffix of two strings.

  Args:
    text1: First string.
    text2: Second string.

  Returns:
    The number of characters common to the end of each string.
  """
  # Quick check for common null cases.
  if not text1 or not text2 or text1[-1] != text2[-1]:
    return 0
  # Binary search over the suffix length.
  # Performance analysis: http://neil.fraser.name/news/2007/10/09/
  lo = 0
  hi = min(len(text1), len(text2))
  mid = hi
  tail = 0
  while lo < mid:
    if (text1[-mid:len(text1) - tail] ==
        text2[-mid:len(text2) - tail]):
      # The probed slice matches: the last mid characters are common.
      lo = mid
      tail = lo
    else:
      hi = mid
    mid = (hi - lo) // 2 + lo
  return mid
def diff_commonOverlap(self, text1, text2):
  """Determine if the suffix of one string is the prefix of another.

  Args:
    text1 First string.
    text2 Second string.

  Returns:
    The number of characters common to the end of the first
    string and the start of the second string.
  """
  # Cache the text lengths to prevent multiple calls.
  text1_length = len(text1)
  text2_length = len(text2)
  # Eliminate the null case.
  if text1_length == 0 or text2_length == 0:
    return 0
  # Truncate the longer string.
  if text1_length > text2_length:
    text1 = text1[-text2_length:]
  elif text1_length < text2_length:
    text2 = text2[:text1_length]
  text_length = min(text1_length, text2_length)
  # Quick check for the worst case.
  if text1 == text2:
    return text_length

  # Start by looking for a single character match
  # and increase length until no match is found.
  # Performance analysis: http://neil.fraser.name/news/2010/11/04/
  best = 0
  length = 1
  while True:
    pattern = text1[-length:]
    found = text2.find(pattern)
    if found == -1:
      return best
    # Skip ahead: a valid overlap must be at least `length + found` long.
    length += found
    if found == 0 or text1[-length:] == text2[:length]:
      best = length
      length += 1
def diff_halfMatch(self, text1, text2):
  """Do the two texts share a substring which is at least half the length of
  the longer text?
  This speedup can produce non-minimal diffs.

  Args:
    text1: First string.
    text2: Second string.

  Returns:
    Five element Array, containing the prefix of text1, the suffix of text1,
    the prefix of text2, the suffix of text2 and the common middle.  Or None
    if there was no match.
  """
  if self.Diff_Timeout <= 0:
    # Don't risk returning a non-optimal diff if we have unlimited time.
    return None
  if len(text1) > len(text2):
    (longtext, shorttext) = (text1, text2)
  else:
    (shorttext, longtext) = (text1, text2)
  if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
    return None  # Pointless.

  def diff_halfMatchI(longtext, shorttext, i):
    """Does a substring of shorttext exist within longtext such that the
    substring is at least half the length of longtext?
    Closure, but does not reference any external variables.

    Args:
      longtext: Longer string.
      shorttext: Shorter string.
      i: Start index of quarter length substring within longtext.

    Returns:
      Five element Array, containing the prefix of longtext, the suffix of
      longtext, the prefix of shorttext, the suffix of shorttext and the
      common middle.  Or None if there was no match.
    """
    # Use a quarter-length substring of longtext as the search seed.
    seed = longtext[i:i + len(longtext) // 4]
    best_common = ''
    j = shorttext.find(seed)
    while j != -1:
      # Grow the match around each occurrence of the seed.
      prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
      suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
      if len(best_common) < suffixLength + prefixLength:
        best_common = (shorttext[j - suffixLength:j] +
            shorttext[j:j + prefixLength])
        best_longtext_a = longtext[:i - suffixLength]
        best_longtext_b = longtext[i + prefixLength:]
        best_shorttext_a = shorttext[:j - suffixLength]
        best_shorttext_b = shorttext[j + prefixLength:]
      j = shorttext.find(seed, j + 1)

    if len(best_common) * 2 >= len(longtext):
      return (best_longtext_a, best_longtext_b,
              best_shorttext_a, best_shorttext_b, best_common)
    else:
      return None

  # First check if the second quarter is the seed for a half-match.
  hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
  # Check again based on the third quarter.
  hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
  if not hm1 and not hm2:
    return None
  elif not hm2:
    hm = hm1
  elif not hm1:
    hm = hm2
  else:
    # Both matched.  Select the longest.
    if len(hm1[4]) > len(hm2[4]):
      hm = hm1
    else:
      hm = hm2

  # A half-match was found, sort out the return data.
  if len(text1) > len(text2):
    (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
  else:
    # hm was computed with the texts swapped; swap the halves back.
    (text2_a, text2_b, text1_a, text1_b, mid_common) = hm
  return (text1_a, text1_b, text2_a, text2_b, mid_common)
def diff_cleanupSemantic(self, diffs):
  """Reduce the number of edits by eliminating semantically trivial
  equalities.  Mutates `diffs` in place.

  Args:
    diffs: Array of diff tuples.
  """
  changes = False
  equalities = []  # Stack of indices where equalities are found.
  lastequality = None  # Always equal to diffs[equalities[-1]][1]
  pointer = 0  # Index of current position.
  # Number of chars that changed prior to the equality.
  length_insertions1, length_deletions1 = 0, 0
  # Number of chars that changed after the equality.
  length_insertions2, length_deletions2 = 0, 0
  while pointer < len(diffs):
    if diffs[pointer][0] == self.DIFF_EQUAL:  # Equality found.
      equalities.append(pointer)
      length_insertions1, length_insertions2 = length_insertions2, 0
      length_deletions1, length_deletions2 = length_deletions2, 0
      lastequality = diffs[pointer][1]
    else:  # An insertion or deletion.
      if diffs[pointer][0] == self.DIFF_INSERT:
        length_insertions2 += len(diffs[pointer][1])
      else:
        length_deletions2 += len(diffs[pointer][1])
      # Eliminate an equality that is smaller or equal to the edits on both
      # sides of it.
      if (lastequality and (len(lastequality) <=
          max(length_insertions1, length_deletions1)) and
          (len(lastequality) <= max(length_insertions2, length_deletions2))):
        # Duplicate record.
        diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
        # Change second copy to insert.
        diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
            diffs[equalities[-1] + 1][1])
        # Throw away the equality we just deleted.
        equalities.pop()
        # Throw away the previous equality (it needs to be reevaluated).
        if len(equalities):
          equalities.pop()
        if len(equalities):
          # Rewind to the prior equality to re-evaluate it next iteration.
          pointer = equalities[-1]
        else:
          pointer = -1
        # Reset the counters.
        length_insertions1, length_deletions1 = 0, 0
        length_insertions2, length_deletions2 = 0, 0
        lastequality = None
        changes = True
    pointer += 1

  # Normalize the diff.
  if changes:
    self.diff_cleanupMerge(diffs)
  self.diff_cleanupSemanticLossless(diffs)

  # Find any overlaps between deletions and insertions.
  # e.g: <del>abcxxx</del><ins>xxxdef</ins>
  #   -> <del>abc</del>xxx<ins>def</ins>
  # e.g: <del>xxxabc</del><ins>defxxx</ins>
  #   -> <ins>def</ins>xxx<del>abc</del>
  # Only extract an overlap if it is as big as the edit ahead or behind it.
  pointer = 1
  while pointer < len(diffs):
    if (diffs[pointer - 1][0] == self.DIFF_DELETE and
        diffs[pointer][0] == self.DIFF_INSERT):
      deletion = diffs[pointer - 1][1]
      insertion = diffs[pointer][1]
      overlap_length1 = self.diff_commonOverlap(deletion, insertion)
      overlap_length2 = self.diff_commonOverlap(insertion, deletion)
      if overlap_length1 >= overlap_length2:
        if (overlap_length1 >= len(deletion) / 2.0 or
            overlap_length1 >= len(insertion) / 2.0):
          # Overlap found.  Insert an equality and trim the surrounding edits.
          diffs.insert(pointer, (self.DIFF_EQUAL,
                                 insertion[:overlap_length1]))
          diffs[pointer - 1] = (self.DIFF_DELETE,
                                deletion[:len(deletion) - overlap_length1])
          diffs[pointer + 1] = (self.DIFF_INSERT,
                                insertion[overlap_length1:])
          pointer += 1
      else:
        if (overlap_length2 >= len(deletion) / 2.0 or
            overlap_length2 >= len(insertion) / 2.0):
          # Reverse overlap found.
          # Insert an equality and swap and trim the surrounding edits.
          diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2]))
          diffs[pointer - 1] = (self.DIFF_INSERT,
                                insertion[:len(insertion) - overlap_length2])
          diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:])
          pointer += 1
      pointer += 1
    pointer += 1
  def diff_cleanupSemanticLossless(self, diffs):
    """Look for single edits surrounded on both sides by equalities
    which can be shifted sideways to align the edit to a word boundary.
    e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.

    Args:
      diffs: Array of diff tuples.  Modified in place.
    """

    def diff_cleanupSemanticScore(one, two):
      """Given two strings, compute a score representing whether the
      internal boundary falls on logical boundaries.
      Scores range from 6 (best) to 0 (worst).
      Closure; reaches outward only for self's precompiled blank-line regexes.

      Args:
        one: First string.
        two: Second string.

      Returns:
        The score.
      """
      if not one or not two:
        # Edges are the best.
        return 6

      # Each port of this function behaves slightly differently due to
      # subtle differences in each language's definition of things like
      # 'whitespace'.  Since this function's purpose is largely cosmetic,
      # the choice has been made to use each language's native features
      # rather than force total conformity.
      char1 = one[-1]
      char2 = two[0]
      nonAlphaNumeric1 = not char1.isalnum()
      nonAlphaNumeric2 = not char2.isalnum()
      whitespace1 = nonAlphaNumeric1 and char1.isspace()
      whitespace2 = nonAlphaNumeric2 and char2.isspace()
      lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n")
      lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n")
      blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one)
      blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two)

      if blankLine1 or blankLine2:
        # Five points for blank lines.
        return 5
      elif lineBreak1 or lineBreak2:
        # Four points for line breaks.
        return 4
      elif nonAlphaNumeric1 and not whitespace1 and whitespace2:
        # Three points for end of sentences.
        return 3
      elif whitespace1 or whitespace2:
        # Two points for whitespace.
        return 2
      elif nonAlphaNumeric1 or nonAlphaNumeric2:
        # One point for non-alphanumeric.
        return 1
      return 0

    pointer = 1
    # Intentionally ignore the first and last element (don't need checking).
    while pointer < len(diffs) - 1:
      if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
          diffs[pointer + 1][0] == self.DIFF_EQUAL):
        # This is a single edit surrounded by equalities.
        equality1 = diffs[pointer - 1][1]
        edit = diffs[pointer][1]
        equality2 = diffs[pointer + 1][1]

        # First, shift the edit as far left as possible.
        commonOffset = self.diff_commonSuffix(equality1, edit)
        if commonOffset:
          commonString = edit[-commonOffset:]
          equality1 = equality1[:-commonOffset]
          edit = commonString + edit[:-commonOffset]
          equality2 = commonString + equality2

        # Second, step character by character right, looking for the best fit.
        bestEquality1 = equality1
        bestEdit = edit
        bestEquality2 = equality2
        bestScore = (diff_cleanupSemanticScore(equality1, edit) +
                     diff_cleanupSemanticScore(edit, equality2))
        # Slide the edit one character right while it still lines up with the
        # head of equality2, keeping the best-scoring alignment seen.
        while edit and equality2 and edit[0] == equality2[0]:
          equality1 += edit[0]
          edit = edit[1:] + equality2[0]
          equality2 = equality2[1:]
          score = (diff_cleanupSemanticScore(equality1, edit) +
                   diff_cleanupSemanticScore(edit, equality2))
          # The >= encourages trailing rather than leading whitespace on edits.
          if score >= bestScore:
            bestScore = score
            bestEquality1 = equality1
            bestEdit = edit
            bestEquality2 = equality2

        if diffs[pointer - 1][1] != bestEquality1:
          # We have an improvement, save it back to the diff.
          if bestEquality1:
            diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1)
          else:
            # The preceding equality shrank to nothing; drop it.
            del diffs[pointer - 1]
            pointer -= 1
          diffs[pointer] = (diffs[pointer][0], bestEdit)
          if bestEquality2:
            diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2)
          else:
            # The following equality shrank to nothing; drop it.
            del diffs[pointer + 1]
            pointer -= 1
      pointer += 1
# Define some regex patterns for matching boundaries.
BLANKLINEEND = re.compile(r"\n\r?\n$");
BLANKLINESTART = re.compile(r"^\r?\n\r?\n");
def diff_cleanupEfficiency(self, diffs):
"""Reduce the number of edits by eliminating operationally trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
equalities = [] # Stack of indices where equalities are found.
lastequality = None # Always equal to diffs[equalities[-1]][1]
pointer = 0 # Index of current position.
pre_ins = False # Is there an insertion operation before the last equality.
pre_del = False # Is there a deletion operation before the last equality.
post_ins = False # Is there an insertion operation after the last equality.
post_del = False # Is there a deletion operation after the last equality.
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
if (len(diffs[pointer][1]) < self.Diff_EditCost and
(post_ins or post_del)):
# Candidate found.
equalities.append(pointer)
pre_ins = post_ins
pre_del = post_del
lastequality = diffs[pointer][1]
else:
# Not a candidate, and can never become one.
equalities = []
lastequality = None
post_ins = post_del = False
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_DELETE:
post_del = True
else:
post_ins = True
# Five types to be split:
# <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
# <ins>A</ins>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<ins>C</ins>
# <ins>A</del>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<del>C</del>
if lastequality and ((pre_ins and pre_del and post_ins and post_del) or
((len(lastequality) < self.Diff_EditCost / 2) and
(pre_ins + pre_del + post_ins + post_del) == 3)):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
equalities.pop() # Throw away the equality we just deleted.
lastequality = None
if pre_ins and pre_del:
# No changes made which could affect previous entry, keep going.
post_ins = post_del = True
equalities = []
else:
if len(equalities):
equalities.pop() # Throw away the previous equality.
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
post_ins = post_del = False
changes = True
pointer += 1
if changes:
self.diff_cleanupMerge(diffs)
  def diff_cleanupMerge(self, diffs):
    """Reorder and merge like edit sections.  Merge equalities.
    Any edit section can move as long as it doesn't cross an equality.

    Args:
      diffs: Array of diff tuples.  Modified in place.
    """
    diffs.append((self.DIFF_EQUAL, ''))  # Add a dummy entry at the end.
    pointer = 0
    count_delete = 0
    count_insert = 0
    text_delete = ''
    text_insert = ''
    while pointer < len(diffs):
      if diffs[pointer][0] == self.DIFF_INSERT:
        count_insert += 1
        text_insert += diffs[pointer][1]
        pointer += 1
      elif diffs[pointer][0] == self.DIFF_DELETE:
        count_delete += 1
        text_delete += diffs[pointer][1]
        pointer += 1
      elif diffs[pointer][0] == self.DIFF_EQUAL:
        # Upon reaching an equality, check for prior redundancies.
        if count_delete + count_insert > 1:
          if count_delete != 0 and count_insert != 0:
            # Factor out any common prefixes.
            commonlength = self.diff_commonPrefix(text_insert, text_delete)
            if commonlength != 0:
              x = pointer - count_delete - count_insert - 1
              if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
                # Append the shared prefix to the preceding equality.
                diffs[x] = (diffs[x][0], diffs[x][1] +
                            text_insert[:commonlength])
              else:
                # No preceding equality; create one at the front.
                diffs.insert(0, (self.DIFF_EQUAL, text_insert[:commonlength]))
                pointer += 1
              text_insert = text_insert[commonlength:]
              text_delete = text_delete[commonlength:]
            # Factor out any common suffixes.
            commonlength = self.diff_commonSuffix(text_insert, text_delete)
            if commonlength != 0:
              # Prepend the shared suffix to the following equality.
              diffs[pointer] = (diffs[pointer][0], text_insert[-commonlength:] +
                                diffs[pointer][1])
              text_insert = text_insert[:-commonlength]
              text_delete = text_delete[:-commonlength]
          # Delete the offending records and add the merged ones.
          if count_delete == 0:
            diffs[pointer - count_insert : pointer] = [
                (self.DIFF_INSERT, text_insert)]
          elif count_insert == 0:
            diffs[pointer - count_delete : pointer] = [
                (self.DIFF_DELETE, text_delete)]
          else:
            diffs[pointer - count_delete - count_insert : pointer] = [
                (self.DIFF_DELETE, text_delete),
                (self.DIFF_INSERT, text_insert)]
          # Reposition just past whatever merged records were emitted.
          pointer = pointer - count_delete - count_insert + 1
          if count_delete != 0:
            pointer += 1
          if count_insert != 0:
            pointer += 1
        elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
          # Merge this equality with the previous one.
          diffs[pointer - 1] = (diffs[pointer - 1][0],
                                diffs[pointer - 1][1] + diffs[pointer][1])
          del diffs[pointer]
        else:
          pointer += 1

        count_insert = 0
        count_delete = 0
        text_delete = ''
        text_insert = ''

    if diffs[-1][1] == '':
      diffs.pop()  # Remove the dummy entry at the end.

    # Second pass: look for single edits surrounded on both sides by equalities
    # which can be shifted sideways to eliminate an equality.
    # e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
    changes = False
    pointer = 1
    # Intentionally ignore the first and last element (don't need checking).
    while pointer < len(diffs) - 1:
      if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
          diffs[pointer + 1][0] == self.DIFF_EQUAL):
        # This is a single edit surrounded by equalities.
        if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
          # Shift the edit over the previous equality.
          diffs[pointer] = (diffs[pointer][0],
                            diffs[pointer - 1][1] +
                            diffs[pointer][1][:-len(diffs[pointer - 1][1])])
          diffs[pointer + 1] = (diffs[pointer + 1][0],
                                diffs[pointer - 1][1] + diffs[pointer + 1][1])
          del diffs[pointer - 1]
          changes = True
        elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
          # Shift the edit over the next equality.
          diffs[pointer - 1] = (diffs[pointer - 1][0],
                                diffs[pointer - 1][1] + diffs[pointer + 1][1])
          diffs[pointer] = (diffs[pointer][0],
                            diffs[pointer][1][len(diffs[pointer + 1][1]):] +
                            diffs[pointer + 1][1])
          del diffs[pointer + 1]
          changes = True
      pointer += 1

    # If shifts were made, the diff needs reordering and another shift sweep.
    if changes:
      self.diff_cleanupMerge(diffs)
def diff_xIndex(self, diffs, loc):
"""loc is a location in text1, compute and return the equivalent location
in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8
Args:
diffs: Array of diff tuples.
loc: Location within text1.
Returns:
Location within text2.
"""
chars1 = 0
chars2 = 0
last_chars1 = 0
last_chars2 = 0
for x in xrange(len(diffs)):
(op, text) = diffs[x]
if op != self.DIFF_INSERT: # Equality or deletion.
chars1 += len(text)
if op != self.DIFF_DELETE: # Equality or insertion.
chars2 += len(text)
if chars1 > loc: # Overshot the location.
break
last_chars1 = chars1
last_chars2 = chars2
if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:
# The location was deleted.
return last_chars2
# Add the remaining len(character).
return last_chars2 + (loc - last_chars1)
def diff_prettyHtml(self, diffs):
"""Convert a diff array into a pretty HTML report.
Args:
diffs: Array of diff tuples.
Returns:
HTML representation.
"""
html = []
for (op, data) in diffs:
text = (data.replace("&", "&").replace("<", "<")
.replace(">", ">").replace("\n", "¶<br>"))
if op == self.DIFF_INSERT:
html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
elif op == self.DIFF_DELETE:
html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
elif op == self.DIFF_EQUAL:
html.append("<span>%s</span>" % text)
return "".join(html)
def diff_text1(self, diffs):
"""Compute and return the source text (all equalities and deletions).
Args:
diffs: Array of diff tuples.
Returns:
Source text.
"""
text = []
for (op, data) in diffs:
if op != self.DIFF_INSERT:
text.append(data)
return "".join(text)
def diff_text2(self, diffs):
"""Compute and return the destination text (all equalities and insertions).
Args:
diffs: Array of diff tuples.
Returns:
Destination text.
"""
text = []
for (op, data) in diffs:
if op != self.DIFF_DELETE:
text.append(data)
return "".join(text)
def diff_levenshtein(self, diffs):
"""Compute the Levenshtein distance; the number of inserted, deleted or
substituted characters.
Args:
diffs: Array of diff tuples.
Returns:
Number of changes.
"""
levenshtein = 0
insertions = 0
deletions = 0
for (op, data) in diffs:
if op == self.DIFF_INSERT:
insertions += len(data)
elif op == self.DIFF_DELETE:
deletions += len(data)
elif op == self.DIFF_EQUAL:
# A deletion and an insertion is one substitution.
levenshtein += max(insertions, deletions)
insertions = 0
deletions = 0
levenshtein += max(insertions, deletions)
return levenshtein
  def diff_toDelta(self, diffs):
    """Crush the diff into an encoded string which describes the operations
    required to transform text1 into text2.
    E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'.
    Operations are tab-separated. Inserted text is escaped using %xx notation.

    Args:
      diffs: Array of diff tuples.

    Returns:
      Delta text.
    """
    text = []
    for (op, data) in diffs:
      if op == self.DIFF_INSERT:
        # High ascii will raise UnicodeDecodeError. Use Unicode instead.
        data = data.encode("utf-8")
        # Percent-encode, leaving a generous safe set so the delta stays
        # human-readable (Python 2 urllib.quote).
        text.append("+" + urllib.quote(data, "!~*'();/?:@&=+$,# "))
      elif op == self.DIFF_DELETE:
        # Deletions are run-length encoded against text1.
        text.append("-%d" % len(data))
      elif op == self.DIFF_EQUAL:
        # Equalities are run-length encoded as well.
        text.append("=%d" % len(data))
    return "\t".join(text)
  def diff_fromDelta(self, text1, delta):
    """Given the original text1, and an encoded string which describes the
    operations required to transform text1 into text2, compute the full diff.

    Args:
      text1: Source string for the diff.
      delta: Delta text (as produced by diff_toDelta).

    Returns:
      Array of diff tuples.

    Raises:
      ValueError: If invalid input.
    """
    if type(delta) == unicode:
      # Deltas should be composed of a subset of ascii chars, Unicode not
      # required. If this encode raises UnicodeEncodeError, delta is invalid.
      delta = delta.encode("ascii")
    diffs = []
    pointer = 0  # Cursor in text1
    tokens = delta.split("\t")
    for token in tokens:
      if token == "":
        # Blank tokens are ok (from a trailing \t).
        continue
      # Each token begins with a one character parameter which specifies the
      # operation of this token (delete, insert, equality).
      param = token[1:]
      if token[0] == "+":
        # Insertions carry the literal (percent-encoded) text.
        param = urllib.unquote(param).decode("utf-8")
        diffs.append((self.DIFF_INSERT, param))
      elif token[0] == "-" or token[0] == "=":
        # Deletions/equalities carry only a character count into text1.
        try:
          n = int(param)
        except ValueError:
          raise ValueError("Invalid number in diff_fromDelta: " + param)
        if n < 0:
          raise ValueError("Negative number in diff_fromDelta: " + param)
        text = text1[pointer : pointer + n]
        pointer += n
        if token[0] == "=":
          diffs.append((self.DIFF_EQUAL, text))
        else:
          diffs.append((self.DIFF_DELETE, text))
      else:
        # Anything else is an error.
        raise ValueError("Invalid diff operation in diff_fromDelta: " +
            token[0])
    # The delta must consume text1 exactly.
    if pointer != len(text1):
      raise ValueError(
          "Delta length (%d) does not equal source text length (%d)." %
         (pointer, len(text1)))
    return diffs
# MATCH FUNCTIONS
def match_main(self, text, pattern, loc):
"""Locate the best instance of 'pattern' in 'text' near 'loc'.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
"""
# Check for null inputs.
if text == None or pattern == None:
raise ValueError("Null inputs. (match_main)")
loc = max(0, min(loc, len(text)))
if text == pattern:
# Shortcut (potentially not guaranteed by the algorithm)
return 0
elif not text:
# Nothing to match.
return -1
elif text[loc:loc + len(pattern)] == pattern:
# Perfect match at the perfect spot! (Includes case of null pattern)
return loc
else:
# Do a fuzzy compare.
match = self.match_bitap(text, pattern, loc)
return match
  def match_bitap(self, text, pattern, loc):
    """Locate the best instance of 'pattern' in 'text' near 'loc' using the
    Bitap algorithm.

    Args:
      text: The text to search.
      pattern: The pattern to search for.
      loc: The location to search around.

    Returns:
      Best match index or -1.
    """
    # Python doesn't have a maxint limit, so ignore this check.
    #if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
    #  raise ValueError("Pattern too long for this application.")

    # Initialise the alphabet (per-character position bitmasks).
    s = self.match_alphabet(pattern)

    def match_bitapScore(e, x):
      """Compute and return the score for a match with e errors and x location.
      Accesses loc and pattern through being a closure.

      Args:
        e: Number of errors in match.
        x: Location of match.

      Returns:
        Overall score for match (0.0 = good, 1.0 = bad).
      """
      accuracy = float(e) / len(pattern)
      proximity = abs(loc - x)
      if not self.Match_Distance:
        # Dodge divide by zero error.
        return proximity and 1.0 or accuracy
      return accuracy + (proximity / float(self.Match_Distance))

    # Highest score beyond which we give up.
    score_threshold = self.Match_Threshold
    # Is there a nearby exact match? (speedup)
    best_loc = text.find(pattern, loc)
    if best_loc != -1:
      score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
      # What about in the other direction? (speedup)
      best_loc = text.rfind(pattern, loc + len(pattern))
      if best_loc != -1:
        score_threshold = min(match_bitapScore(0, best_loc), score_threshold)

    # Initialise the bit arrays.  matchmask has the bit for the pattern's
    # final character set, signalling a complete match.
    matchmask = 1 << (len(pattern) - 1)
    best_loc = -1

    bin_max = len(pattern) + len(text)
    # Empty initialization added to appease pychecker.
    last_rd = None
    for d in xrange(len(pattern)):
      # Scan for the best match each iteration allows for one more error.
      # Run a binary search to determine how far from 'loc' we can stray at
      # this error level.
      bin_min = 0
      bin_mid = bin_max
      while bin_min < bin_mid:
        if match_bitapScore(d, loc + bin_mid) <= score_threshold:
          bin_min = bin_mid
        else:
          bin_max = bin_mid
        bin_mid = (bin_max - bin_min) // 2 + bin_min
      # Use the result from this iteration as the maximum for the next.
      bin_max = bin_mid
      start = max(1, loc - bin_mid + 1)
      finish = min(loc + bin_mid, len(text)) + len(pattern)

      # rd[j] holds the bit-parallel match state ending at text position j-1.
      rd = [0] * (finish + 2)
      rd[finish + 1] = (1 << d) - 1
      for j in xrange(finish, start - 1, -1):
        if len(text) <= j - 1:
          # Out of range.
          charMatch = 0
        else:
          charMatch = s.get(text[j - 1], 0)
        if d == 0:  # First pass: exact match.
          rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
        else:  # Subsequent passes: fuzzy match.
          rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | (
              ((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1]
        if rd[j] & matchmask:
          score = match_bitapScore(d, j - 1)
          # This match will almost certainly be better than any existing match.
          # But check anyway.
          if score <= score_threshold:
            # Told you so.
            score_threshold = score
            best_loc = j - 1
            if best_loc > loc:
              # When passing loc, don't exceed our current distance from loc.
              start = max(1, 2 * loc - best_loc)
            else:
              # Already passed loc, downhill from here on in.
              break
      # No hope for a (better) match at greater error levels.
      if match_bitapScore(d + 1, loc) > score_threshold:
        break
      last_rd = rd
    return best_loc
def match_alphabet(self, pattern):
"""Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
"""
s = {}
for char in pattern:
s[char] = 0
for i in xrange(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s
# PATCH FUNCTIONS
def patch_addContext(self, patch, text):
"""Increase the context until it is unique,
but don't let the pattern expand beyond Match_MaxBits.
Args:
patch: The patch to grow.
text: Source text.
"""
if len(text) == 0:
return
pattern = text[patch.start2 : patch.start2 + patch.length1]
padding = 0
# Look for the first and last matches of pattern in text. If two different
# matches are found, increase the pattern length.
while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits ==
0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin -
self.Patch_Margin)):
padding += self.Patch_Margin
pattern = text[max(0, patch.start2 - padding) :
patch.start2 + patch.length1 + padding]
# Add one chunk for good luck.
padding += self.Patch_Margin
# Add the prefix.
prefix = text[max(0, patch.start2 - padding) : patch.start2]
if prefix:
patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]
# Add the suffix.
suffix = text[patch.start2 + patch.length1 :
patch.start2 + patch.length1 + padding]
if suffix:
patch.diffs.append((self.DIFF_EQUAL, suffix))
# Roll back the start points.
patch.start1 -= len(prefix)
patch.start2 -= len(prefix)
# Extend lengths.
patch.length1 += len(prefix) + len(suffix)
patch.length2 += len(prefix) + len(suffix)
  def patch_make(self, a, b=None, c=None):
    """Compute a list of patches to turn text1 into text2.
    Use diffs if provided, otherwise compute it ourselves.
    There are four ways to call this function, depending on what data is
    available to the caller:
    Method 1:
    a = text1, b = text2
    Method 2:
    a = diffs
    Method 3 (optimal):
    a = text1, b = diffs
    Method 4 (deprecated, use method 3):
    a = text1, b = text2, c = diffs

    Args:
      a: text1 (methods 1,3,4) or Array of diff tuples for text1 to
          text2 (method 2).
      b: text2 (methods 1,4) or Array of diff tuples for text1 to
          text2 (method 3) or undefined (method 2).
      c: Array of diff tuples for text1 to text2 (method 4) or
          undefined (methods 1,2,3).

    Returns:
      Array of Patch objects.

    Raises:
      ValueError: If the argument combination matches none of the methods.
    """
    text1 = None
    diffs = None
    # Note that texts may arrive as 'str' or 'unicode'.
    if isinstance(a, basestring) and isinstance(b, basestring) and c is None:
      # Method 1: text1, text2
      # Compute diffs from text1 and text2.
      text1 = a
      diffs = self.diff_main(text1, b, True)
      if len(diffs) > 2:
        self.diff_cleanupSemantic(diffs)
        self.diff_cleanupEfficiency(diffs)
    elif isinstance(a, list) and b is None and c is None:
      # Method 2: diffs
      # Compute text1 from diffs.
      diffs = a
      text1 = self.diff_text1(diffs)
    elif isinstance(a, basestring) and isinstance(b, list) and c is None:
      # Method 3: text1, diffs
      text1 = a
      diffs = b
    elif (isinstance(a, basestring) and isinstance(b, basestring) and
          isinstance(c, list)):
      # Method 4: text1, text2, diffs
      # text2 is not used.
      text1 = a
      diffs = c
    else:
      raise ValueError("Unknown call format to patch_make.")

    if not diffs:
      return []  # Get rid of the None case.
    patches = []
    patch = patch_obj()
    char_count1 = 0  # Number of characters into the text1 string.
    char_count2 = 0  # Number of characters into the text2 string.
    prepatch_text = text1  # Recreate the patches to determine context info.
    postpatch_text = text1
    for x in xrange(len(diffs)):
      (diff_type, diff_text) = diffs[x]
      if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL:
        # A new patch starts here.
        patch.start1 = char_count1
        patch.start2 = char_count2
      if diff_type == self.DIFF_INSERT:
        # Insertion
        patch.diffs.append(diffs[x])
        patch.length2 += len(diff_text)
        postpatch_text = (postpatch_text[:char_count2] + diff_text +
                          postpatch_text[char_count2:])
      elif diff_type == self.DIFF_DELETE:
        # Deletion.
        patch.length1 += len(diff_text)
        patch.diffs.append(diffs[x])
        postpatch_text = (postpatch_text[:char_count2] +
                          postpatch_text[char_count2 + len(diff_text):])
      elif (diff_type == self.DIFF_EQUAL and
            len(diff_text) <= 2 * self.Patch_Margin and
            len(patch.diffs) != 0 and len(diffs) != x + 1):
        # Small equality inside a patch.
        patch.diffs.append(diffs[x])
        patch.length1 += len(diff_text)
        patch.length2 += len(diff_text)

      if (diff_type == self.DIFF_EQUAL and
          len(diff_text) >= 2 * self.Patch_Margin):
        # Time for a new patch.
        if len(patch.diffs) != 0:
          self.patch_addContext(patch, prepatch_text)
          patches.append(patch)
          patch = patch_obj()
          # Unlike Unidiff, our patch lists have a rolling context.
          # http://code.google.com/p/google-diff-match-patch/wiki/Unidiff
          # Update prepatch text & pos to reflect the application of the
          # just completed patch.
          prepatch_text = postpatch_text
          char_count1 = char_count2

      # Update the current character count.
      if diff_type != self.DIFF_INSERT:
        char_count1 += len(diff_text)
      if diff_type != self.DIFF_DELETE:
        char_count2 += len(diff_text)

    # Pick up the leftover patch if not empty.
    if len(patch.diffs) != 0:
      self.patch_addContext(patch, prepatch_text)
      patches.append(patch)

    return patches
def patch_deepCopy(self, patches):
"""Given an array of patches, return another array that is identical.
Args:
patches: Array of Patch objects.
Returns:
Array of Patch objects.
"""
patchesCopy = []
for patch in patches:
patchCopy = patch_obj()
# No need to deep copy the tuples since they are immutable.
patchCopy.diffs = patch.diffs[:]
patchCopy.start1 = patch.start1
patchCopy.start2 = patch.start2
patchCopy.length1 = patch.length1
patchCopy.length2 = patch.length2
patchesCopy.append(patchCopy)
return patchesCopy
  def patch_apply(self, patches, text):
    """Merge a set of patches onto the text.  Return a patched text, as well
    as a list of true/false values indicating which patches were applied.

    Args:
      patches: Array of Patch objects.  Not modified (a deep copy is used).
      text: Old text.

    Returns:
      Two element Array, containing the new text and an array of boolean values.
    """
    if not patches:
      return (text, [])

    # Deep copy the patches so that no changes are made to originals.
    patches = self.patch_deepCopy(patches)
    nullPadding = self.patch_addPadding(patches)
    text = nullPadding + text + nullPadding
    self.patch_splitMax(patches)

    # delta keeps track of the offset between the expected and actual location
    # of the previous patch.  If there are patches expected at positions 10 and
    # 20, but the first patch was found at 12, delta is 2 and the second patch
    # has an effective expected position of 22.
    delta = 0
    results = []
    for patch in patches:
      expected_loc = patch.start2 + delta
      text1 = self.diff_text1(patch.diffs)
      end_loc = -1
      if len(text1) > self.Match_MaxBits:
        # patch_splitMax will only provide an oversized pattern in the case of
        # a monster delete.  Match head and tail separately.
        start_loc = self.match_main(text, text1[:self.Match_MaxBits],
                                    expected_loc)
        if start_loc != -1:
          end_loc = self.match_main(text, text1[-self.Match_MaxBits:],
              expected_loc + len(text1) - self.Match_MaxBits)
          if end_loc == -1 or start_loc >= end_loc:
            # Can't find valid trailing context.  Drop this patch.
            start_loc = -1
      else:
        start_loc = self.match_main(text, text1, expected_loc)
      if start_loc == -1:
        # No match found.  :(
        results.append(False)
        # Subtract the delta for this failed patch from subsequent patches.
        delta -= patch.length2 - patch.length1
      else:
        # Found a match.  :)
        results.append(True)
        delta = start_loc - expected_loc
        if end_loc == -1:
          text2 = text[start_loc : start_loc + len(text1)]
        else:
          text2 = text[start_loc : end_loc + self.Match_MaxBits]
        if text1 == text2:
          # Perfect match, just shove the replacement text in.
          text = (text[:start_loc] + self.diff_text2(patch.diffs) +
                  text[start_loc + len(text1):])
        else:
          # Imperfect match.
          # Run a diff to get a framework of equivalent indices.
          diffs = self.diff_main(text1, text2, False)
          if (len(text1) > self.Match_MaxBits and
              self.diff_levenshtein(diffs) / float(len(text1)) >
              self.Patch_DeleteThreshold):
            # The end points match, but the content is unacceptably bad.
            results[-1] = False
          else:
            self.diff_cleanupSemanticLossless(diffs)
            # Replay the patch's edits at their translated positions.
            index1 = 0
            for (op, data) in patch.diffs:
              if op != self.DIFF_EQUAL:
                index2 = self.diff_xIndex(diffs, index1)
              if op == self.DIFF_INSERT:  # Insertion
                text = text[:start_loc + index2] + data + text[start_loc +
                                                               index2:]
              elif op == self.DIFF_DELETE:  # Deletion
                text = text[:start_loc + index2] + text[start_loc +
                    self.diff_xIndex(diffs, index1 + len(data)):]
              if op != self.DIFF_DELETE:
                index1 += len(data)
    # Strip the padding off.
    text = text[len(nullPadding):-len(nullPadding)]
    return (text, results)
  def patch_addPadding(self, patches):
    """Add some padding on text start and end so that edges can match
    something.  Intended to be called only from within patch_apply.

    Args:
      patches: Array of Patch objects.  Modified in place.

    Returns:
      The padding string added to each side.
    """
    paddingLength = self.Patch_Margin
    nullPadding = ""
    # The padding is the control characters chr(1)..chr(paddingLength),
    # which are unlikely to occur in real text.
    for x in xrange(1, paddingLength + 1):
      nullPadding += chr(x)

    # Bump all the patches forward.
    for patch in patches:
      patch.start1 += paddingLength
      patch.start2 += paddingLength

    # Add some padding on start of first diff.
    patch = patches[0]
    diffs = patch.diffs
    if not diffs or diffs[0][0] != self.DIFF_EQUAL:
      # Add nullPadding equality.
      diffs.insert(0, (self.DIFF_EQUAL, nullPadding))
      patch.start1 -= paddingLength  # Should be 0.
      patch.start2 -= paddingLength  # Should be 0.
      patch.length1 += paddingLength
      patch.length2 += paddingLength
    elif paddingLength > len(diffs[0][1]):
      # Grow first equality so it covers the full padding width.
      extraLength = paddingLength - len(diffs[0][1])
      newText = nullPadding[len(diffs[0][1]):] + diffs[0][1]
      diffs[0] = (diffs[0][0], newText)
      patch.start1 -= extraLength
      patch.start2 -= extraLength
      patch.length1 += extraLength
      patch.length2 += extraLength

    # Add some padding on end of last diff.
    patch = patches[-1]
    diffs = patch.diffs
    if not diffs or diffs[-1][0] != self.DIFF_EQUAL:
      # Add nullPadding equality.
      diffs.append((self.DIFF_EQUAL, nullPadding))
      patch.length1 += paddingLength
      patch.length2 += paddingLength
    elif paddingLength > len(diffs[-1][1]):
      # Grow last equality so it covers the full padding width.
      extraLength = paddingLength - len(diffs[-1][1])
      newText = diffs[-1][1] + nullPadding[:extraLength]
      diffs[-1] = (diffs[-1][0], newText)
      patch.length1 += extraLength
      patch.length2 += extraLength

    return nullPadding
  def patch_splitMax(self, patches):
    """Look through the patches and break up any which are longer than the
    maximum limit of the match algorithm.
    Intended to be called only from within patch_apply.

    Args:
      patches: Array of Patch objects.  Modified in place.
    """
    patch_size = self.Match_MaxBits
    if patch_size == 0:
      # Python has the option of not splitting strings due to its ability
      # to handle integers of arbitrary precision.
      return
    for x in xrange(len(patches)):
      if patches[x].length1 <= patch_size:
        continue
      bigpatch = patches[x]
      # Remove the big old patch.
      # NOTE(review): 'patches' is mutated while iterating xrange(len(...)),
      # and rebinding 'x' does not affect the xrange sequence; this mirrors
      # the upstream code but looks fragile -- confirm against upstream
      # before restructuring.
      del patches[x]
      x -= 1
      start1 = bigpatch.start1
      start2 = bigpatch.start2
      precontext = ''
      while len(bigpatch.diffs) != 0:
        # Create one of several smaller patches.
        patch = patch_obj()
        empty = True
        patch.start1 = start1 - len(precontext)
        patch.start2 = start2 - len(precontext)
        if precontext:
          patch.length1 = patch.length2 = len(precontext)
          patch.diffs.append((self.DIFF_EQUAL, precontext))
        # Consume diffs from the big patch until this small one is full.
        while (len(bigpatch.diffs) != 0 and
               patch.length1 < patch_size - self.Patch_Margin):
          (diff_type, diff_text) = bigpatch.diffs[0]
          if diff_type == self.DIFF_INSERT:
            # Insertions are harmless.
            patch.length2 += len(diff_text)
            start2 += len(diff_text)
            patch.diffs.append(bigpatch.diffs.pop(0))
            empty = False
          elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and
                patch.diffs[0][0] == self.DIFF_EQUAL and
                len(diff_text) > 2 * patch_size):
            # This is a large deletion.  Let it pass in one chunk.
            patch.length1 += len(diff_text)
            start1 += len(diff_text)
            empty = False
            patch.diffs.append((diff_type, diff_text))
            del bigpatch.diffs[0]
          else:
            # Deletion or equality.  Only take as much as we can stomach.
            diff_text = diff_text[:patch_size - patch.length1 -
                                  self.Patch_Margin]
            patch.length1 += len(diff_text)
            start1 += len(diff_text)
            if diff_type == self.DIFF_EQUAL:
              patch.length2 += len(diff_text)
              start2 += len(diff_text)
            else:
              empty = False

            patch.diffs.append((diff_type, diff_text))
            if diff_text == bigpatch.diffs[0][1]:
              del bigpatch.diffs[0]
            else:
              # Partially consumed; trim the front of the remaining diff.
              bigpatch.diffs[0] = (bigpatch.diffs[0][0],
                                   bigpatch.diffs[0][1][len(diff_text):])

        # Compute the head context for the next patch.
        precontext = self.diff_text2(patch.diffs)
        precontext = precontext[-self.Patch_Margin:]
        # Append the end context for this patch.
        postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin]
        if postcontext:
          patch.length1 += len(postcontext)
          patch.length2 += len(postcontext)
          if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
            patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] +
                               postcontext)
          else:
            patch.diffs.append((self.DIFF_EQUAL, postcontext))

        if not empty:
          x += 1
          patches.insert(x, patch)
def patch_toText(self, patches):
"""Take a list of patches and return a textual representation.
Args:
patches: Array of Patch objects.
Returns:
Text representation of patches.
"""
text = []
for patch in patches:
text.append(str(patch))
return "".join(text)
  def patch_fromText(self, textline):
    """Parse a textual representation of patches and return a list of patch
    objects.

    Args:
      textline: Text representation of patches.

    Returns:
      Array of Patch objects.

    Raises:
      ValueError: If invalid input.
    """
    if type(textline) == unicode:
      # Patches should be composed of a subset of ascii chars, Unicode not
      # required.  If this encode raises UnicodeEncodeError, patch is invalid.
      textline = textline.encode("ascii")
    patches = []
    if not textline:
      return patches
    text = textline.split('\n')
    while len(text) != 0:
      # NOTE(review): the pattern should be a raw string (r"...") to avoid
      # invalid-escape-sequence warnings on newer Pythons.
      m = re.match("^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0])
      if not m:
        raise ValueError("Invalid patch string: " + text[0])
      patch = patch_obj()
      patches.append(patch)
      # Convert the 1-based GNU diff coordinates back to 0-based offsets;
      # an absent length means 1, an explicit 0 keeps the raw start.
      patch.start1 = int(m.group(1))
      if m.group(2) == '':
        patch.start1 -= 1
        patch.length1 = 1
      elif m.group(2) == '0':
        patch.length1 = 0
      else:
        patch.start1 -= 1
        patch.length1 = int(m.group(2))

      patch.start2 = int(m.group(3))
      if m.group(4) == '':
        patch.start2 -= 1
        patch.length2 = 1
      elif m.group(4) == '0':
        patch.length2 = 0
      else:
        patch.start2 -= 1
        patch.length2 = int(m.group(4))

      del text[0]

      # Consume body lines until the next "@@" header or end of input.
      while len(text) != 0:
        if text[0]:
          sign = text[0][0]
        else:
          sign = ''
        line = urllib.unquote(text[0][1:])
        line = line.decode("utf-8")
        if sign == '+':
          # Insertion.
          patch.diffs.append((self.DIFF_INSERT, line))
        elif sign == '-':
          # Deletion.
          patch.diffs.append((self.DIFF_DELETE, line))
        elif sign == ' ':
          # Minor equality.
          patch.diffs.append((self.DIFF_EQUAL, line))
        elif sign == '@':
          # Start of next patch.
          break
        elif sign == '':
          # Blank line?  Whatever.
          pass
        else:
          # WTF?
          raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line))
        del text[0]
    return patches
class patch_obj:
    """Class representing one patch operation: a list of diffs plus the
    coordinates of the affected regions in both texts.
    """

    def __init__(self):
        """Initializes with an empty list of diffs."""
        self.diffs = []
        # 0-based start offsets in text1/text2; None until populated.
        self.start1 = None
        self.start2 = None
        # Lengths of the affected regions in text1/text2.
        self.length1 = 0
        self.length2 = 0

    def __str__(self):
        """Emulate GNU diff's format.
        Header: @@ -382,8 +481,9 @@
        Indices are printed as 1-based, not 0-based.

        Returns:
          The GNU diff string.
        """
        def coords(start, length):
            # GNU diff prints "start,length", omits ",1", and leaves the
            # start un-adjusted (0-based) only when the length is zero.
            if length == 0:
                return str(start) + ",0"
            if length == 1:
                return str(start + 1)
            return str(start + 1) + "," + str(length)

        pieces = ["@@ -", coords(self.start1, self.length1),
                  " +", coords(self.start2, self.length2), " @@\n"]
        # Escape the body of the patch with %xx notation.
        for (op, data) in self.diffs:
            if op == diff_match_patch.DIFF_INSERT:
                pieces.append("+")
            elif op == diff_match_patch.DIFF_DELETE:
                pieces.append("-")
            elif op == diff_match_patch.DIFF_EQUAL:
                pieces.append(" ")
            # High ascii will raise UnicodeDecodeError.  Use Unicode instead.
            encoded = data.encode("utf-8")
            pieces.append(urllib.quote(encoded, "!~*'();/?:@&=+$,# ") + "\n")
        return "".join(pieces)
|
apache-2.0
|
d2bit/compose
|
compose/cli/log_printer.py
|
21
|
2616
|
from __future__ import unicode_literals
from __future__ import absolute_import
import sys
from itertools import cycle
from .multiplexer import Multiplexer
from . import colors
from .utils import split_buffer
class LogPrinter(object):
    """Multiplex the log streams of several containers onto one output,
    prefixing every line with an aligned (and optionally colorized)
    container name.
    """

    def __init__(self, containers, attach_params=None, output=sys.stdout, monochrome=False):
        self.containers = containers
        self.attach_params = attach_params or {}
        self.prefix_width = self._calculate_prefix_width(containers)
        self.generators = self._make_log_generators(monochrome)
        self.output = output

    def run(self):
        """Pump every container's log generator until all are exhausted."""
        multiplexer = Multiplexer(self.generators)
        for line in multiplexer.loop():
            self.output.write(line)

    def _calculate_prefix_width(self, containers):
        """
        Calculate the maximum width of container names so we can make the log
        prefixes line up like so:

            db_1  | Listening
            web_1 | Listening
        """
        widths = [len(container.name_without_project) for container in containers]
        return max(widths) if widths else 0

    def _make_log_generators(self, monochrome):
        color_fns = cycle(colors.rainbow())

        def no_color(text):
            return text

        generators = []
        for container in self.containers:
            # Monochrome output keeps the text untouched; otherwise each
            # container gets the next color from the rainbow cycle.
            color_fn = no_color if monochrome else next(color_fns)
            generators.append(self._make_log_generator(container, color_fn))
        return generators

    def _make_log_generator(self, container, color_fn):
        prefix = color_fn(self._generate_prefix(container)).encode('utf-8')
        # Attach to container before log printer starts running
        for line in split_buffer(self._attach(container), '\n'):
            yield prefix + line
        exit_code = container.wait()
        yield color_fn("%s exited with code %s\n" % (container.name, exit_code))

    def _generate_prefix(self, container):
        """
        Generate the prefix for a log line without colour
        """
        name = container.name_without_project
        padding = ' ' * (self.prefix_width - len(name))
        return ''.join([name, padding, ' | '])

    def _attach(self, container):
        params = {
            'stdout': True,
            'stderr': True,
            'stream': True,
        }
        params.update(self.attach_params)
        # The docker API expects 1/0 rather than True/False.
        params = dict((name, 1 if value else 0) for (name, value) in params.items())
        return container.attach(**params)
|
apache-2.0
|
jaggu303619/asylum
|
openerp/addons/account/wizard/account_tax_chart.py
|
51
|
3332
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_tax_chart(osv.osv_memory):
    """
    For Chart of taxes

    Transient wizard that opens the tax-code tree, optionally restricted
    to one accounting period and to posted or all journal entries.
    """
    _name = "account.tax.chart"
    _description = "Account tax chart"
    _columns = {
        # Optional period used to filter the opened tax chart.
        'period_id': fields.many2one('account.period', \
                                      'Period', \
                                      ),
        'target_move': fields.selection([('posted', 'All Posted Entries'),
                                         ('all', 'All Entries'),
                                        ], 'Target Moves', required=True),
    }

    def _get_period(self, cr, uid, context=None):
        """Return default period value"""
        # Prefer "normal" periods (not opening/closing ones) when the ORM
        # picks the current period.
        ctx = dict(context or {}, account_period_prefer_normal=True)
        period_ids = self.pool.get('account.period').find(cr, uid, context=ctx)
        return period_ids and period_ids[0] or False

    def account_tax_chart_open_window(self, cr, uid, ids, context=None):
        """
        Opens chart of Accounts
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of account chart's IDs
        @return: dictionary of Open account chart window on given fiscalyear and all Entries or posted entries
        """
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        if context is None:
            context = {}
        data = self.browse(cr, uid, ids, context=context)[0]
        # Resolve the XML-id of the stock "tax code tree" action and read it.
        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_tax_code_tree')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        if data.period_id:
            # Restrict the opened view to the chosen period/fiscal year and
            # append the period code to the action title.
            result['context'] = str({'period_id': data.period_id.id, \
                                     'fiscalyear_id': data.period_id.fiscalyear_id.id, \
                                     'state': data.target_move})
            period_code = data.period_id.code
            result['name'] += period_code and (':' + period_code) or ''
        else:
            result['context'] = str({'state': data.target_move})
        return result

    _defaults = {
        'period_id': _get_period,
        'target_move': 'posted'
    }

account_tax_chart()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
mikelarre/odoomrp-wip-1
|
warning_log/models/warning.py
|
2
|
6419
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp.osv import orm, fields
from datetime import datetime
class WarningLog(orm.Model):
    """Persistent log of warning messages raised by onchange handlers."""
    _name = "warning.log"
    _rec_name = 'user'
    _columns = {
        "date": fields.datetime("Date"),
        "user": fields.many2one("res.users", "User"),
        "msg": fields.text("Message"),
        "type": fields.many2one("ir.model", "Model")}

    def name_search(self, cr, uid, name='', args=None, operator='ilike',
                    context=None, limit=100):
        """Allow looking a log entry up by its user or its date."""
        if not args:
            args = []
        # NOTE(review): an id can appear twice when both the user and the
        # date match the term -- confirm duplicates are acceptable here.
        ids = self.search(cr, uid, [('user', operator, name)] + args,
                          limit=limit, context=context)
        ids += self.search(cr, uid, [('date', operator, name)] + args,
                           limit=limit, context=context)
        return self.name_get(cr, uid, ids, context)

    def name_get(self, cr, uid, ids, context=None):
        # NOTE(review): the ORM convention for name_get is a list of
        # (id, display_name) tuples; this returns (login, date) instead --
        # verify against the callers before changing.
        if not len(ids):
            return []
        res = [(r.user.login, r.date)
               for r in self.browse(cr, uid, ids, context=context)]
        return res

    def create_warning_log(self, cr, uid, ids, model, warning,
                           context=None):
        """Store ``warning`` (an onchange warning dict) as a warning.log
        row tagged with the originating ``model``; no-op when falsy."""
        if warning:
            model_id = self.pool['ir.model'].search(
                cr, uid, [('model', '=', model)], context=context)
            self.create(
                cr, uid, {
                    'date': datetime.now(), 'user': uid,
                    'msg': warning.get('message'),
                    'type': model_id[0]}, context=context)
class SaleOrder(orm.Model):
    """sale.order: log the warning (if any) raised by the partner onchange."""
    _inherit = "sale.order"

    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        vals = super(SaleOrder, self).onchange_partner_id(cr, uid, ids, part,
                                                          context=context)
        self.pool['warning.log'].create_warning_log(
            cr, uid, ids, self._name, vals.get('warning'), context=context)
        return vals


class PurchaseOrder(orm.Model):
    """purchase.order: log the warning (if any) raised by the partner onchange."""
    _inherit = 'purchase.order'

    def onchange_partner_id(self, cr, uid, ids, part, context=None):
        vals = super(PurchaseOrder, self).onchange_partner_id(
            cr, uid, ids, part, context=context)
        self.pool['warning.log'].create_warning_log(
            cr, uid, ids, self._name, vals.get('warning'), context=context)
        return vals


class AccountInvoice(orm.Model):
    """account.invoice: log the warning (if any) raised by the partner onchange."""
    _inherit = 'account.invoice'

    def onchange_partner_id(self, cr, uid, ids, type, partner_id,
                            date_invoice=False, payment_term=False,
                            partner_bank_id=False, company_id=False,
                            context=None):
        vals = super(AccountInvoice, self).onchange_partner_id(
            cr, uid, ids, type, partner_id, date_invoice=date_invoice,
            payment_term=payment_term, partner_bank_id=partner_bank_id,
            company_id=company_id, context=context)
        self.pool['warning.log'].create_warning_log(
            cr, uid, ids, self._name, vals.get('warning'), context=context)
        return vals


class StockPicking(orm.Model):
    """stock.picking: log the warning (if any) raised by the partner onchange."""
    _inherit = 'stock.picking'

    def onchange_partner_in(self, cr, uid, ids, partner_id=None,
                            context=None):
        vals = super(StockPicking, self).onchange_partner_in(
            cr, uid, ids, partner_id=partner_id, context=context)
        self.pool['warning.log'].create_warning_log(
            cr, uid, ids, self._name, vals.get('warning'), context=context)
        return vals


class SaleOrderLine(orm.Model):
    """sale.order.line: log the warning (if any) raised by the product onchange."""
    _inherit = 'sale.order.line'

    def product_id_change_with_wh(self, cr, uid, ids, pricelist, product,
                                  qty=0, uom=False, qty_uos=0, uos=False,
                                  name='', partner_id=False, lang=False,
                                  update_tax=True, date_order=False,
                                  packaging=False, fiscal_position=False,
                                  flag=False, warehouse_id=False,
                                  context=None):
        vals = super(SaleOrderLine, self).product_id_change_with_wh(
            cr, uid, ids, pricelist, product, qty=qty, uom=uom,
            qty_uos=qty_uos, uos=uos, name=name, partner_id=partner_id,
            lang=lang, update_tax=update_tax, date_order=date_order,
            packaging=packaging, fiscal_position=fiscal_position, flag=flag,
            warehouse_id=warehouse_id, context=context)
        self.pool['warning.log'].create_warning_log(
            cr, uid, ids, self._name, vals.get('warning'), context=context)
        return vals
class PurchaseOrderLine(orm.Model):
    """purchase.order.line: log the warning (if any) raised by the product
    onchange."""
    _inherit = 'purchase.order.line'

    def onchange_product_id(self, cr, uid, ids, pricelist, product, qty, uom,
                            partner_id, date_order=False,
                            fiscal_position_id=False, date_planned=False,
                            name=False, price_unit=False, state='draft',
                            notes=False, context=None):
        """Delegate to the standard onchange, then record its warning (if
        any) in warning.log.  Returns the onchange result unmodified."""
        vals = super(PurchaseOrderLine, self).onchange_product_id(
            cr, uid, ids, pricelist, product, qty, uom, partner_id,
            date_order=date_order, fiscal_position_id=fiscal_position_id,
            date_planned=date_planned, name=name, price_unit=price_unit,
            # Bug fix: forward the caller's ``state`` instead of the
            # hard-coded 'draft', which silently discarded the argument.
            state=state, notes=notes, context=context)
        self.pool['warning.log'].create_warning_log(
            cr, uid, ids, self._name, vals.get('warning'), context=context)
        return vals
|
agpl-3.0
|
ejpbruel/servo
|
tests/wpt/web-platform-tests/tools/pytest/testing/test_junitxml.py
|
166
|
27154
|
# -*- coding: utf-8 -*-
from xml.dom import minidom
from _pytest.main import EXIT_NOTESTSCOLLECTED
import py
import sys
import os
from _pytest.junitxml import LogXML
import pytest
def runandparse(testdir, *args):
    """Run pytest with --junitxml and return (RunResult, DomNode of the XML)."""
    report_path = testdir.tmpdir.join("junit.xml")
    run_result = testdir.runpytest("--junitxml=%s" % report_path, *args)
    parsed = minidom.parse(str(report_path))
    return run_result, DomNode(parsed)
def assert_attr(node, **kwargs):
    """Assert that *node* carries every attribute named in **kwargs,
    comparing the values as strings."""
    __tracebackhide__ = True

    def actual_value(n, name):
        attr_node = n.getAttributeNode(name)
        return attr_node.value if attr_node is not None else None

    expected = dict((name, str(value)) for name, value in kwargs.items())
    on_node = dict((name, actual_value(node, name)) for name in expected)
    assert on_node == expected
class DomNode(object):
    """Thin convenience wrapper around an ``xml.dom.minidom`` node used by
    the junitxml tests: tag/attribute access plus search helpers that
    return wrapped nodes."""

    def __init__(self, dom):
        self.__node = dom

    def __repr__(self):
        return self.__node.toxml()

    def find_first_by_tag(self, tag):
        return self.find_nth_by_tag(tag, 0)

    def _by_tag(self, tag):
        return self.__node.getElementsByTagName(tag)

    def find_nth_by_tag(self, tag, n):
        """Return the n-th descendant with *tag*, or None when absent."""
        items = self._by_tag(tag)
        try:
            nth = items[n]
        except IndexError:
            pass
        else:
            return type(self)(nth)

    def find_by_tag(self, tag):
        t = type(self)
        return [t(x) for x in self.__node.getElementsByTagName(tag)]

    def __getitem__(self, key):
        # Returns None (not KeyError) for a missing attribute.
        node = self.__node.getAttributeNode(key)
        if node is not None:
            return node.value

    def assert_attr(self, **kwargs):
        __tracebackhide__ = True
        return assert_attr(self.__node, **kwargs)

    def toxml(self):
        return self.__node.toxml()

    @property
    def text(self):
        return self.__node.childNodes[0].wholeText

    @property
    def tag(self):
        return self.__node.tagName

    @property
    def next_sibling(self):
        """The wrapped next sibling node."""
        return type(self)(self.__node.nextSibling)

    @property
    def next_siebling(self):
        # Historical misspelling kept as an alias so existing callers
        # (e.g. test_failure_function) keep working.
        return self.next_sibling
class TestPython:
    """End-to-end checks of the JUnit XML produced for python test items:
    pass/fail/skip/xfail accounting, timings, captured output, escaping."""

    def test_summing_simple(self, testdir):
        # xfail and xpass are both reported as skips, so tests=2 below.
        testdir.makepyfile("""
            import pytest
            def test_pass():
                pass
            def test_fail():
                assert 0
            def test_skip():
                pytest.skip("")
            @pytest.mark.xfail
            def test_xfail():
                assert 0
            @pytest.mark.xfail
            def test_xpass():
                assert 1
        """)
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(name="pytest", errors=0, failures=1, skips=3, tests=2)

    def test_timing_function(self, testdir):
        # setup/teardown time must be included in the testcase "time" attr.
        testdir.makepyfile("""
            import time, pytest
            def setup_module():
                time.sleep(0.01)
            def teardown_module():
                time.sleep(0.01)
            def test_sleep():
                time.sleep(0.01)
        """)
        result, dom = runandparse(testdir)
        node = dom.find_first_by_tag("testsuite")
        tnode = node.find_first_by_tag("testcase")
        val = tnode["time"]
        assert round(float(val), 2) >= 0.03

    def test_setup_error(self, testdir):
        # A raising funcarg factory shows up as an <error>, not a <failure>.
        testdir.makepyfile("""
            def pytest_funcarg__arg(request):
                raise ValueError()
            def test_function(arg):
                pass
        """)
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(errors=1, tests=0)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_setup_error.py",
            line="2",
            classname="test_setup_error",
            name="test_function")
        fnode = tnode.find_first_by_tag("error")
        fnode.assert_attr(message="test setup failure")
        assert "ValueError" in fnode.toxml()

    def test_skip_contains_name_reason(self, testdir):
        # The skip reason must be preserved in the <skipped> message.
        testdir.makepyfile("""
            import pytest
            def test_skip():
                pytest.skip("hello23")
        """)
        result, dom = runandparse(testdir)
        assert result.ret == 0
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(skips=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_skip_contains_name_reason.py",
            line="1",
            classname="test_skip_contains_name_reason",
            name="test_skip")
        snode = tnode.find_first_by_tag("skipped")
        snode.assert_attr(type="pytest.skip", message="hello23", )

    def test_classname_instance(self, testdir):
        # classname includes the test class for methods.
        testdir.makepyfile("""
            class TestClass:
                def test_method(self):
                    assert 0
        """)
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(failures=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_classname_instance.py",
            line="1",
            classname="test_classname_instance.TestClass",
            name="test_method")

    def test_classname_nested_dir(self, testdir):
        # Subdirectories become dotted components of the classname.
        p = testdir.tmpdir.ensure("sub", "test_hello.py")
        p.write("def test_func(): 0/0")
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(failures=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file=os.path.join("sub", "test_hello.py"),
            line="0",
            classname="sub.test_hello",
            name="test_func")

    def test_internal_error(self, testdir):
        # An internal pytest error is reported as classname=pytest/name=internal.
        testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0")
        testdir.makepyfile("def test_function(): pass")
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(errors=1, tests=0)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(classname="pytest", name="internal")
        fnode = tnode.find_first_by_tag("error")
        fnode.assert_attr(message="internal error")
        assert "Division" in fnode.toxml()

    def test_failure_function(self, testdir):
        # Captured stdout/stderr must land in system-out/system-err siblings.
        testdir.makepyfile("""
            import sys
            def test_fail():
                print ("hello-stdout")
                sys.stderr.write("hello-stderr\\n")
                raise ValueError(42)
        """)
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(failures=1, tests=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_failure_function.py",
            line="1",
            classname="test_failure_function",
            name="test_fail")
        fnode = tnode.find_first_by_tag("failure")
        fnode.assert_attr(message="ValueError: 42")
        assert "ValueError" in fnode.toxml()
        systemout = fnode.next_siebling
        assert systemout.tag == "system-out"
        assert "hello-stdout" in systemout.toxml()
        systemerr = systemout.next_siebling
        assert systemerr.tag == "system-err"
        assert "hello-stderr" in systemerr.toxml()

    def test_failure_verbose_message(self, testdir):
        testdir.makepyfile("""
            import sys
            def test_fail():
                assert 0, "An error"
        """)
        result, dom = runandparse(testdir)
        node = dom.find_first_by_tag("testsuite")
        tnode = node.find_first_by_tag("testcase")
        fnode = tnode.find_first_by_tag("failure")
        fnode.assert_attr(message="AssertionError: An error assert 0")

    def test_failure_escape(self, testdir):
        # XML-special characters in parametrize ids must be escaped.
        testdir.makepyfile("""
            import pytest
            @pytest.mark.parametrize('arg1', "<&'", ids="<&'")
            def test_func(arg1):
                print(arg1)
                assert 0
        """)
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(failures=3, tests=3)
        for index, char in enumerate("<&'"):
            tnode = node.find_nth_by_tag("testcase", index)
            tnode.assert_attr(
                file="test_failure_escape.py",
                line="1",
                classname="test_failure_escape",
                name="test_func[%s]" % char)
            sysout = tnode.find_first_by_tag('system-out')
            text = sysout.text
            assert text == '%s\n' % char

    def test_junit_prefixing(self, testdir):
        # --junitprefix prepends its value to every classname.
        testdir.makepyfile("""
            def test_func():
                assert 0
            class TestHello:
                def test_hello(self):
                    pass
        """)
        result, dom = runandparse(testdir, "--junitprefix=xyz")
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(failures=1, tests=2)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_junit_prefixing.py",
            line="0",
            classname="xyz.test_junit_prefixing",
            name="test_func")
        tnode = node.find_nth_by_tag("testcase", 1)
        tnode.assert_attr(
            file="test_junit_prefixing.py",
            line="3",
            classname="xyz.test_junit_prefixing."
                      "TestHello",
            name="test_hello")

    def test_xfailure_function(self, testdir):
        # An xfail is reported as a skip with the standard message.
        testdir.makepyfile("""
            import pytest
            def test_xfail():
                pytest.xfail("42")
        """)
        result, dom = runandparse(testdir)
        assert not result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(skips=1, tests=0)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_xfailure_function.py",
            line="1",
            classname="test_xfailure_function",
            name="test_xfail")
        fnode = tnode.find_first_by_tag("skipped")
        fnode.assert_attr(message="expected test failure")
        # assert "ValueError" in fnode.toxml()

    def test_xfailure_xpass(self, testdir):
        # An unexpected pass is also reported as a skip, with its own message.
        testdir.makepyfile("""
            import pytest
            @pytest.mark.xfail
            def test_xpass():
                pass
        """)
        result, dom = runandparse(testdir)
        # assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(skips=1, tests=0)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_xfailure_xpass.py",
            line="1",
            classname="test_xfailure_xpass",
            name="test_xpass")
        fnode = tnode.find_first_by_tag("skipped")
        fnode.assert_attr(message="xfail-marked test passes unexpectedly")
        # assert "ValueError" in fnode.toxml()

    def test_collect_error(self, testdir):
        # A module that fails to import shows up as a collection error.
        testdir.makepyfile("syntax error")
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(errors=1, tests=0)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_collect_error.py",
            name="test_collect_error")
        assert tnode["line"] is None
        fnode = tnode.find_first_by_tag("error")
        fnode.assert_attr(message="collection failure")
        assert "SyntaxError" in fnode.toxml()

    def test_collect_skipped(self, testdir):
        testdir.makepyfile("import pytest; pytest.skip('xyz')")
        result, dom = runandparse(testdir)
        assert result.ret == EXIT_NOTESTSCOLLECTED
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(skips=1, tests=0)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_collect_skipped.py",
            name="test_collect_skipped")
        # py.test doesn't give us a line here.
        assert tnode["line"] is None
        fnode = tnode.find_first_by_tag("skipped")
        fnode.assert_attr(message="collection skipped")

    def test_unicode(self, testdir):
        # Non-ascii bytes in the failure repr must survive serialization.
        value = 'hx\xc4\x85\xc4\x87\n'
        testdir.makepyfile("""
            # coding: latin1
            def test_hello():
                print (%r)
                assert 0
        """ % value)
        result, dom = runandparse(testdir)
        assert result.ret == 1
        tnode = dom.find_first_by_tag("testcase")
        fnode = tnode.find_first_by_tag("failure")
        if not sys.platform.startswith("java"):
            assert "hx" in fnode.toxml()

    def test_assertion_binchars(self, testdir):
        """this test did fail when the escaping wasnt strict"""
        testdir.makepyfile("""
            M1 = '\x01\x02\x03\x04'
            M2 = '\x01\x02\x03\x05'
            def test_str_compare():
                assert M1 == M2
        """)
        result, dom = runandparse(testdir)
        print(dom.toxml())

    def test_pass_captures_stdout(self, testdir):
        testdir.makepyfile("""
            def test_pass():
                print('hello-stdout')
        """)
        result, dom = runandparse(testdir)
        node = dom.find_first_by_tag("testsuite")
        pnode = node.find_first_by_tag("testcase")
        systemout = pnode.find_first_by_tag("system-out")
        assert "hello-stdout" in systemout.toxml()

    def test_pass_captures_stderr(self, testdir):
        testdir.makepyfile("""
            import sys
            def test_pass():
                sys.stderr.write('hello-stderr')
        """)
        result, dom = runandparse(testdir)
        node = dom.find_first_by_tag("testsuite")
        pnode = node.find_first_by_tag("testcase")
        systemout = pnode.find_first_by_tag("system-err")
        assert "hello-stderr" in systemout.toxml()

    def test_setup_error_captures_stdout(self, testdir):
        # Output emitted during a failing setup must also be captured.
        testdir.makepyfile("""
            def pytest_funcarg__arg(request):
                print('hello-stdout')
                raise ValueError()
            def test_function(arg):
                pass
        """)
        result, dom = runandparse(testdir)
        node = dom.find_first_by_tag("testsuite")
        pnode = node.find_first_by_tag("testcase")
        systemout = pnode.find_first_by_tag("system-out")
        assert "hello-stdout" in systemout.toxml()

    def test_setup_error_captures_stderr(self, testdir):
        testdir.makepyfile("""
            import sys
            def pytest_funcarg__arg(request):
                sys.stderr.write('hello-stderr')
                raise ValueError()
            def test_function(arg):
                pass
        """)
        result, dom = runandparse(testdir)
        node = dom.find_first_by_tag("testsuite")
        pnode = node.find_first_by_tag("testcase")
        systemout = pnode.find_first_by_tag("system-err")
        assert "hello-stderr" in systemout.toxml()
def test_mangle_test_address():
    """The '()' instance marker and the .py extension are stripped when
    building JUnit ids; parametrize brackets survive untouched."""
    from _pytest.junitxml import mangle_test_address
    address = '::'.join(
        ["a/my.py.thing.py", "Class", "()", "method", "[a-1-::]"])
    newnames = mangle_test_address(address)
    assert newnames == ["a.my.py.thing", "Class", "method", "[a-1-::]"]


def test_dont_configure_on_slaves(tmpdir):
    """junitxml must register its plugin only on the master process, never
    on xdist slave nodes (detected via the ``slaveinput`` attribute)."""
    gotten = []

    class FakeConfig:
        def __init__(self):
            self.pluginmanager = self
            self.option = self
        junitprefix = None
        # XXX: shouldnt need tmpdir ?
        xmlpath = str(tmpdir.join('junix.xml'))
        register = gotten.append

    fake_config = FakeConfig()
    from _pytest import junitxml
    junitxml.pytest_configure(fake_config)
    assert len(gotten) == 1
    # Adding slaveinput marks the config as a slave: no second registration.
    FakeConfig.slaveinput = None
    junitxml.pytest_configure(fake_config)
    assert len(gotten) == 1
class TestNonPython:
    """JUnit XML for non-python test items produced by custom collectors."""

    def test_summing_simple(self, testdir):
        testdir.makeconftest("""
            import pytest
            def pytest_collect_file(path, parent):
                if path.ext == ".xyz":
                    return MyItem(path, parent)
            class MyItem(pytest.Item):
                def __init__(self, path, parent):
                    super(MyItem, self).__init__(path.basename, parent)
                    self.fspath = path
                def runtest(self):
                    raise ValueError(42)
                def repr_failure(self, excinfo):
                    return "custom item runtest failed"
        """)
        testdir.tmpdir.join("myfile.xyz").write("hello")
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(errors=0, failures=1, skips=0, tests=1)
        tnode = node.find_first_by_tag("testcase")
        # The item's own name (the file basename) becomes the testcase name.
        tnode.assert_attr(name="myfile.xyz")
        fnode = tnode.find_first_by_tag("failure")
        fnode.assert_attr(message="custom item runtest failed")
        assert "custom item runtest failed" in fnode.toxml()
def test_nullbyte(testdir):
    # A null byte can not occur in XML (see section 2.2 of the spec)
    testdir.makepyfile("""
        import sys
        def test_print_nullbyte():
            sys.stdout.write('Here the null -->' + chr(0) + '<--')
            sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')
            assert False
    """)
    xmlf = testdir.tmpdir.join('junit.xml')
    testdir.runpytest('--junitxml=%s' % xmlf)
    text = xmlf.read()
    assert '\x00' not in text
    assert '#x00' in text


def test_nullbyte_replace(testdir):
    # Check if the null byte gets replaced
    testdir.makepyfile("""
        import sys
        def test_print_nullbyte():
            sys.stdout.write('Here the null -->' + chr(0) + '<--')
            sys.stdout.write('In repr form -->' + repr(chr(0)) + '<--')
            assert False
    """)
    xmlf = testdir.tmpdir.join('junit.xml')
    testdir.runpytest('--junitxml=%s' % xmlf)
    text = xmlf.read()
    assert '#x0' in text


def test_invalid_xml_escape():
    # Test some more invalid xml chars, the full range should be
    # tested really but let's just thest the edges of the ranges
    # intead.
    # XXX This only tests low unicode character points for now as
    #     there are some issues with the testing infrastructure for
    #     the higher ones.
    # XXX Testing 0xD (\r) is tricky as it overwrites the just written
    #     line in the output, so we skip it too.
    # Shim: on Python 3 unichr does not exist; fall back to chr.
    global unichr
    try:
        unichr(65)
    except NameError:
        unichr = chr
    invalid = (0x00, 0x1, 0xB, 0xC, 0xE, 0x19, 27,  # issue #126
               0xD800, 0xDFFF, 0xFFFE, 0x0FFFF)  # , 0x110000)
    valid = (0x9, 0xA, 0x20, )
    # 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF)
    from _pytest.junitxml import bin_xml_escape
    for i in invalid:
        got = bin_xml_escape(unichr(i)).uniobj
        if i <= 0xFF:
            expected = '#x%02X' % i
        else:
            expected = '#x%04X' % i
        assert got == expected
    for i in valid:
        assert chr(i) == bin_xml_escape(unichr(i)).uniobj


def test_logxml_path_expansion(tmpdir, monkeypatch):
    """~ and $HOME in the --junitxml path must be expanded."""
    home_tilde = py.path.local(os.path.expanduser('~')).join('test.xml')
    xml_tilde = LogXML('~%stest.xml' % tmpdir.sep, None)
    assert xml_tilde.logfile == home_tilde
    # this is here for when $HOME is not set correct
    monkeypatch.setenv("HOME", tmpdir)
    home_var = os.path.normpath(os.path.expandvars('$HOME/test.xml'))
    xml_var = LogXML('$HOME%stest.xml' % tmpdir.sep, None)
    assert xml_var.logfile == home_var


def test_logxml_changingdir(testdir):
    """The report path must be resolved before the test chdirs away."""
    testdir.makepyfile("""
        def test_func():
            import os
            os.chdir("a")
    """)
    testdir.tmpdir.mkdir("a")
    result = testdir.runpytest("--junitxml=a/x.xml")
    assert result.ret == 0
    assert testdir.tmpdir.join("a/x.xml").check()


def test_logxml_makedir(testdir):
    """--junitxml should automatically create directories for the xml file"""
    testdir.makepyfile("""
        def test_pass():
            pass
    """)
    result = testdir.runpytest("--junitxml=path/to/results.xml")
    assert result.ret == 0
    assert testdir.tmpdir.join("path/to/results.xml").check()


def test_escaped_parametrized_names_xml(testdir):
    """Control characters in parametrize ids are escaped in testcase names."""
    testdir.makepyfile("""
        import pytest
        @pytest.mark.parametrize('char', ["\\x00"])
        def test_func(char):
            assert char
    """)
    result, dom = runandparse(testdir)
    assert result.ret == 0
    node = dom.find_first_by_tag("testcase")
    node.assert_attr(name="test_func[#x00]")


def test_double_colon_split_function_issue469(testdir):
    """'::' inside a parametrize id must not be treated as a node separator."""
    testdir.makepyfile("""
        import pytest
        @pytest.mark.parametrize('param', ["double::colon"])
        def test_func(param):
            pass
    """)
    result, dom = runandparse(testdir)
    assert result.ret == 0
    node = dom.find_first_by_tag("testcase")
    node.assert_attr(classname="test_double_colon_split_function_issue469")
    node.assert_attr(name='test_func[double::colon]')


def test_double_colon_split_method_issue469(testdir):
    testdir.makepyfile("""
        import pytest
        class TestClass:
            @pytest.mark.parametrize('param', ["double::colon"])
            def test_func(self, param):
                pass
    """)
    result, dom = runandparse(testdir)
    assert result.ret == 0
    node = dom.find_first_by_tag("testcase")
    node.assert_attr(
        classname="test_double_colon_split_method_issue469.TestClass")
    node.assert_attr(name='test_func[double::colon]')


def test_unicode_issue368(testdir):
    """Unicode longreprs must not crash any of the report append paths."""
    path = testdir.tmpdir.join("test.xml")
    log = LogXML(str(path), None)
    ustr = py.builtin._totext("ВНИ!", "utf-8")
    from _pytest.runner import BaseReport

    class Report(BaseReport):
        longrepr = ustr
        sections = []
        nodeid = "something"
        location = 'tests/filename.py', 42, 'TestClass.method'

    test_report = Report()
    # hopefully this is not too brittle ...
    log.pytest_sessionstart()
    node_reporter = log._opentestcase(test_report)
    node_reporter.append_failure(test_report)
    node_reporter.append_collect_error(test_report)
    node_reporter.append_collect_skipped(test_report)
    node_reporter.append_error(test_report)
    test_report.longrepr = "filename", 1, ustr
    node_reporter.append_skipped(test_report)
    test_report.longrepr = "filename", 1, "Skipped: 卡嘣嘣"
    node_reporter.append_skipped(test_report)
    test_report.wasxfail = ustr
    node_reporter.append_skipped(test_report)
    log.pytest_sessionfinish()


def test_record_property(testdir):
    """record_xml_property adds <property> children under <properties>."""
    testdir.makepyfile("""
        import pytest

        @pytest.fixture
        def other(record_xml_property):
            record_xml_property("bar", 1)
        def test_record(record_xml_property, other):
            record_xml_property("foo", "<1");
    """)
    result, dom = runandparse(testdir, '-rw')
    node = dom.find_first_by_tag("testsuite")
    tnode = node.find_first_by_tag("testcase")
    psnode = tnode.find_first_by_tag('properties')
    pnodes = psnode.find_by_tag('property')
    pnodes[0].assert_attr(name="bar", value="1")
    pnodes[1].assert_attr(name="foo", value="<1")
    result.stdout.fnmatch_lines('*C3*test_record_property.py*experimental*')


def test_record_property_same_name(testdir):
    # Duplicate property names are both kept, in call order.
    testdir.makepyfile("""
        def test_record_with_same_name(record_xml_property):
            record_xml_property("foo", "bar")
            record_xml_property("foo", "baz")
    """)
    result, dom = runandparse(testdir, '-rw')
    node = dom.find_first_by_tag("testsuite")
    tnode = node.find_first_by_tag("testcase")
    psnode = tnode.find_first_by_tag('properties')
    pnodes = psnode.find_by_tag('property')
    pnodes[0].assert_attr(name="foo", value="bar")
    pnodes[1].assert_attr(name="foo", value="baz")


def test_random_report_log_xdist(testdir):
    """xdist calls pytest_runtest_logreport as they are executed by the slaves,
    with nodes from several nodes overlapping, so junitxml must cope with that
    to produce correct reports. #1064
    """
    pytest.importorskip('xdist')
    testdir.makepyfile("""
        import pytest, time
        @pytest.mark.parametrize('i', list(range(30)))
        def test_x(i):
            assert i != 22
    """)
    _, dom = runandparse(testdir, '-n2')
    suite_node = dom.find_first_by_tag("testsuite")
    failed = []
    for case_node in suite_node.find_by_tag("testcase"):
        if case_node.find_first_by_tag('failure'):
            failed.append(case_node['name'])
    assert failed == ['test_x[22]']


def test_runs_twice(testdir):
    # Passing the same file twice must not trigger an internal error.
    f = testdir.makepyfile('''
        def test_pass():
            pass
    ''')
    result, dom = runandparse(testdir, f, f)
    assert 'INTERNALERROR' not in result.stdout.str()
    first, second = [x['classname'] for x in dom.find_by_tag("testcase")]
    assert first == second


@pytest.mark.xfail(reason='hangs', run=False)
def test_runs_twice_xdist(testdir):
    pytest.importorskip('xdist')
    f = testdir.makepyfile('''
        def test_pass():
            pass
    ''')
    result, dom = runandparse(
        testdir, f,
        '--dist', 'each', '--tx', '2*popen',)
    assert 'INTERNALERROR' not in result.stdout.str()
    first, second = [x['classname'] for x in dom.find_by_tag("testcase")]
    assert first == second


def test_fancy_items_regression(testdir):
    # issue 1259
    testdir.makeconftest("""
        import pytest
        class FunItem(pytest.Item):
            def runtest(self):
                pass
        class NoFunItem(pytest.Item):
            def runtest(self):
                pass

        class FunCollector(pytest.File):
            def collect(self):
                return [
                    FunItem('a', self),
                    NoFunItem('a', self),
                    NoFunItem('b', self),
                ]

        def pytest_collect_file(path, parent):
            if path.check(ext='.py'):
                return FunCollector(path, parent)
    """)
    testdir.makepyfile('''
        def test_pass():
            pass
    ''')
    result, dom = runandparse(testdir)
    assert 'INTERNALERROR' not in result.stdout.str()
    items = sorted(
        '%(classname)s %(name)s %(file)s' % x
        for x in dom.find_by_tag("testcase"))
    import pprint
    pprint.pprint(items)
    assert items == [
        u'conftest a conftest.py',
        u'conftest a conftest.py',
        u'conftest b conftest.py',
        u'test_fancy_items_regression a test_fancy_items_regression.py',
        u'test_fancy_items_regression a test_fancy_items_regression.py',
        u'test_fancy_items_regression b test_fancy_items_regression.py',
        u'test_fancy_items_regression test_pass'
        u' test_fancy_items_regression.py',
    ]
|
mpl-2.0
|
chauhanmohit/phantomjs
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
|
121
|
13839
|
# Copyright (C) 2011, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import logging
_log = logging.getLogger(__name__)
# Yes, it's a hypergraph.
# FIXME: Should this function live with the ports somewhere?
# Perhaps this should move onto PortFactory?
def _baseline_search_hypergraph(host, port_names):
    """Build a mapping from port name to its baseline fallback path.

    Each entry lists the directories (relative to the WebKit checkout)
    that are searched for baselines, ending at the generic 'LayoutTests'
    root.  Virtual ports are seeded from _VIRTUAL_PORTS.
    """
    # These edges in the hypergraph aren't visible on build.webkit.org,
    # but they impose constraints on how we optimize baselines.
    hypergraph = dict(_VIRTUAL_PORTS)
    # FIXME: Should we get this constant from somewhere?
    generic_fallback = ['LayoutTests']
    port_factory = host.port_factory
    for name in port_names:
        port = port_factory.get(name)
        checkout_base = port.webkit_base()
        search_path = port.baseline_search_path()
        if search_path:
            relative_dirs = [host.filesystem.relpath(path, checkout_base)
                             for path in search_path]
            hypergraph[name] = relative_dirs + generic_fallback
    return hypergraph
# Fallback paths for "virtual" ports that have no bot of their own; they
# inherit baselines from their concrete counterpart ('mac', 'win', 'qt').
_VIRTUAL_PORTS = {
    'mac-future': ['LayoutTests/platform/mac-future', 'LayoutTests/platform/mac', 'LayoutTests'],
    'win-future': ['LayoutTests/platform/win-future', 'LayoutTests/platform/win', 'LayoutTests'],
    'qt-unknown': ['LayoutTests/platform/qt-unknown', 'LayoutTests/platform/qt', 'LayoutTests'],
}
# FIXME: Should this function be somewhere more general?
def _invert_dictionary(dictionary):
inverted_dictionary = {}
for key, value in dictionary.items():
if inverted_dictionary.get(value):
inverted_dictionary[value].append(key)
else:
inverted_dictionary[value] = [key]
return inverted_dictionary
class BaselineOptimizer(object):
    """Computes and applies the minimal placement of baseline files.

    Using each port's baseline search path (the "hypergraph"), this class
    figures out which directories a baseline must live in so that every
    port still resolves the same result, then adds/deletes files via SCM.
    """

    def __init__(self, host, port_names):
        self._host = host
        self._filesystem = self._host.filesystem
        self._scm = self._host.scm()
        self._hypergraph = _baseline_search_hypergraph(host, port_names)
        # Every directory mentioned in any port's fallback path.
        # (set().union(*...) replaces the Python-2-only builtin reduce(),
        # and also tolerates an empty hypergraph.)
        self._directories = set().union(*self._hypergraph.values())

    def read_results_by_directory(self, baseline_name):
        """Return {directory: sha1} for each on-disk copy of baseline_name."""
        results_by_directory = {}
        for directory in self._directories:
            path = self._filesystem.join(self._scm.checkout_root, directory, baseline_name)
            if self._filesystem.exists(path):
                results_by_directory[directory] = self._filesystem.sha1(path)
        return results_by_directory

    def _results_by_port_name(self, results_by_directory):
        """Map each port to the first result found along its fallback path."""
        results_by_port_name = {}
        for port_name, search_path in self._hypergraph.items():
            for directory in search_path:
                if directory in results_by_directory:
                    results_by_port_name[port_name] = results_by_directory[directory]
                    break
        return results_by_port_name

    def _most_specific_common_directory(self, port_names):
        """Return the directory common to all given ports' fallback paths
        that sits earliest (i.e. is most specific) in those paths overall."""
        paths = [self._hypergraph[port_name] for port_name in port_names]
        common_directories = set.intersection(*map(set, paths))

        def score(directory):
            # Lower is better: the sum of positions along each fallback path.
            return sum(path.index(directory) for path in paths)

        _, directory = min((score(directory), directory) for directory in common_directories)
        return directory

    def _filter_port_names_by_result(self, predicate, port_names_by_result):
        """Keep only port names satisfying predicate; drop empty entries.

        A list comprehension is used instead of filter() so the truthiness
        test below is correct on Python 3, where filter() returns an
        always-truthy iterator.
        """
        filtered_port_names_by_result = {}
        for result, port_names in port_names_by_result.items():
            filtered_port_names = [name for name in port_names if predicate(name)]
            if filtered_port_names:
                filtered_port_names_by_result[result] = filtered_port_names
        return filtered_port_names_by_result

    def _place_results_in_most_specific_common_directory(self, port_names_by_result, results_by_directory):
        """Write each result into the tightest directory shared by its ports."""
        for result, port_names in port_names_by_result.items():
            directory = self._most_specific_common_directory(port_names)
            results_by_directory[directory] = result

    def _find_optimal_result_placement(self, baseline_name):
        """Return (current, proposed) {directory: result} placements."""
        results_by_directory = self.read_results_by_directory(baseline_name)
        results_by_port_name = self._results_by_port_name(results_by_directory)
        port_names_by_result = _invert_dictionary(results_by_port_name)
        new_results_by_directory = self._optimize_by_most_specific_common_directory(results_by_directory, results_by_port_name, port_names_by_result)
        if not new_results_by_directory:
            new_results_by_directory = self._optimize_by_pushing_results_up(results_by_directory, results_by_port_name, port_names_by_result)
        return results_by_directory, new_results_by_directory

    def _optimize_by_most_specific_common_directory(self, results_by_directory, results_by_port_name, port_names_by_result):
        """Iteratively place results until every port resolves correctly.

        Returns {} when the iteration stops making progress (no convergence).
        """
        new_results_by_directory = {}
        unsatisfied_port_names_by_result = port_names_by_result
        while unsatisfied_port_names_by_result:
            self._place_results_in_most_specific_common_directory(unsatisfied_port_names_by_result, new_results_by_directory)
            new_results_by_port_name = self._results_by_port_name(new_results_by_directory)

            def is_unsatisfied(port_name):
                return results_by_port_name[port_name] != new_results_by_port_name[port_name]

            new_unsatisfied_port_names_by_result = self._filter_port_names_by_result(is_unsatisfied, port_names_by_result)
            # NOTE(review): this compares the number of distinct *results*,
            # not the number of unsatisfied ports -- preserved as-is.
            if len(new_unsatisfied_port_names_by_result.values()) >= len(unsatisfied_port_names_by_result.values()):
                return {}  # Frowns. We do not appear to be converging.
            unsatisfied_port_names_by_result = new_unsatisfied_port_names_by_result
        return new_results_by_directory

    def _optimize_by_pushing_results_up(self, results_by_directory, results_by_port_name, port_names_by_result):
        """Fallback strategy: push each result up its fallback path for as
        long as that reduces the number of directories used."""
        try:
            best_so_far = results_by_directory
            while True:
                new_results_by_directory = copy.copy(best_so_far)
                for port_name in self._hypergraph.keys():
                    fallback_path = self._hypergraph[port_name]
                    current_index, current_directory = self._find_in_fallbackpath(fallback_path, results_by_port_name[port_name], best_so_far)
                    current_result = results_by_port_name[port_name]
                    for index in range(current_index + 1, len(fallback_path)):
                        new_directory = fallback_path[index]
                        if new_directory not in new_results_by_directory:
                            # Free slot further up: move the result into it.
                            new_results_by_directory[new_directory] = current_result
                            if current_directory in new_results_by_directory:
                                del new_results_by_directory[current_directory]
                        elif new_results_by_directory[new_directory] == current_result:
                            # Same result already lives there; the lower copy
                            # is redundant.
                            if current_directory in new_results_by_directory:
                                del new_results_by_directory[current_directory]
                        else:
                            # The new_directory contains a different result, so stop trying to push results up.
                            break
                if len(new_results_by_directory) >= len(best_so_far):
                    # We've failed to improve, so give up.
                    break
                best_so_far = new_results_by_directory
            return best_so_far
        except KeyError:
            # FIXME: KeyErrors get raised if we're missing baselines. We should handle this better.
            return {}

    def _find_in_fallbackpath(self, fallback_path, current_result, results_by_directory):
        """Return (index, directory) of current_result within fallback_path."""
        for index, directory in enumerate(fallback_path):
            if directory in results_by_directory and (results_by_directory[directory] == current_result):
                return index, directory
        assert False, "result %s not found in fallback_path %s, %s" % (current_result, fallback_path, results_by_directory)

    def _filtered_results_by_port_name(self, results_by_directory):
        """Results per port, with the botless virtual ports removed."""
        results_by_port_name = self._results_by_port_name(results_by_directory)
        for port_name in _VIRTUAL_PORTS.keys():
            if port_name in results_by_port_name:
                del results_by_port_name[port_name]
        return results_by_port_name

    def _platform(self, filename):
        """Return the platform directory name for filename, or '(generic)'."""
        platform_dir = 'LayoutTests' + self._filesystem.sep + 'platform' + self._filesystem.sep
        if filename.startswith(platform_dir):
            return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
        platform_dir = self._filesystem.join(self._scm.checkout_root, platform_dir)
        if filename.startswith(platform_dir):
            return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
        return '(generic)'

    def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
        """Delete/add baseline copies through SCM to realize the new placement."""
        # Snapshot the bytes of each distinct result before deleting anything.
        data_for_result = {}
        for directory, result in results_by_directory.items():
            if result not in data_for_result:
                source = self._filesystem.join(self._scm.checkout_root, directory, baseline_name)
                data_for_result[result] = self._filesystem.read_binary_file(source)
        file_names = []
        for directory, result in results_by_directory.items():
            if new_results_by_directory.get(directory) != result:
                file_names.append(self._filesystem.join(self._scm.checkout_root, directory, baseline_name))
        if file_names:
            _log.debug("    Deleting:")
            for platform_dir in sorted(self._platform(filename) for filename in file_names):
                _log.debug("      " + platform_dir)
            self._scm.delete_list(file_names)
        else:
            _log.debug("    (Nothing to delete)")
        file_names = []
        for directory, result in new_results_by_directory.items():
            if results_by_directory.get(directory) != result:
                destination = self._filesystem.join(self._scm.checkout_root, directory, baseline_name)
                self._filesystem.maybe_make_directory(self._filesystem.split(destination)[0])
                self._filesystem.write_binary_file(destination, data_for_result[result])
                file_names.append(destination)
        if file_names:
            _log.debug("    Adding:")
            for platform_dir in sorted(self._platform(filename) for filename in file_names):
                _log.debug("      " + platform_dir)
            self._scm.add_list(file_names)
        else:
            _log.debug("    (Nothing to add)")

    def directories_by_result(self, baseline_name):
        """Return {result: [directories]} for the current on-disk state."""
        results_by_directory = self.read_results_by_directory(baseline_name)
        return _invert_dictionary(results_by_directory)

    def write_by_directory(self, results_by_directory, writer, indent):
        """Log each directory's platform name with a short result-hash prefix."""
        for path in sorted(results_by_directory):
            writer("%s%s: %s" % (indent, self._platform(path), results_by_directory[path][0:6]))

    def optimize(self, baseline_name):
        """Optimize placement of baseline_name; return False on failure."""
        basename = self._filesystem.basename(baseline_name)
        results_by_directory, new_results_by_directory = self._find_optimal_result_placement(baseline_name)
        self.new_results_by_directory = new_results_by_directory
        if new_results_by_directory == results_by_directory:
            if new_results_by_directory:
                _log.debug("  %s: (already optimal)" % basename)
                self.write_by_directory(results_by_directory, _log.debug, "    ")
            else:
                _log.debug("  %s: (no baselines found)" % basename)
            return True
        # Ignoring virtual ports, the proposed placement must resolve to the
        # same result for every real port, or the optimization is invalid.
        if self._filtered_results_by_port_name(results_by_directory) != self._filtered_results_by_port_name(new_results_by_directory):
            _log.warning("  %s: optimization failed" % basename)
            self.write_by_directory(results_by_directory, _log.warning, "      ")
            return False
        _log.debug("  %s:" % basename)
        _log.debug("    Before: ")
        self.write_by_directory(results_by_directory, _log.debug, "      ")
        _log.debug("    After: ")
        self.write_by_directory(new_results_by_directory, _log.debug, "      ")
        self._move_baselines(baseline_name, results_by_directory, new_results_by_directory)
        return True
|
bsd-3-clause
|
samuelhavron/heroku-buildpack-python
|
Python-3.4.3/Lib/distutils/tests/test_build.py
|
147
|
1919
|
"""Tests for distutils.command.build."""
import unittest
import os
import sys
from test.support import run_unittest
from distutils.command.build import build
from distutils.tests import support
from sysconfig import get_platform
class BuildTestCase(support.TempdirManager,
                    support.LoggingSilencer,
                    unittest.TestCase):
    """Unit tests for option handling in the distutils ``build`` command."""

    def test_finalize_options(self):
        """finalize_options() must derive all build paths from build_base."""
        pkg_dir, dist = self.create_dist()
        cmd = build(dist)
        cmd.finalize_options()
        # plat_name defaults to the running platform when not specified.
        self.assertEqual(cmd.plat_name, get_platform())
        # Pure-Python modules land in build/lib.
        self.assertEqual(cmd.build_purelib,
                         os.path.join(cmd.build_base, 'lib'))
        # Platform-specific modules land in build/lib.<plat>-x.y, with a
        # '-pydebug' suffix on debug builds of the interpreter, e.g.
        # build/lib.macosx-10.3-i386-2.7
        plat_spec = '.%s-%s' % (cmd.plat_name, sys.version[0:3])
        if hasattr(sys, 'gettotalrefcount'):
            self.assertTrue(cmd.build_platlib.endswith('-pydebug'))
            plat_spec += '-pydebug'
        self.assertEqual(cmd.build_platlib,
                         os.path.join(cmd.build_base, 'lib' + plat_spec))
        # With no extensions configured, build_lib mirrors build_purelib.
        self.assertEqual(cmd.build_lib, cmd.build_purelib)
        # Temporary build artifacts go to build/temp.<plat>.
        self.assertEqual(cmd.build_temp,
                         os.path.join(cmd.build_base, 'temp' + plat_spec))
        # Scripts go to build/scripts-x.y.
        self.assertEqual(cmd.build_scripts,
                         os.path.join(cmd.build_base, 'scripts-' + sys.version[0:3]))
        # The executable option defaults to the normalized interpreter path.
        self.assertEqual(cmd.executable, os.path.normpath(sys.executable))
def test_suite():
    """Return a test suite containing every test case in this module."""
    return unittest.makeSuite(BuildTestCase)
# Allow running this module directly: python test_build.py
if __name__ == "__main__":
    run_unittest(test_suite())
|
mit
|
dwnld/thrift
|
contrib/fb303/py/setup.py
|
67
|
1629
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
try:
    # Prefer setuptools when available; it layers extra commands
    # (develop, bdist_egg, ...) on top of distutils.
    from setuptools import setup, Extension
except ImportError:
    # Only an ImportError should trigger the fallback -- the previous bare
    # `except:` also swallowed SystemExit/KeyboardInterrupt.
    from distutils.core import setup, Extension, Command
# Package metadata for the fb303 Python bindings (service-status API
# shared by Facebook/Apache Thrift services).
setup(name = 'thrift_fb303',
    version = '1.0.0-dev',
    description = 'Python bindings for the Apache Thrift FB303',
    author = ['Thrift Developers'],
    author_email = ['dev@thrift.apache.org'],
    url = 'http://thrift.apache.org',
    license = 'Apache License 2.0',
    packages = [
        'fb303',
        'fb303_scripts',
    ],
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Topic :: Software Development :: Libraries',
        'Topic :: System :: Networking'
    ],
)
|
apache-2.0
|
denny820909/builder
|
lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/words/xish/xpath.py
|
67
|
9290
|
# -*- test-case-name: twisted.words.test.test_xpath -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
XPath query support.
This module provides L{XPathQuery} to match
L{domish.Element<twisted.words.xish.domish.Element>} instances against
XPath-like expressions.
"""
try:
    # Prefer the C-accelerated StringIO on Python 2; fall back to the
    # pure-Python module when cStringIO is unavailable.
    import cStringIO as StringIO
except ImportError:
    import StringIO
class LiteralValue(str):
    """A literal string within an XPath expression; evaluates to itself."""

    def value(self, elem):
        """Return the literal, ignoring C{elem}."""
        return self
class IndexValue:
    """Selects the child at a 1-based position in an XPath expression."""

    def __init__(self, index):
        # XPath positions are 1-based; children lists are 0-based.
        self.index = int(index) - 1

    def value(self, elem):
        """Return the child of C{elem} at the stored position."""
        return elem.children[self.index]
class AttribValue:
    """Evaluates to an element attribute, or (for 'xmlns') its URI."""

    def __init__(self, attribname):
        self.attribname = attribname
        if attribname == "xmlns":
            # The pseudo-attribute 'xmlns' resolves to the element's URI.
            self.value = self.value_ns

    def value_ns(self, elem):
        """Return the namespace URI of C{elem}."""
        return elem.uri

    def value(self, elem):
        """Return the attribute's value, or C{None} when absent."""
        if self.attribname not in elem.attributes:
            return None
        return elem.attributes[self.attribname]
class CompareValue:
    """Compares two sub-expressions with C{=} or C{!=}."""

    def __init__(self, lhs, op, rhs):
        self.lhs = lhs
        self.rhs = rhs
        # Bind the comparison strategy once, at construction time.
        self.value = self._compareEqual if op == "=" else self._compareNotEqual

    def _compareEqual(self, elem):
        return self.lhs.value(elem) == self.rhs.value(elem)

    def _compareNotEqual(self, elem):
        return self.lhs.value(elem) != self.rhs.value(elem)
class BooleanValue:
    """
    Provide boolean XPath expression operators.

    @ivar lhs: Left hand side expression of the operator.
    @ivar op: The operator. One of C{'and'}, C{'or'}.
    @ivar rhs: Right hand side expression of the operator.
    @ivar value: Reference to the method that will calculate the value of
                 this expression given an element.
    """

    def __init__(self, lhs, op, rhs):
        self.lhs = lhs
        self.rhs = rhs
        # Bind the evaluation strategy once, at construction time.
        self.value = self._booleanAnd if op == "and" else self._booleanOr

    def _booleanAnd(self, elem):
        """Short-circuit logical AND of both sub-expressions for C{elem}."""
        return self.lhs.value(elem) and self.rhs.value(elem)

    def _booleanOr(self, elem):
        """Short-circuit logical OR of both sub-expressions for C{elem}."""
        return self.lhs.value(elem) or self.rhs.value(elem)
def Function(fname):
    """
    Internal method which selects the function object
    """
    # Function classes follow the naming convention _<name>_Function.
    return globals()["_%s_Function" % fname]()
class _not_Function:
def __init__(self):
self.baseValue = None
def setParams(self, baseValue):
self.baseValue = baseValue
def value(self, elem):
return not self.baseValue.value(elem)
class _text_Function:
def setParams(self):
pass
def value(self, elem):
return str(elem)
class _Location:
    """
    One step of a parsed XPath location path.

    Matches an element by optional name plus predicate expressions, and
    may require a child location to match one of the element's children.
    The query* methods mirror L{XPathQuery}'s public API.
    """
    def __init__(self):
        # predicates: expression objects exposing .value(elem).
        self.predicates = []
        # elementName: required element name, or None to match any name.
        self.elementName = None
        # childLocation: the next _Location step, applied to child elements.
        self.childLocation = None
    def matchesPredicates(self, elem):
        # Name check first; an elementName of None acts as a wildcard.
        if self.elementName != None and self.elementName != elem.name:
            return 0
        for p in self.predicates:
            if not p.value(elem):
                return 0
        return 1
    def matches(self, elem):
        # The element itself must match; when a child step exists, at least
        # one child element must satisfy it too.
        if not self.matchesPredicates(elem):
            return 0
        if self.childLocation != None:
            for c in elem.elements():
                if self.childLocation.matches(c):
                    return 1
        else:
            return 1
        return 0
    def queryForString(self, elem, resultbuf):
        # Accumulate the string value of every final-step match into resultbuf.
        if not self.matchesPredicates(elem):
            return
        if self.childLocation != None:
            for c in elem.elements():
                self.childLocation.queryForString(c, resultbuf)
        else:
            resultbuf.write(str(elem))
    def queryForNodes(self, elem, resultlist):
        # Append every element matched by the final step to resultlist.
        if not self.matchesPredicates(elem):
            return
        if self.childLocation != None:
            for c in elem.elements():
                self.childLocation.queryForNodes(c, resultlist)
        else:
            resultlist.append(elem)
    def queryForStringList(self, elem, resultlist):
        # Collect the text children (not sub-elements) of matched elements.
        if not self.matchesPredicates(elem):
            return
        if self.childLocation != None:
            for c in elem.elements():
                self.childLocation.queryForStringList(c, resultlist)
        else:
            for c in elem.children:
                # NOTE: the bare 'unicode' name makes this Python 2 only.
                if isinstance(c, (str, unicode)):
                    resultlist.append(c)
class _AnyLocation:
    """
    An XPath '//' step: matches the named element at any depth in the
    tree rooted at the element it is applied to.
    """
    def __init__(self):
        # Same structure as _Location; see that class for attribute notes.
        self.predicates = []
        self.elementName = None
        self.childLocation = None
    def matchesPredicates(self, elem):
        # Unlike _Location, the name check happens in the callers below.
        for p in self.predicates:
            if not p.value(elem):
                return 0
        return 1
    def listParents(self, elem, parentlist):
        # Append the names of elem's ancestors (root first), then elem itself.
        if elem.parent != None:
            self.listParents(elem.parent, parentlist)
        parentlist.append(elem.name)
    def isRootMatch(self, elem):
        # True when elem itself (not a descendant) satisfies this step.
        if (self.elementName == None or self.elementName == elem.name) and \
           self.matchesPredicates(elem):
            if self.childLocation != None:
                for c in elem.elements():
                    if self.childLocation.matches(c):
                        return True
            else:
                return True
        return False
    def findFirstRootMatch(self, elem):
        if (self.elementName == None or self.elementName == elem.name) and \
           self.matchesPredicates(elem):
            # Thus far, the name matches and the predicates match;
            # now check into the children and find the first one
            # that matches the rest of the structure.
            if self.childLocation != None:
                for c in elem.elements():
                    if self.childLocation.matches(c):
                        return c
                return None
            else:
                # No children locations; this is a match!
                return elem
        else:
            # Ok, predicates or name didn't match, so we need to start
            # down each child and treat it as the root and try
            # again
            for c in elem.elements():
                if self.matches(c):
                    return c
            # No children matched...
            return None
    def matches(self, elem):
        if self.isRootMatch(elem):
            return True
        else:
            # Ok, initial element isn't an exact match, walk
            # down each child and treat it as the root and try
            # again
            for c in elem.elements():
                if self.matches(c):
                    return True
            # No children matched...
            return False
    def queryForString(self, elem, resultbuf):
        # String queries are ambiguous for '//' steps, so explicitly refuse.
        raise NotImplementedError(
            "queryForString is not implemented for any location")
    def queryForNodes(self, elem, resultlist):
        # First check to see if _this_ element is a root
        if self.isRootMatch(elem):
            resultlist.append(elem)
        # Now check each child
        for c in elem.elements():
            self.queryForNodes(c, resultlist)
    def queryForStringList(self, elem, resultlist):
        # Collect text children of every match, recursing into sub-elements.
        if self.isRootMatch(elem):
            for c in elem.children:
                # NOTE: the bare 'unicode' name makes this Python 2 only.
                if isinstance(c, (str, unicode)):
                    resultlist.append(c)
        for c in elem.elements():
            self.queryForStringList(c, resultlist)
class XPathQuery:
    """A parsed XPath expression that can be matched against elements."""

    def __init__(self, queryStr):
        self.queryStr = queryStr
        from twisted.words.xish.xpathparser import parse
        self.baseLocation = parse('XPATH', queryStr)

    def __hash__(self):
        # Queries hash like their source string, so equal query strings
        # collide in dictionaries.
        return hash(self.queryStr)

    def matches(self, elem):
        """Return whether C{elem} matches this query."""
        return self.baseLocation.matches(elem)

    def queryForString(self, elem):
        """Return the concatenated string value of all matches."""
        buf = StringIO.StringIO()
        self.baseLocation.queryForString(elem, buf)
        return buf.getvalue()

    def queryForNodes(self, elem):
        """Return the list of matching elements, or C{None} when empty."""
        nodes = []
        self.baseLocation.queryForNodes(elem, nodes)
        return nodes or None

    def queryForStringList(self, elem):
        """Return the list of matching text children, or C{None} when empty."""
        strings = []
        self.baseLocation.queryForStringList(elem, strings)
        return strings or None
# Module-level cache of parsed queries, keyed by their source string.
__internedQueries = {}


def internQuery(queryString):
    """Return a cached L{XPathQuery} for C{queryString}, parsing at most once."""
    try:
        return __internedQueries[queryString]
    except KeyError:
        query = __internedQueries[queryString] = XPathQuery(queryString)
        return query
def matches(xpathstr, elem):
    """Return whether C{elem} matches the XPath expression C{xpathstr}."""
    return internQuery(xpathstr).matches(elem)
def queryForStringList(xpathstr, elem):
    """Return matching text children of C{elem}, or C{None} when empty."""
    return internQuery(xpathstr).queryForStringList(elem)
def queryForString(xpathstr, elem):
    """Return the concatenated string value of matches under C{elem}."""
    return internQuery(xpathstr).queryForString(elem)
def queryForNodes(xpathstr, elem):
    """Return the list of matching elements under C{elem}, or C{None}."""
    return internQuery(xpathstr).queryForNodes(elem)
|
mit
|
andresgz/django
|
django/contrib/gis/utils/layermapping.py
|
335
|
27300
|
# LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
    """Base class for all errors raised by LayerMapping."""
    pass
class InvalidString(LayerMapError):
    """Raised when an OGR string value is unsuitable for the model field."""
    pass
class InvalidDecimal(LayerMapError):
    """Raised when an OGR value cannot be stored in a DecimalField."""
    pass
class InvalidInteger(LayerMapError):
    """Raised when an OGR value cannot be stored in an integer field."""
    pass
class MissingForeignKey(LayerMapError):
    """Raised when a related model instance for a ForeignKey is not found."""
    pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
2: OGRGeomType('MultiLineString'),
3: OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField: OFTInteger,
models.IntegerField: (OFTInteger, OFTReal, OFTString),
models.FloatField: (OFTInteger, OFTReal),
models.DateField: OFTDate,
models.DateTimeField: OFTDateTime,
models.EmailField: OFTString,
models.TimeField: OFTTime,
models.DecimalField: (OFTInteger, OFTReal),
models.CharField: OFTString,
models.SlugField: OFTString,
models.TextField: OFTString,
models.URLField: OFTString,
models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
}
    def __init__(self, model, data, mapping, layer=0,
                 source_srs=None, encoding='utf-8',
                 transaction_mode='commit_on_success',
                 transform=True, unique=None, using=None):
        """
        A LayerMapping object is initialized using the given Model (not an instance),
        a DataSource (or string path to an OGR-supported data file), and a mapping
        dictionary.  See the module level docstring for more details and keyword
        argument usage.
        """
        # Getting the DataSource and the associated Layer.
        if isinstance(data, six.string_types):
            self.ds = DataSource(data, encoding=encoding)
        else:
            self.ds = data
        self.layer = self.ds[layer]
        # The database alias to write through; defaults to the router's pick.
        self.using = using if using is not None else router.db_for_write(model)
        self.spatial_backend = connections[self.using].ops
        # Setting the mapping & model attributes.
        self.mapping = mapping
        self.model = model
        # Checking the layer -- initialization of the object will fail if
        # things don't check out before hand.
        self.check_layer()
        # Getting the geometry column associated with the model (an
        # exception will be raised if there is no geometry column).
        # NOTE(review): geo_field is only assigned when the backend supports
        # transforms -- confirm downstream code guards for its absence.
        if connections[self.using].features.supports_transform:
            self.geo_field = self.geometry_field()
        else:
            transform = False
        # Checking the source spatial reference system, and getting
        # the coordinate transformation object (unless the `transform`
        # keyword is set to False)
        if transform:
            self.source_srs = self.check_srs(source_srs)
            self.transform = self.coord_transform()
        else:
            self.transform = transform
        # Setting the encoding for OFTString fields, if specified.
        if encoding:
            # Making sure the encoding exists, if not a LookupError
            # exception will be thrown.
            from codecs import lookup
            lookup(encoding)
            self.encoding = encoding
        else:
            self.encoding = None
        if unique:
            self.check_unique(unique)
            transaction_mode = 'autocommit'  # Has to be set to autocommit.
            self.unique = unique
        else:
            self.unique = None
        # Setting the transaction decorator with the function in the
        # transaction modes dictionary.
        self.transaction_mode = transaction_mode
        if transaction_mode == 'autocommit':
            self.transaction_decorator = None
        elif transaction_mode == 'commit_on_success':
            self.transaction_decorator = transaction.atomic
        else:
            raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
# #### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
    def check_layer(self):
        """
        This checks the Layer metadata, and ensures that it is compatible
        with the mapping information and model. Unlike previous revisions,
        there is no need to increment through each feature in the Layer.

        Populates ``self.fields`` ({field_name: model field or related
        model}) and ``self.geom_field``/``self.coord_dim`` as side effects;
        raises LayerMapError on any incompatibility.
        """
        # The geometry field of the model is set here.
        # TODO: Support more than one geometry field / model.  However, this
        # depends on the GDAL Driver in use.
        self.geom_field = False
        self.fields = {}
        # Getting lists of the field names and the field types available in
        # the OGR Layer.
        ogr_fields = self.layer.fields
        ogr_field_types = self.layer.field_types
        # Function for determining if the OGR mapping field is in the Layer.
        def check_ogr_fld(ogr_map_fld):
            try:
                idx = ogr_fields.index(ogr_map_fld)
            except ValueError:
                raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
            return idx
        # No need to increment through each feature in the model, simply check
        # the Layer metadata against what was given in the mapping dictionary.
        for field_name, ogr_name in self.mapping.items():
            # Ensuring that a corresponding field exists in the model
            # for the given field name in the mapping.
            try:
                model_field = self.model._meta.get_field(field_name)
            except FieldDoesNotExist:
                raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
            # Getting the string name for the Django field class (e.g., 'PointField').
            fld_name = model_field.__class__.__name__
            if isinstance(model_field, GeometryField):
                if self.geom_field:
                    raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
                # Getting the coordinate dimension of the geometry field.
                coord_dim = model_field.dim
                try:
                    if coord_dim == 3:
                        gtype = OGRGeomType(ogr_name + '25D')
                    else:
                        gtype = OGRGeomType(ogr_name)
                except GDALException:
                    raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
                # Making sure that the OGR Layer's Geometry is compatible.
                ltype = self.layer.geom_type
                if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
                    raise LayerMapError('Invalid mapping geometry; model has %s%s, '
                                        'layer geometry type is %s.' %
                                        (fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
                # Setting the `geom_field` attribute w/the name of the model field
                # that is a Geometry.  Also setting the coordinate dimension
                # attribute.
                self.geom_field = field_name
                self.coord_dim = coord_dim
                fields_val = model_field
            elif isinstance(model_field, models.ForeignKey):
                if isinstance(ogr_name, dict):
                    # Is every given related model mapping field in the Layer?
                    rel_model = model_field.remote_field.model
                    for rel_name, ogr_field in ogr_name.items():
                        idx = check_ogr_fld(ogr_field)
                        try:
                            rel_model._meta.get_field(rel_name)
                        except FieldDoesNotExist:
                            raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                                (rel_name, rel_model.__class__.__name__))
                    fields_val = rel_model
                else:
                    raise TypeError('ForeignKey mapping must be of dictionary type.')
            else:
                # Is the model field type supported by LayerMapping?
                if model_field.__class__ not in self.FIELD_TYPES:
                    raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
                # Is the OGR field in the Layer?
                idx = check_ogr_fld(ogr_name)
                ogr_field = ogr_field_types[idx]
                # Can the OGR field type be mapped to the Django field type?
                if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
                    raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
                                        (ogr_field, ogr_field.__name__, fld_name))
                fields_val = model_field
            self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, six.string_types)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, six.string_types):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
# Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except GDALException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, six.string_types):
return {self.unique: kwargs[self.unique]}
else:
return {fld: kwargs[fld] for fld in self.unique}
# #### Verification routines used in constructing model keyword arguments. ####
    def verify_ogr_field(self, ogr_field, model_field):
        """
        Verifies if the OGR Field contents are acceptable to the Django
        model field. If they are, the verified value is returned,
        otherwise the proper exception is raised.

        Handles three conversions: OFTString -> Char/TextField (with optional
        decoding and max_length enforcement), OFTReal -> DecimalField (with
        max_digits/decimal_places enforcement), and OFTReal/OFTString ->
        IntegerField.  Any other combination passes the raw value through.
        """
        if (isinstance(ogr_field, OFTString) and
                isinstance(model_field, (models.CharField, models.TextField))):
            if self.encoding:
                # The encoding for OGR data sources may be specified here
                # (e.g., 'cp437' for Census Bureau boundary files).
                val = force_text(ogr_field.value, self.encoding)
            else:
                val = ogr_field.value
            # Reject strings that would be silently truncated by the column.
            if model_field.max_length and len(val) > model_field.max_length:
                raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
                                    (model_field.name, model_field.max_length, len(val)))
        elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
            try:
                # Creating an instance of the Decimal value to use.
                # str() first to avoid binary-float artifacts in the Decimal.
                d = Decimal(str(ogr_field.value))
            except DecimalInvalidOperation:
                raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
            # Getting the decimal value as a tuple: (sign, digits, exponent).
            dtup = d.as_tuple()
            digits = dtup[1]
            d_idx = dtup[2] # index where the decimal is
            # Maximum amount of precision, or digits to the left of the decimal.
            max_prec = model_field.max_digits - model_field.decimal_places
            # Getting the digits to the left of the decimal place for the
            # given decimal.  Note the slice form: for a negative exponent
            # larger than the digit count, digits[:d_idx] is empty (0 digits),
            # whereas len(digits) + d_idx would go negative.
            if d_idx < 0:
                n_prec = len(digits[:d_idx])
            else:
                n_prec = len(digits) + d_idx
            # If we have more than the maximum digits allowed, then throw an
            # InvalidDecimal exception.
            if n_prec > max_prec:
                raise InvalidDecimal(
                    'A DecimalField with max_digits %d, decimal_places %d must '
                    'round to an absolute value less than 10^%d.' %
                    (model_field.max_digits, model_field.decimal_places, max_prec)
                )
            val = d
        elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
            # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
            try:
                val = int(ogr_field.value)
            except ValueError:
                raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
        else:
            # No conversion required for this field-type combination.
            val = ogr_field.value
        return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey(
'No ForeignKey %s model found with keyword arguments: %s' %
(rel_model.__name__, fk_kwargs)
)
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
# #### Other model methods ####
def coord_transform(self):
"Returns the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception as msg:
new_msg = 'Could not translate between the data source and model geometry: %s' % msg
six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use `get_field()` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
return opts.get_field(self.geom_field)
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
    def save(self, verbose=False, fid_range=False, step=False,
             progress=False, silent=False, stream=sys.stdout, strict=False):
        """
        Saves the contents from the OGR DataSource Layer into the database
        according to the mapping dictionary given at initialization.
        Keyword Parameters:
         verbose:
           If set, information will be printed subsequent to each model save
           executed on the database.
         fid_range:
           May be set with a slice or tuple of (begin, end) feature ID's to map
           from the data source. In other words, this keyword enables the user
           to selectively import a subset range of features in the geographic
           data source.
         step:
           If set with an integer, transactions will occur at every step
           interval. For example, if step=1000, a commit would occur after
           the 1,000th feature, the 2,000th feature etc.
         progress:
           When this keyword is set, status information will be printed giving
           the number of features processed and successfully saved. By default,
           progress information will be printed every 1000 features processed,
           however, this default may be overridden by setting this keyword with an
           integer for the desired interval.
         stream:
           Status information will be written to this file handle. Defaults to
           using `sys.stdout`, but any object with a `write` method is supported.
         silent:
           By default, non-fatal error notifications are printed to stdout, but
           this keyword may be set to disable these notifications.
         strict:
           Execution of the model mapping will cease upon the first error
           encountered. The default behavior is to attempt to continue.
        """
        # Getting the default Feature ID range.
        default_range = self.check_fid_range(fid_range)
        # Setting the progress interval, if requested.  Any truthy non-int
        # (e.g. progress=True) falls back to the default interval of 1000.
        if progress:
            if progress is True or not isinstance(progress, int):
                progress_interval = 1000
            else:
                progress_interval = progress
        # Closure doing the actual work; defined here so it can be wrapped
        # by the transaction decorator below and reused per-slice for
        # incremental (stepped) saving.
        def _save(feat_range=default_range, num_feat=0, num_saved=0):
            if feat_range:
                layer_iter = self.layer[feat_range]
            else:
                layer_iter = self.layer
            for feat in layer_iter:
                num_feat += 1
                # Getting the keyword arguments
                try:
                    kwargs = self.feature_kwargs(feat)
                except LayerMapError as msg:
                    # Something borked the validation
                    if strict:
                        raise
                    elif not silent:
                        stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
                else:
                    # Constructing the model using the keyword args
                    is_update = False
                    if self.unique:
                        # If we want unique models on a particular field, handle the
                        # geometry appropriately.
                        try:
                            # Getting the keyword arguments and retrieving
                            # the unique model.
                            u_kwargs = self.unique_kwargs(kwargs)
                            m = self.model.objects.using(self.using).get(**u_kwargs)
                            is_update = True
                            # Getting the geometry (in OGR form), creating
                            # one from the kwargs WKT, adding in additional
                            # geometries, and update the attribute with the
                            # just-updated geometry WKT.
                            geom = getattr(m, self.geom_field).ogr
                            new = OGRGeometry(kwargs[self.geom_field])
                            for g in new:
                                geom.add(g)
                            setattr(m, self.geom_field, geom.wkt)
                        except ObjectDoesNotExist:
                            # No unique model exists yet, create.
                            m = self.model(**kwargs)
                    else:
                        m = self.model(**kwargs)
                    try:
                        # Attempting to save.
                        m.save(using=self.using)
                        num_saved += 1
                        if verbose:
                            stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
                    except Exception as msg:
                        if strict:
                            # Bailing out if the `strict` keyword is set.
                            if not silent:
                                stream.write(
                                    'Failed to save the feature (id: %s) into the '
                                    'model with the keyword arguments:\n' % feat.fid
                                )
                                stream.write('%s\n' % kwargs)
                            raise
                        elif not silent:
                            stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
                # Printing progress information, if requested.
                if progress and num_feat % progress_interval == 0:
                    stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
            # Only used for status output purposes -- incremental saving uses the
            # values returned here.
            return num_saved, num_feat
        # Wrap the worker in a transaction when the backend provides one.
        if self.transaction_decorator is not None:
            _save = self.transaction_decorator(_save)
        nfeat = self.layer.num_feat
        if step and isinstance(step, int) and step < nfeat:
            # Incremental saving is requested at the given interval (step)
            if default_range:
                raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
            beg, num_feat, num_saved = (0, 0, 0)
            indices = range(step, nfeat, step)
            n_i = len(indices)
            for i, end in enumerate(indices):
                # Constructing the slice to use for this step; the last slice is
                # special (e.g, [100:] instead of [90:100]).
                if i + 1 == n_i:
                    step_slice = slice(beg, None)
                else:
                    step_slice = slice(beg, end)
                try:
                    num_feat, num_saved = _save(step_slice, num_feat, num_saved)
                    beg = end
                except: # Deliberately catch everything
                    stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                    raise
        else:
            # Otherwise, just calling the previously defined _save() function.
            _save()
|
bsd-3-clause
|
dnozay/lettuce
|
tests/integration/lib/Django-1.3/django/conf/locale/pl/formats.py
|
238
|
1288
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Polish (pl) locale formats.
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
    '%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
# NOTE(review): upstream Django uses a non-breaking space (u'\xa0') for the
# Polish thousand separator -- confirm this character survived re-encoding.
THOUSAND_SEPARATOR = u' '
NUMBER_GROUPING = 3
|
gpl-3.0
|
RyanNoelk/ClanLadder
|
MatchHistory/protocols/protocol32283.py
|
4
|
27069
|
# Copyright (c) 2013 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from decoders import *
# Decoding instructions for each protocol type.
# Each entry is a (decoder_method_name, decoder_arguments) pair; the entry's
# position in this list is the typeid used by `decoder.instance(typeid)` and
# by the trailing #N comments / *_typeid constants in this module.
typeinfos = [
    ('_int',[(0,7)]), #0
    ('_int',[(0,4)]), #1
    ('_int',[(0,5)]), #2
    ('_int',[(0,6)]), #3
    ('_int',[(0,14)]), #4
    ('_int',[(0,22)]), #5
    ('_int',[(0,32)]), #6
    ('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7
    ('_struct',[[('m_userId',2,-1)]]), #8
    ('_blob',[(0,8)]), #9
    ('_int',[(0,8)]), #10
    ('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11
    ('_int',[(0,3)]), #12
    ('_bool',[]), #13
    ('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4)]]), #14
    ('_fourcc',[]), #15
    ('_blob',[(0,7)]), #16
    ('_int',[(0,64)]), #17
    ('_struct',[[('m_region',10,0),('m_programId',15,1),('m_realm',6,2),('m_name',16,3),('m_id',17,4)]]), #18
    ('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #19
    ('_int',[(0,2)]), #20
    ('_optional',[10]), #21
    ('_struct',[[('m_name',9,0),('m_toon',18,1),('m_race',9,2),('m_color',19,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',20,7),('m_result',20,8),('m_workingSetSlotId',21,9)]]), #22
    ('_array',[(0,5),22]), #23
    ('_optional',[23]), #24
    ('_blob',[(0,10)]), #25
    ('_blob',[(0,11)]), #26
    ('_struct',[[('m_file',26,0)]]), #27
    ('_optional',[13]), #28
    ('_int',[(-9223372036854775808,64)]), #29
    ('_blob',[(0,12)]), #30
    ('_blob',[(40,0)]), #31
    ('_array',[(0,6),31]), #32
    ('_optional',[32]), #33
    ('_array',[(0,6),26]), #34
    ('_optional',[34]), #35
    ('_struct',[[('m_playerList',24,0),('m_title',25,1),('m_difficulty',9,2),('m_thumbnail',27,3),('m_isBlizzardMap',13,4),('m_restartAsTransitionMap',28,16),('m_timeUTC',29,5),('m_timeLocalOffset',29,6),('m_description',30,7),('m_imageFilePath',26,8),('m_campaignIndex',10,15),('m_mapFileName',26,9),('m_cacheHandles',33,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',35,14)]]), #36
    ('_optional',[9]), #37
    ('_optional',[31]), #38
    ('_optional',[6]), #39
    ('_struct',[[('m_race',21,-1)]]), #40
    ('_struct',[[('m_team',21,-1)]]), #41
    ('_struct',[[('m_name',9,-13),('m_clanTag',37,-12),('m_clanLogo',38,-11),('m_highestLeague',21,-10),('m_combinedRaceLevels',39,-9),('m_randomSeed',6,-8),('m_racePreference',40,-7),('m_teamPreference',41,-6),('m_testMap',13,-5),('m_testAuto',13,-4),('m_examine',13,-3),('m_customInterface',13,-2),('m_observe',20,-1)]]), #42
    ('_array',[(0,5),42]), #43
    ('_struct',[[('m_lockTeams',13,-12),('m_teamsTogether',13,-11),('m_advancedSharedControl',13,-10),('m_randomRaces',13,-9),('m_battleNet',13,-8),('m_amm',13,-7),('m_competitive',13,-6),('m_noVictoryOrDefeat',13,-5),('m_fog',20,-4),('m_observers',20,-3),('m_userDifficulty',20,-2),('m_clientDebugFlags',17,-1)]]), #44
    ('_int',[(1,4)]), #45
    ('_int',[(1,8)]), #46
    ('_bitarray',[(0,6)]), #47
    ('_bitarray',[(0,8)]), #48
    ('_bitarray',[(0,2)]), #49
    ('_bitarray',[(0,7)]), #50
    ('_struct',[[('m_allowedColors',47,-6),('m_allowedRaces',48,-5),('m_allowedDifficulty',47,-4),('m_allowedControls',48,-3),('m_allowedObserveTypes',49,-2),('m_allowedAIBuilds',50,-1)]]), #51
    ('_array',[(0,5),51]), #52
    ('_struct',[[('m_randomValue',6,-26),('m_gameCacheName',25,-25),('m_gameOptions',44,-24),('m_gameSpeed',12,-23),('m_gameType',12,-22),('m_maxUsers',2,-21),('m_maxObservers',2,-20),('m_maxPlayers',2,-19),('m_maxTeams',45,-18),('m_maxColors',3,-17),('m_maxRaces',46,-16),('m_maxControls',10,-15),('m_mapSizeX',10,-14),('m_mapSizeY',10,-13),('m_mapFileSyncChecksum',6,-12),('m_mapFileName',26,-11),('m_mapAuthorName',9,-10),('m_modFileSyncChecksum',6,-9),('m_slotDescriptions',52,-8),('m_defaultDifficulty',3,-7),('m_defaultAIBuild',0,-6),('m_cacheHandles',32,-5),('m_hasExtensionMod',13,-4),('m_isBlizzardMap',13,-3),('m_isPremadeFFA',13,-2),('m_isCoopMode',13,-1)]]), #53
    ('_optional',[1]), #54
    ('_optional',[2]), #55
    ('_struct',[[('m_color',55,-1)]]), #56
    ('_array',[(0,6),6]), #57
    ('_array',[(0,9),6]), #58
    ('_struct',[[('m_control',10,-14),('m_userId',54,-13),('m_teamId',1,-12),('m_colorPref',56,-11),('m_racePref',40,-10),('m_difficulty',3,-9),('m_aiBuild',0,-8),('m_handicap',0,-7),('m_observe',20,-6),('m_logoIndex',6,-5),('m_workingSetSlotId',21,-4),('m_rewards',57,-3),('m_toonHandle',16,-2),('m_licenses',58,-1)]]), #59
    ('_array',[(0,5),59]), #60
    ('_struct',[[('m_phase',12,-10),('m_maxUsers',2,-9),('m_maxObservers',2,-8),('m_slots',60,-7),('m_randomSeed',6,-6),('m_hostUserId',54,-5),('m_isSinglePlayer',13,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',0,-1)]]), #61
    ('_struct',[[('m_userInitialData',43,-3),('m_gameDescription',53,-2),('m_lobbyState',61,-1)]]), #62
    ('_struct',[[('m_syncLobbyState',62,-1)]]), #63
    ('_struct',[[('m_name',16,-1)]]), #64
    ('_blob',[(0,6)]), #65
    ('_struct',[[('m_name',65,-1)]]), #66
    ('_struct',[[('m_name',65,-3),('m_type',6,-2),('m_data',16,-1)]]), #67
    ('_struct',[[('m_type',6,-3),('m_name',65,-2),('m_data',30,-1)]]), #68
    ('_array',[(0,5),10]), #69
    ('_struct',[[('m_signature',69,-2),('m_toonHandle',16,-1)]]), #70
    ('_struct',[[('m_gameFullyDownloaded',13,-8),('m_developmentCheatsEnabled',13,-7),('m_multiplayerCheatsEnabled',13,-6),('m_syncChecksummingEnabled',13,-5),('m_isMapToMapTransition',13,-4),('m_startingRally',13,-3),('m_debugPauseEnabled',13,-2),('m_baseBuildNum',6,-1)]]), #71
    ('_struct',[[]]), #72
    ('_int',[(0,16)]), #73
    ('_struct',[[('x',73,-2),('y',73,-1)]]), #74
    ('_struct',[[('m_which',12,-2),('m_target',74,-1)]]), #75
    ('_struct',[[('m_fileName',26,-5),('m_automatic',13,-4),('m_overwrite',13,-3),('m_name',9,-2),('m_description',25,-1)]]), #76
    ('_int',[(-2147483648,32)]), #77
    ('_struct',[[('x',77,-2),('y',77,-1)]]), #78
    ('_struct',[[('m_point',78,-4),('m_time',77,-3),('m_verb',25,-2),('m_arguments',25,-1)]]), #79
    ('_struct',[[('m_data',79,-1)]]), #80
    ('_int',[(0,20)]), #81
    ('_struct',[[('m_abilLink',73,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',21,-1)]]), #82
    ('_optional',[82]), #83
    ('_null',[]), #84
    ('_struct',[[('x',81,-3),('y',81,-2),('z',77,-1)]]), #85
    ('_struct',[[('m_targetUnitFlags',10,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',73,-4),('m_snapshotControlPlayerId',54,-3),('m_snapshotUpkeepPlayerId',54,-2),('m_snapshotPoint',85,-1)]]), #86
    ('_choice',[(0,2),{0:('None',84),1:('TargetPoint',85),2:('TargetUnit',86),3:('Data',6)}]), #87
    ('_struct',[[('m_cmdFlags',81,-4),('m_abil',83,-3),('m_data',87,-2),('m_otherUnit',39,-1)]]), #88
    ('_int',[(0,9)]), #89
    ('_bitarray',[(0,9)]), #90
    ('_array',[(0,9),89]), #91
    ('_choice',[(0,2),{0:('None',84),1:('Mask',90),2:('OneIndices',91),3:('ZeroIndices',91)}]), #92
    ('_struct',[[('m_unitLink',73,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',89,-1)]]), #93
    ('_array',[(0,9),93]), #94
    ('_struct',[[('m_subgroupIndex',89,-4),('m_removeMask',92,-3),('m_addSubgroups',94,-2),('m_addUnitTags',58,-1)]]), #95
    ('_struct',[[('m_controlGroupId',1,-2),('m_delta',95,-1)]]), #96
    ('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',20,-2),('m_mask',92,-1)]]), #97
    ('_struct',[[('m_count',89,-6),('m_subgroupCount',89,-5),('m_activeSubgroupIndex',89,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #98
    ('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',98,-1)]]), #99
    ('_array',[(0,3),77]), #100
    ('_struct',[[('m_recipientId',1,-2),('m_resources',100,-1)]]), #101
    ('_struct',[[('m_chatMessage',25,-1)]]), #102
    ('_int',[(-128,8)]), #103
    ('_struct',[[('x',77,-3),('y',77,-2),('z',77,-1)]]), #104
    ('_struct',[[('m_beacon',103,-9),('m_ally',103,-8),('m_flags',103,-7),('m_build',103,-6),('m_targetUnitTag',6,-5),('m_targetUnitSnapshotUnitLink',73,-4),('m_targetUnitSnapshotUpkeepPlayerId',103,-3),('m_targetUnitSnapshotControlPlayerId',103,-2),('m_targetPoint',104,-1)]]), #105
    ('_struct',[[('m_speed',12,-1)]]), #106
    ('_struct',[[('m_delta',103,-1)]]), #107
    ('_struct',[[('m_point',78,-3),('m_unit',6,-2),('m_pingedMinimap',13,-1)]]), #108
    ('_struct',[[('m_verb',25,-2),('m_arguments',25,-1)]]), #109
    ('_struct',[[('m_alliance',6,-2),('m_control',6,-1)]]), #110
    ('_struct',[[('m_unitTag',6,-1)]]), #111
    ('_struct',[[('m_unitTag',6,-2),('m_flags',10,-1)]]), #112
    ('_struct',[[('m_conversationId',77,-2),('m_replyId',77,-1)]]), #113
    ('_optional',[16]), #114
    ('_struct',[[('m_gameUserId',1,-6),('m_observe',20,-5),('m_name',9,-4),('m_toonHandle',114,-3),('m_clanTag',37,-2),('m_clanLogo',38,-1)]]), #115
    ('_array',[(0,5),115]), #116
    ('_int',[(0,1)]), #117
    ('_struct',[[('m_userInfos',116,-2),('m_method',117,-1)]]), #118
    ('_struct',[[('m_purchaseItemId',77,-1)]]), #119
    ('_struct',[[('m_difficultyLevel',77,-1)]]), #120
    ('_choice',[(0,3),{0:('None',84),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',77),4:('TextChanged',26),5:('MouseButton',6)}]), #121
    ('_struct',[[('m_controlId',77,-3),('m_eventType',77,-2),('m_eventData',121,-1)]]), #122
    ('_struct',[[('m_soundHash',6,-2),('m_length',6,-1)]]), #123
    ('_array',[(0,7),6]), #124
    ('_struct',[[('m_soundHash',124,-2),('m_length',124,-1)]]), #125
    ('_struct',[[('m_syncInfo',125,-1)]]), #126
    ('_struct',[[('m_sound',6,-1)]]), #127
    ('_struct',[[('m_transmissionId',77,-2),('m_thread',6,-1)]]), #128
    ('_struct',[[('m_transmissionId',77,-1)]]), #129
    ('_optional',[74]), #130
    ('_optional',[73]), #131
    ('_optional',[103]), #132
    ('_struct',[[('m_target',130,-5),('m_distance',131,-4),('m_pitch',131,-3),('m_yaw',131,-2),('m_reason',132,-1)]]), #133
    ('_struct',[[('m_skipType',117,-1)]]), #134
    ('_int',[(0,11)]), #135
    ('_struct',[[('x',135,-2),('y',135,-1)]]), #136
    ('_struct',[[('m_button',6,-5),('m_down',13,-4),('m_posUI',136,-3),('m_posWorld',85,-2),('m_flags',103,-1)]]), #137
    ('_struct',[[('m_posUI',136,-3),('m_posWorld',85,-2),('m_flags',103,-1)]]), #138
    ('_struct',[[('m_achievementLink',73,-1)]]), #139
    ('_struct',[[('m_abilLink',73,-3),('m_abilCmdIndex',2,-2),('m_state',103,-1)]]), #140
    ('_struct',[[('m_soundtrack',6,-1)]]), #141
    ('_struct',[[('m_planetId',77,-1)]]), #142
    ('_struct',[[('m_key',103,-2),('m_flags',103,-1)]]), #143
    ('_struct',[[('m_resources',100,-1)]]), #144
    ('_struct',[[('m_fulfillRequestId',77,-1)]]), #145
    ('_struct',[[('m_cancelRequestId',77,-1)]]), #146
    ('_struct',[[('m_researchItemId',77,-1)]]), #147
    ('_struct',[[('m_mercenaryId',77,-1)]]), #148
    ('_struct',[[('m_battleReportId',77,-2),('m_difficultyLevel',77,-1)]]), #149
    ('_struct',[[('m_battleReportId',77,-1)]]), #150
    ('_int',[(0,19)]), #151
    ('_struct',[[('m_decrementMs',151,-1)]]), #152
    ('_struct',[[('m_portraitId',77,-1)]]), #153
    ('_struct',[[('m_functionName',16,-1)]]), #154
    ('_struct',[[('m_result',77,-1)]]), #155
    ('_struct',[[('m_gameMenuItemIndex',77,-1)]]), #156
    ('_struct',[[('m_purchaseCategoryId',77,-1)]]), #157
    ('_struct',[[('m_button',73,-1)]]), #158
    ('_struct',[[('m_cutsceneId',77,-2),('m_bookmarkName',16,-1)]]), #159
    ('_struct',[[('m_cutsceneId',77,-1)]]), #160
    ('_struct',[[('m_cutsceneId',77,-3),('m_conversationLine',16,-2),('m_altConversationLine',16,-1)]]), #161
    ('_struct',[[('m_cutsceneId',77,-2),('m_conversationLine',16,-1)]]), #162
    ('_struct',[[('m_observe',20,-5),('m_name',9,-4),('m_toonHandle',114,-3),('m_clanTag',37,-2),('m_clanLogo',38,-1)]]), #163
    ('_struct',[[('m_recipient',12,-2),('m_string',26,-1)]]), #164
    ('_struct',[[('m_recipient',12,-2),('m_point',78,-1)]]), #165
    ('_struct',[[('m_progress',77,-1)]]), #166
    ('_struct',[[('m_scoreValueMineralsCurrent',77,0),('m_scoreValueVespeneCurrent',77,1),('m_scoreValueMineralsCollectionRate',77,2),('m_scoreValueVespeneCollectionRate',77,3),('m_scoreValueWorkersActiveCount',77,4),('m_scoreValueMineralsUsedInProgressArmy',77,5),('m_scoreValueMineralsUsedInProgressEconomy',77,6),('m_scoreValueMineralsUsedInProgressTechnology',77,7),('m_scoreValueVespeneUsedInProgressArmy',77,8),('m_scoreValueVespeneUsedInProgressEconomy',77,9),('m_scoreValueVespeneUsedInProgressTechnology',77,10),('m_scoreValueMineralsUsedCurrentArmy',77,11),('m_scoreValueMineralsUsedCurrentEconomy',77,12),('m_scoreValueMineralsUsedCurrentTechnology',77,13),('m_scoreValueVespeneUsedCurrentArmy',77,14),('m_scoreValueVespeneUsedCurrentEconomy',77,15),('m_scoreValueVespeneUsedCurrentTechnology',77,16),('m_scoreValueMineralsLostArmy',77,17),('m_scoreValueMineralsLostEconomy',77,18),('m_scoreValueMineralsLostTechnology',77,19),('m_scoreValueVespeneLostArmy',77,20),('m_scoreValueVespeneLostEconomy',77,21),('m_scoreValueVespeneLostTechnology',77,22),('m_scoreValueMineralsKilledArmy',77,23),('m_scoreValueMineralsKilledEconomy',77,24),('m_scoreValueMineralsKilledTechnology',77,25),('m_scoreValueVespeneKilledArmy',77,26),('m_scoreValueVespeneKilledEconomy',77,27),('m_scoreValueVespeneKilledTechnology',77,28),('m_scoreValueFoodUsed',77,29),('m_scoreValueFoodMade',77,30),('m_scoreValueMineralsUsedActiveForces',77,31),('m_scoreValueVespeneUsedActiveForces',77,32),('m_scoreValueMineralsFriendlyFireArmy',77,33),('m_scoreValueMineralsFriendlyFireEconomy',77,34),('m_scoreValueMineralsFriendlyFireTechnology',77,35),('m_scoreValueVespeneFriendlyFireArmy',77,36),('m_scoreValueVespeneFriendlyFireEconomy',77,37),('m_scoreValueVespeneFriendlyFireTechnology',77,38)]]), #167
    ('_struct',[[('m_playerId',1,0),('m_stats',167,1)]]), #168
    ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',25,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #169
    ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',54,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',39,5),('m_killerUnitTagRecycle',39,6)]]), #170
    ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #171
    ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',25,2)]]), #172
    ('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',25,1),('m_count',77,2)]]), #173
    ('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #174
    ('_array',[(0,10),77]), #175
    ('_struct',[[('m_firstUnitIndex',6,0),('m_items',175,1)]]), #176
    ('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',39,2),('m_slotId',39,3)]]), #177
]
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
# where typeid indexes into the `typeinfos` table above.
game_event_types = {
    5: (72, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
    7: (71, 'NNet.Game.SUserOptionsEvent'),
    9: (64, 'NNet.Game.SBankFileEvent'),
    10: (66, 'NNet.Game.SBankSectionEvent'),
    11: (67, 'NNet.Game.SBankKeyEvent'),
    12: (68, 'NNet.Game.SBankValueEvent'),
    13: (70, 'NNet.Game.SBankSignatureEvent'),
    14: (75, 'NNet.Game.SCameraSaveEvent'),
    21: (76, 'NNet.Game.SSaveGameEvent'),
    22: (72, 'NNet.Game.SSaveGameDoneEvent'),
    23: (72, 'NNet.Game.SLoadGameDoneEvent'),
    26: (80, 'NNet.Game.SGameCheatEvent'),
    27: (88, 'NNet.Game.SCmdEvent'),
    28: (96, 'NNet.Game.SSelectionDeltaEvent'),
    29: (97, 'NNet.Game.SControlGroupUpdateEvent'),
    30: (99, 'NNet.Game.SSelectionSyncCheckEvent'),
    31: (101, 'NNet.Game.SResourceTradeEvent'),
    32: (102, 'NNet.Game.STriggerChatMessageEvent'),
    33: (105, 'NNet.Game.SAICommunicateEvent'),
    34: (106, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
    35: (107, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
    36: (108, 'NNet.Game.STriggerPingEvent'),
    37: (109, 'NNet.Game.SBroadcastCheatEvent'),
    38: (110, 'NNet.Game.SAllianceEvent'),
    39: (111, 'NNet.Game.SUnitClickEvent'),
    40: (112, 'NNet.Game.SUnitHighlightEvent'),
    41: (113, 'NNet.Game.STriggerReplySelectedEvent'),
    43: (118, 'NNet.Game.SHijackReplayGameEvent'),
    44: (72, 'NNet.Game.STriggerSkippedEvent'),
    45: (123, 'NNet.Game.STriggerSoundLengthQueryEvent'),
    46: (127, 'NNet.Game.STriggerSoundOffsetEvent'),
    47: (128, 'NNet.Game.STriggerTransmissionOffsetEvent'),
    48: (129, 'NNet.Game.STriggerTransmissionCompleteEvent'),
    49: (133, 'NNet.Game.SCameraUpdateEvent'),
    50: (72, 'NNet.Game.STriggerAbortMissionEvent'),
    51: (119, 'NNet.Game.STriggerPurchaseMadeEvent'),
    52: (72, 'NNet.Game.STriggerPurchaseExitEvent'),
    53: (120, 'NNet.Game.STriggerPlanetMissionLaunchedEvent'),
    54: (72, 'NNet.Game.STriggerPlanetPanelCanceledEvent'),
    55: (122, 'NNet.Game.STriggerDialogControlEvent'),
    56: (126, 'NNet.Game.STriggerSoundLengthSyncEvent'),
    57: (134, 'NNet.Game.STriggerConversationSkippedEvent'),
    58: (137, 'NNet.Game.STriggerMouseClickedEvent'),
    59: (138, 'NNet.Game.STriggerMouseMovedEvent'),
    60: (139, 'NNet.Game.SAchievementAwardedEvent'),
    62: (140, 'NNet.Game.STriggerTargetModeUpdateEvent'),
    63: (72, 'NNet.Game.STriggerPlanetPanelReplayEvent'),
    64: (141, 'NNet.Game.STriggerSoundtrackDoneEvent'),
    65: (142, 'NNet.Game.STriggerPlanetMissionSelectedEvent'),
    66: (143, 'NNet.Game.STriggerKeyPressedEvent'),
    67: (154, 'NNet.Game.STriggerMovieFunctionEvent'),
    68: (72, 'NNet.Game.STriggerPlanetPanelBirthCompleteEvent'),
    69: (72, 'NNet.Game.STriggerPlanetPanelDeathCompleteEvent'),
    70: (144, 'NNet.Game.SResourceRequestEvent'),
    71: (145, 'NNet.Game.SResourceRequestFulfillEvent'),
    72: (146, 'NNet.Game.SResourceRequestCancelEvent'),
    73: (72, 'NNet.Game.STriggerResearchPanelExitEvent'),
    74: (72, 'NNet.Game.STriggerResearchPanelPurchaseEvent'),
    75: (147, 'NNet.Game.STriggerResearchPanelSelectionChangedEvent'),
    77: (72, 'NNet.Game.STriggerMercenaryPanelExitEvent'),
    78: (72, 'NNet.Game.STriggerMercenaryPanelPurchaseEvent'),
    79: (148, 'NNet.Game.STriggerMercenaryPanelSelectionChangedEvent'),
    80: (72, 'NNet.Game.STriggerVictoryPanelExitEvent'),
    81: (72, 'NNet.Game.STriggerBattleReportPanelExitEvent'),
    82: (149, 'NNet.Game.STriggerBattleReportPanelPlayMissionEvent'),
    83: (150, 'NNet.Game.STriggerBattleReportPanelPlaySceneEvent'),
    84: (150, 'NNet.Game.STriggerBattleReportPanelSelectionChangedEvent'),
    85: (120, 'NNet.Game.STriggerVictoryPanelPlayMissionAgainEvent'),
    86: (72, 'NNet.Game.STriggerMovieStartedEvent'),
    87: (72, 'NNet.Game.STriggerMovieFinishedEvent'),
    88: (152, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
    89: (153, 'NNet.Game.STriggerPortraitLoadedEvent'),
    90: (155, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
    91: (156, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
    93: (119, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseItemChangedEvent'),
    94: (157, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseCategoryChangedEvent'),
    95: (158, 'NNet.Game.STriggerButtonPressedEvent'),
    96: (72, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
    97: (159, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),
    98: (160, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),
    99: (161, 'NNet.Game.STriggerCutsceneConversationLineEvent'),
    100: (162, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),
    101: (72, 'NNet.Game.SGameUserLeaveEvent'),
    102: (163, 'NNet.Game.SGameUserJoinEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name),
# where typeid indexes into the `typeinfos` table above.
message_event_types = {
    0: (164, 'NNet.Game.SChatMessage'),
    1: (165, 'NNet.Game.SPingMessage'),
    2: (166, 'NNet.Game.SLoadingProgressMessage'),
    3: (72, 'NNet.Game.SServerPingMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name),
# where typeid indexes into the `typeinfos` table above.
tracker_event_types = {
    0: (168, 'NNet.Replay.Tracker.SPlayerStatsEvent'),
    1: (169, 'NNet.Replay.Tracker.SUnitBornEvent'),
    2: (170, 'NNet.Replay.Tracker.SUnitDiedEvent'),
    3: (171, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'),
    4: (172, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'),
    5: (173, 'NNet.Replay.Tracker.SUpgradeEvent'),
    6: (169, 'NNet.Replay.Tracker.SUnitInitEvent'),
    7: (174, 'NNet.Replay.Tracker.SUnitDoneEvent'),
    8: (176, 'NNet.Replay.Tracker.SUnitPositionsEvent'),
    9: (177, 'NNet.Replay.Tracker.SPlayerSetupEvent'),
}
# The typeid of the NNet.Replay.Tracker.EEventId enum.
tracker_eventid_typeid = 2
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 7
# The typeid of NNet.Replay.SGameUserId (the type used to encode player ids).
replay_userid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 14
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 36
# The typeid of NNet.Replay.SInitData (the type used to store the inital lobby).
replay_initdata_typeid = 63
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for k,v in value.iteritems():
return v
return 0
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):
    """Yield decoded events from *decoder* until the stream is exhausted.

    Every event is prefixed by a gameloop delta (SVarUint32) and, when
    decode_user_id is true, by a userid.  The running gameloop total, the
    userid, the event name/id, and the number of bits consumed are attached
    to each yielded event dict under '_'-prefixed keys.
    """
    loop = 0
    while not decoder.done():
        bits_before = decoder.used_bits()
        # Each event is preceded by a delta that advances the game clock.
        loop += _varuint32_value(decoder.instance(svaruint32_typeid))
        # The userid prefix is only present in some event streams.
        userid = decoder.instance(replay_userid_typeid) if decode_user_id else None
        # Look up the concrete struct type for this event id.
        eventid = decoder.instance(eventid_typeid)
        typeid, typename = event_types.get(eventid, (None, None))
        if typeid is None:
            raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))
        # Decode the event struct instance and annotate it.
        event = decoder.instance(typeid)
        event['_event'] = typename
        event['_eventid'] = eventid
        event['_gameloop'] = loop
        if decode_user_id:
            event['_userid'] = userid
        # The next event is byte aligned in the stream.
        decoder.byte_align()
        event['_bits'] = decoder.used_bits() - bits_before
        yield event
def decode_replay_game_events(contents):
    """Decodes and yields each game event from the contents byte string."""
    # Game events use the bit-packed encoding and carry a userid prefix.
    decoder = BitPackedDecoder(contents, typeinfos)
    for event in _decode_event_stream(decoder,
                                      game_eventid_typeid,
                                      game_event_types,
                                      decode_user_id=True):
        yield event
def decode_replay_message_events(contents):
    """Decodes and yields each message event from the contents byte string."""
    # Message events use the bit-packed encoding and carry a userid prefix.
    decoder = BitPackedDecoder(contents, typeinfos)
    for event in _decode_event_stream(decoder,
                                      message_eventid_typeid,
                                      message_event_types,
                                      decode_user_id=True):
        yield event
def decode_replay_tracker_events(contents):
    """Decodes and yields each tracker event from the contents byte string."""
    # Tracker events use the versioned (self-describing) encoding and have
    # no userid prefix.
    decoder = VersionedDecoder(contents, typeinfos)
    for event in _decode_event_stream(decoder,
                                      tracker_eventid_typeid,
                                      tracker_event_types,
                                      decode_user_id=False):
        yield event
def decode_replay_header(contents):
    """Decodes and return the replay header from the contents byte string."""
    # The header is stored with the versioned (self-describing) encoding.
    decoder = VersionedDecoder(contents, typeinfos)
    return decoder.instance(replay_header_typeid)
def decode_replay_details(contents):
    """Decodes and returns the game details from the contents byte string."""
    # Details use the versioned (self-describing) encoding.
    decoder = VersionedDecoder(contents, typeinfos)
    return decoder.instance(game_details_typeid)
def decode_replay_initdata(contents):
    """Decodes and return the replay init data from the contents byte string."""
    # Init data uses the bit-packed encoding (unlike header/details).
    decoder = BitPackedDecoder(contents, typeinfos)
    return decoder.instance(replay_initdata_typeid)
def decode_replay_attributes_events(contents):
    """Decode the contents byte string and return the attributes dict.

    The result maps 'source', 'mapNamespace', and 'scopes' (a nested dict
    scope -> attrid -> list of attribute-value dicts).
    """
    buffer = BitPackedBuffer(contents, 'little')
    attributes = {}
    if not buffer.done():
        attributes['source'] = buffer.read_bits(8)
        attributes['mapNamespace'] = buffer.read_bits(32)
        # The declared count is read to advance the stream; the loop below
        # simply consumes entries until the buffer is exhausted.
        count = buffer.read_bits(32)
        scopes = {}
        attributes['scopes'] = scopes
        while not buffer.done():
            entry = {}
            entry['namespace'] = buffer.read_bits(32)
            entry['attrid'] = attrid = buffer.read_bits(32)
            scope = buffer.read_bits(8)
            # Values are 4 aligned bytes, reversed, with NUL padding removed.
            entry['value'] = buffer.read_aligned_bytes(4)[::-1].strip('\x00')
            scopes.setdefault(scope, {}).setdefault(attrid, []).append(entry)
    return attributes
def unit_tag(unitTagIndex, unitTagRecycle):
    """Pack a unit tag index and recycle counter into a single integer tag."""
    shifted_index = unitTagIndex << 18
    return shifted_index + unitTagRecycle
def unit_tag_index(unitTag):
    """Extract the 14-bit index portion of a packed unit tag."""
    high_bits = unitTag >> 18
    return high_bits & 0x00003fff
def unit_tag_recycle(unitTag):
    """Extract the 18-bit recycle counter of a packed unit tag."""
    low_bits = unitTag & 0x0003ffff
    return low_bits
|
mit
|
RI-imaging/nrefocus
|
docs/conf.py
|
1
|
4846
|
# -*- coding: utf-8 -*-
#
# project documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 22 09:35:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Get version number from the nrefocus._version file
import os.path as op
import sys
# include parent directory
pdir = op.dirname(op.dirname(op.abspath(__file__)))
sys.path.insert(0, pdir)
# include custom sphinx extensions
sys.path.append(op.abspath('extensions'))
# defines `version` in the local namespace
exec(open(op.join(pdir, "nrefocus/_version.py")).read())
release = version # noqa: F821
# http://www.sphinx-doc.org/en/stable/ext/autodoc.html#confval-autodoc_member_order
# Order class attributes and functions in separate blocks
autodoc_member_order = 'groupwise'
autoclass_content = 'both'
# Display link to GitHub repo instead of doc on rtfd
rst_prolog = """
:github_url: https://github.com/RI-imaging/nrefocus
"""
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.intersphinx',
              'sphinx.ext.autosummary',
              'sphinx.ext.autodoc',
              'sphinx.ext.mathjax',
              'sphinx.ext.napoleon',
              'sphinx.ext.viewcode',
              'fancy_include',
              'github_changelog',
              'sphinxcontrib.bibtex',
              ]
# specify bibtex files (required for sphinxcontrib.bibtex>=2.0)
bibtex_bibfiles = ['nrefocus.bib']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
projectname = "nrefocus"
projectdescription = 'numerical focusing of complex wave fields'
project = projectname
year = "2015"
authors = "Paul Müller"
copyright = year + ", " + authors
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Output file base name for HTML help builder.
htmlhelp_basename = projectname+'doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', projectname+'.tex', projectname+' Documentation',
     authors, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', projectname, projectname+' Documentation',
     authors, 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', projectname, projectname+u' Documentation',
     authors, projectname,
     projectdescription,
     'Numeric'),
]
# -----------------------------------------------------------------------------
# intersphinx
# -----------------------------------------------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    "python": ('https://docs.python.org/', None),
    "lmfit": ('https://lmfit.github.io/lmfit-py/', None),
    "numpy": ('http://docs.scipy.org/doc/numpy', None),
    "pyfftw": ('https://pyfftw.readthedocs.io/en/stable/', None),
    "scipy": ('https://docs.scipy.org/doc/scipy/reference/', None),
}
|
bsd-3-clause
|
Chilledheart/depot_tools
|
tests/download_from_google_storage_unittests.py
|
20
|
10966
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0212
"""Unit tests for download_from_google_storage.py."""
import optparse
import os
import Queue
import shutil
import sys
import tempfile
import threading
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import upload_to_google_storage
import download_from_google_storage
# ../third_party/gsutil/gsutil
GSUTIL_DEFAULT_PATH = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
    'third_party', 'gsutil', 'gsutil')
# Directory containing this test file (and its fixture data under gstools/).
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
class GsutilMock(object):
  """Thread-safe test double for the Gsutil wrapper.

  Every invocation of call()/check_call() is recorded in self.history.
  Results queued via add_expected() are replayed in FIFO order; once the
  queue is empty, call() returns 0 and check_call() returns (0, '', '').
  """
  def __init__(self, path, boto_path, timeout=None):
    self.path = path
    self.timeout = timeout
    self.boto_path = boto_path
    self.expected = []
    self.history = []
    self.lock = threading.Lock()

  def add_expected(self, return_code, out, err):
    # Queue one canned (return_code, out, err) result.
    self.expected.append((return_code, out, err))

  def append_history(self, method, args):
    self.history.append((method, args))

  def call(self, *args):
    with self.lock:
      self.append_history('call', args)
      if not self.expected:
        return 0
      return self.expected.pop(0)[0]

  def check_call(self, *args):
    with self.lock:
      self.append_history('check_call', args)
      if not self.expected:
        return (0, '', '')
      return self.expected.pop(0)
class GstoolsUnitTests(unittest.TestCase):
  """Tests for the hashing helpers in the gstools scripts."""

  def setUp(self):
    self.temp_dir = tempfile.mkdtemp(prefix='gstools_test')
    self.base_path = os.path.join(self.temp_dir, 'test_files')
    shutil.copytree(os.path.join(TEST_DIR, 'gstools'), self.base_path)

  def tearDown(self):
    # Must be named tearDown (not cleanUp) so unittest actually runs it
    # after each test; the old name leaked self.temp_dir on every run.
    shutil.rmtree(self.temp_dir)

  def test_gsutil(self):
    gsutil = download_from_google_storage.Gsutil(GSUTIL_DEFAULT_PATH, None)
    self.assertEqual(gsutil.path, GSUTIL_DEFAULT_PATH)
    code, _, err = gsutil.check_call()
    self.assertEqual(code, 0)
    self.assertEqual(err, '')

  def test_get_sha1(self):
    lorem_ipsum = os.path.join(self.base_path, 'lorem_ipsum.txt')
    self.assertEqual(
        download_from_google_storage.get_sha1(lorem_ipsum),
        '7871c8e24da15bad8b0be2c36edc9dc77e37727f')

  def test_get_md5(self):
    lorem_ipsum = os.path.join(self.base_path, 'lorem_ipsum.txt')
    self.assertEqual(
        upload_to_google_storage.get_md5(lorem_ipsum),
        '634d7c1ed3545383837428f031840a1e')

  def test_get_md5_cached_read(self):
    lorem_ipsum = os.path.join(self.base_path, 'lorem_ipsum.txt')
    # Use a fake 'stale' MD5 sum. Expected behavior is to return stale sum.
    self.assertEqual(
        upload_to_google_storage.get_md5_cached(lorem_ipsum),
        '734d7c1ed3545383837428f031840a1e')

  def test_get_md5_cached_write(self):
    lorem_ipsum2 = os.path.join(self.base_path, 'lorem_ipsum2.txt')
    lorem_ipsum2_md5 = os.path.join(self.base_path, 'lorem_ipsum2.txt.md5')
    if os.path.exists(lorem_ipsum2_md5):
      os.remove(lorem_ipsum2_md5)
    # Use a fake 'stale' MD5 sum. Expected behavior is to return stale sum.
    self.assertEqual(
        upload_to_google_storage.get_md5_cached(lorem_ipsum2),
        '4c02d1eb455a0f22c575265d17b84b6d')
    self.assertTrue(os.path.exists(lorem_ipsum2_md5))
    self.assertEqual(
        open(lorem_ipsum2_md5, 'rb').read(),
        '4c02d1eb455a0f22c575265d17b84b6d')
    os.remove(lorem_ipsum2_md5)  # Clean up.
    self.assertFalse(os.path.exists(lorem_ipsum2_md5))
class DownloadTests(unittest.TestCase):
  """Tests for download_from_google_storage's enumeration and download paths."""

  def setUp(self):
    self.gsutil = GsutilMock(GSUTIL_DEFAULT_PATH, None)
    self.temp_dir = tempfile.mkdtemp(prefix='gstools_test')
    self.checkout_test_files = os.path.join(
        TEST_DIR, 'gstools', 'download_test_data')
    self.base_path = os.path.join(
        self.temp_dir, 'download_test_data')
    shutil.copytree(self.checkout_test_files, self.base_path)
    self.base_url = 'gs://sometesturl'
    self.parser = optparse.OptionParser()
    self.queue = Queue.Queue()
    self.ret_codes = Queue.Queue()
    self.lorem_ipsum = os.path.join(self.base_path, 'lorem_ipsum.txt')
    self.lorem_ipsum_sha1 = '7871c8e24da15bad8b0be2c36edc9dc77e37727f'
    self.maxDiff = None

  def tearDown(self):
    # Must be named tearDown (not cleanUp) so unittest actually runs it
    # after each test; the old name leaked self.temp_dir on every run.
    shutil.rmtree(self.temp_dir)

  def test_enumerate_files_non_recursive(self):
    queue_size = download_from_google_storage.enumerate_work_queue(
        self.base_path, self.queue, True, False, False, None, False, False)
    expected_queue = [
        ('e6c4fbd4fe7607f3e6ebf68b2ea4ef694da7b4fe',
         os.path.join(self.base_path, 'rootfolder_text.txt')),
        ('7871c8e24da15bad8b0be2c36edc9dc77e37727f',
         os.path.join(self.base_path, 'uploaded_lorem_ipsum.txt'))]
    self.assertEqual(sorted(expected_queue), sorted(self.queue.queue))
    self.assertEqual(queue_size, 2)

  def test_enumerate_files_recursive(self):
    queue_size = download_from_google_storage.enumerate_work_queue(
        self.base_path, self.queue, True, True, False, None, False, False)
    expected_queue = [
        ('e6c4fbd4fe7607f3e6ebf68b2ea4ef694da7b4fe',
         os.path.join(self.base_path, 'rootfolder_text.txt')),
        ('7871c8e24da15bad8b0be2c36edc9dc77e37727f',
         os.path.join(self.base_path, 'uploaded_lorem_ipsum.txt')),
        ('b5415aa0b64006a95c0c409182e628881d6d6463',
         os.path.join(self.base_path, 'subfolder', 'subfolder_text.txt'))]
    self.assertEqual(sorted(expected_queue), sorted(self.queue.queue))
    self.assertEqual(queue_size, 3)

  def test_download_worker_single_file(self):
    sha1_hash = '7871c8e24da15bad8b0be2c36edc9dc77e37727f'
    input_filename = '%s/%s' % (self.base_url, sha1_hash)
    output_filename = os.path.join(self.base_path, 'uploaded_lorem_ipsum.txt')
    self.queue.put((sha1_hash, output_filename))
    self.queue.put((None, None))
    stdout_queue = Queue.Queue()
    download_from_google_storage._downloader_worker_thread(
        0, self.queue, False, self.base_url, self.gsutil,
        stdout_queue, self.ret_codes, True)
    expected_calls = [
        ('check_call',
         ('ls', input_filename)),
        ('check_call',
         ('cp', '-q', input_filename, output_filename))]
    if sys.platform != 'win32':
      expected_calls.append(
          ('check_call',
           ('ls',
            '-L',
            'gs://sometesturl/7871c8e24da15bad8b0be2c36edc9dc77e37727f')))
    expected_output = [
        '0> Downloading %s...' % output_filename]
    expected_ret_codes = []
    self.assertEqual(list(stdout_queue.queue), expected_output)
    self.assertEqual(self.gsutil.history, expected_calls)
    self.assertEqual(list(self.ret_codes.queue), expected_ret_codes)

  def test_download_worker_skips_file(self):
    sha1_hash = 'e6c4fbd4fe7607f3e6ebf68b2ea4ef694da7b4fe'
    output_filename = os.path.join(self.base_path, 'rootfolder_text.txt')
    self.queue.put((sha1_hash, output_filename))
    self.queue.put((None, None))
    stdout_queue = Queue.Queue()
    download_from_google_storage._downloader_worker_thread(
        0, self.queue, False, self.base_url, self.gsutil,
        stdout_queue, self.ret_codes, True)
    expected_output = [
        '0> File %s exists and SHA1 matches. Skipping.' % output_filename
    ]
    self.assertEqual(list(stdout_queue.queue), expected_output)
    self.assertEqual(self.gsutil.history, [])

  def test_download_worker_skips_not_found_file(self):
    sha1_hash = '7871c8e24da15bad8b0be2c36edc9dc77e37727f'
    input_filename = '%s/%s' % (self.base_url, sha1_hash)
    output_filename = os.path.join(self.base_path, 'uploaded_lorem_ipsum.txt')
    self.queue.put((sha1_hash, output_filename))
    self.queue.put((None, None))
    stdout_queue = Queue.Queue()
    self.gsutil.add_expected(1, '', '')  # Return error when 'ls' is called.
    download_from_google_storage._downloader_worker_thread(
        0, self.queue, False, self.base_url, self.gsutil,
        stdout_queue, self.ret_codes, True)
    expected_output = [
        '0> File %s for %s does not exist, skipping.' % (
            input_filename, output_filename),
    ]
    expected_calls = [
        ('check_call',
         ('ls', input_filename))
    ]
    expected_ret_codes = [
        (1, 'File %s for %s does not exist.' % (
            input_filename, output_filename))
    ]
    self.assertEqual(list(stdout_queue.queue), expected_output)
    self.assertEqual(self.gsutil.history, expected_calls)
    self.assertEqual(list(self.ret_codes.queue), expected_ret_codes)

  def test_download_cp_fails(self):
    sha1_hash = '7871c8e24da15bad8b0be2c36edc9dc77e37727f'
    input_filename = '%s/%s' % (self.base_url, sha1_hash)
    output_filename = os.path.join(self.base_path, 'uploaded_lorem_ipsum.txt')
    self.gsutil.add_expected(0, '', '')
    self.gsutil.add_expected(101, '', 'Test error message.')
    code = download_from_google_storage.download_from_google_storage(
        input_filename=sha1_hash,
        base_url=self.base_url,
        gsutil=self.gsutil,
        num_threads=1,
        directory=False,
        recursive=False,
        force=True,
        output=output_filename,
        ignore_errors=False,
        sha1_file=False,
        verbose=True,
        auto_platform=False)
    expected_calls = [
        ('check_call',
         ('ls', input_filename)),
        ('check_call',
         ('cp', '-q', input_filename, output_filename))
    ]
    if sys.platform != 'win32':
      expected_calls.append(
          ('check_call',
           ('ls',
            '-L',
            'gs://sometesturl/7871c8e24da15bad8b0be2c36edc9dc77e37727f')))
    self.assertEqual(self.gsutil.history, expected_calls)
    self.assertEqual(code, 101)

  def test_download_directory_no_recursive_non_force(self):
    sha1_hash = '7871c8e24da15bad8b0be2c36edc9dc77e37727f'
    input_filename = '%s/%s' % (self.base_url, sha1_hash)
    output_filename = os.path.join(self.base_path, 'uploaded_lorem_ipsum.txt')
    code = download_from_google_storage.download_from_google_storage(
        input_filename=self.base_path,
        base_url=self.base_url,
        gsutil=self.gsutil,
        num_threads=1,
        directory=True,
        recursive=False,
        force=False,
        output=None,
        ignore_errors=False,
        sha1_file=False,
        verbose=True,
        auto_platform=False)
    expected_calls = [
        ('check_call',
         ('ls', input_filename)),
        ('check_call',
         ('cp', '-q', input_filename, output_filename))]
    if sys.platform != 'win32':
      expected_calls.append(
          ('check_call',
           ('ls',
            '-L',
            'gs://sometesturl/7871c8e24da15bad8b0be2c36edc9dc77e37727f')))
    self.assertEqual(self.gsutil.history, expected_calls)
    self.assertEqual(code, 0)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
bsd-3-clause
|
NERC-CEH/jules-jasmin
|
majic/joj/websetup_watch_driving_dataset.py
|
1
|
9887
|
"""
# Majic
# Copyright (C) 2014 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
import datetime
import logging
from joj.model import session_scope, DrivingDataset, DrivingDatasetParameterValue, DrivingDatasetLocation, \
LandCoverRegionCategory, LandCoverRegion
from joj.model.meta import Session
from joj.utils import constants
from joj.services.model_run_service import ModelRunService
log = logging.getLogger(__name__)
# Driving data variables: one entry per variable with
# [ncml path fragment, JULES variable name, display name, min value, max value]
WATCH_DRIVING_DATA = \
    [
        ['PSurf_WFD/PSurf_WFD', 'pstar', 'Surface pressure', 30000, 120000],
        ['Tair_WFD/Tair_WFD', 't', 'Near surface air temperature at 2m', 150, 400],
        ['Qair_WFD/Qair_WFD', 'q', 'Near surface specific humidity at 2m', 0, 0.1],
        ['Wind_WFD/Wind_WFD', 'wind', 'Near surface wind speed at 10m', 0, 30],
        ["LWdown_WFD/LWdown_WFD", 'lw_down', "Surface incident longwave radiation", 0, 2000],
        ['SWdown_WFD/SWdown_WFD', 'sw_down', 'Surface incident shortwave radiation', 0, 2000],
        ['Rainf_WFD_GPCC/Rainf_WFD_GPCC', 'tot_rain', 'Rainfall rate', 0, 0.02],
        ['Snowf_WFD_GPCC/Snowf_WFD_GPCC', 'tot_snow', 'Snowfall rate', 0, 0.004]
    ]
# Ancillary file locations for the WATCH 2D dataset
WATCH_SOIL_PROPS_FILE = "data/WATCH_2D/ancils/soil_igbp_bc_watch_0p5deg_capUM6.6_2D.nc"
WATCH_FRAC_FILE = "data/WATCH_2D/ancils/frac_igbp_watch_0p5deg_capUM6.6_2D.nc"
WATCH_LAND_FRAC_FILE = "data/WATCH_2D/ancils/WFD-land-lat-long-z_2D.nc"
WATCH_LATLON_FILE = "data/WATCH_2D/ancils/WFD-land-lat-long-z_2D.nc"
def _create_watch_driving_data_basic(conf):
    """Build (but do not persist) the WATCH DrivingDataset with its metadata.

    :param conf: configuration; conf['full_data_range'] == "true" exposes the
        full 1901-2001 period, otherwise only January 1901
    :return: a new DrivingDataset instance
    """
    watch_driving_dataset = DrivingDataset()
    watch_driving_dataset.name = "WATCH Forcing Data 20th Century"
    watch_driving_dataset.description = \
        "A sub-diurnal meteorological forcing dataset: based on ERA-40 for the sub-monthly variability, " \
        "tuned to CRU monthly observations. " \
        "See Weedon et al, 2011, Journal of Hydrometeorology, doi: 10.1175/2011JHM1369.1"
    watch_driving_dataset.geographic_region = 'Global'
    watch_driving_dataset.temporal_resolution = '3 Hours'
    watch_driving_dataset.spatial_resolution = '0.5 Degrees'
    # spatial extent of the grid, in degrees
    watch_driving_dataset.boundary_lat_north = 84
    watch_driving_dataset.boundary_lat_south = -56
    watch_driving_dataset.boundary_lon_west = -180
    watch_driving_dataset.boundary_lon_east = 180
    watch_driving_dataset.time_start = datetime.datetime(1901, 1, 1, 0, 0, 0)
    if conf['full_data_range'].lower() == "true":
        watch_driving_dataset.time_end = datetime.datetime(2001, 12, 31, 21, 0, 0)
    else:
        watch_driving_dataset.time_end = datetime.datetime(1901, 1, 31, 21, 0, 0)
    watch_driving_dataset.view_order_index = 100
    watch_driving_dataset.usage_order_index = 2
    watch_driving_dataset.is_restricted_to_admins = False
    return watch_driving_dataset
def _create_watch_regions(watch_driving_dataset, regions_csv_file):
    """Create land cover region categories and regions from a csv file.

    :param watch_driving_dataset: driving dataset the region categories belong to
    :param regions_csv_file: path to a csv file with lines of
        "category, region name, mask filename"; blank lines and lines
        starting with '#' are ignored
    """
    categories = []
    # 'with' guarantees the file handle is closed even on the error/exit
    # path below (the previous open()/close() pair leaked it on errors).
    with open(regions_csv_file, "r") as f:
        for line in f:
            stripped_line = line.strip()
            if not stripped_line.startswith('#') and len(stripped_line) > 0:
                values = line.split(',')
                if len(values) != 3:
                    log.error("Regions csv file has incorrect number of elements on line reading %s" % stripped_line)
                    exit(-1)
                category_name = values[0].strip()
                # reuse an existing category with the same name, if any
                cat = None
                for category in categories:
                    if category.name == category_name:
                        cat = category
                if cat is None:
                    log.info("    adding category {}".format(category_name))
                    cat = LandCoverRegionCategory(name=category_name)
                    cat.driving_dataset = watch_driving_dataset
                    categories.append(cat)
                region = LandCoverRegion()
                region.name = values[1].strip()
                region.mask_file = "data/WATCH_2D/masks/{}".format(values[2].strip())
                # linking the region to its category attaches it to the dataset
                region.category = cat
                log.info("    adding region {} to {} with file {}".format(region.name, category_name, region.mask_file))
def _create_watch_parameters_and_locations(cover_dst, land_cover_frac_dst, soild_prop_dst, watch_driving_dataset, conf):
    """Create the file locations and JULES namelist parameter values for WATCH.

    :param cover_dst: dataset type assigned to each driving-data file location
    :param land_cover_frac_dst: land cover fraction dataset type
        (NOTE(review): appears unused in this function — confirm)
    :param soild_prop_dst: soil properties dataset type
        (NOTE(review): appears unused; parameter name kept for caller compatibility)
    :param watch_driving_dataset: the DrivingDataset being populated
    :param conf: configuration; 'full_data_range' selects the drive data end date
    """
    file_template = 'data/WATCH_2D/driving/{}.ncml'
    # one DrivingDatasetLocation per driving variable
    for path, var, name, min, max in WATCH_DRIVING_DATA:
        location = DrivingDatasetLocation()
        location.base_url = file_template.format(path)
        location.dataset_type = cover_dst
        location.driving_dataset = watch_driving_dataset
        location.var_name = var
    # JULES namelist parameter values (constant, value-string) for this dataset
    watch_jules_parameters = [
        [constants.JULES_PARAM_DRIVE_DATA_START, "'1901-01-01 00:00:00'"],
        [constants.JULES_PARAM_DRIVE_DATA_PERIOD, "10800"],
        [constants.JULES_PARAM_DRIVE_FILE, "'data/WATCH_2D/driving/%vv/%vv_%y4%m2.nc'"],
        [constants.JULES_PARAM_DRIVE_NVARS, "8"],
        [constants.JULES_PARAM_DRIVE_VAR,
         "'pstar' 't' 'q' 'wind' 'lw_down' 'sw_down' "
         "'tot_rain' 'tot_snow'"],
        [constants.JULES_PARAM_DRIVE_VAR_NAME, "'PSurf' 'Tair' 'Qair' 'Wind' 'LWdown' "
                                               "'SWdown' 'Rainf' 'Snowf'"],
        [constants.JULES_PARAM_DRIVE_TPL_NAME, "'PSurf_WFD' 'Tair_WFD' "
                                               "'Qair_WFD' 'Wind_WFD' 'LWdown_WFD' 'SWdown_WFD' "
                                               "'Rainf_WFD_GPCC' 'Snowf_WFD_GPCC'"],
        [constants.JULES_PARAM_DRIVE_INTERP,
         "'i' 'i' 'i' 'i' 'nf' 'nf' 'nf' 'nf'"],
        [constants.JULES_PARAM_DRIVE_Z1_TQ_IN, "2.0"],
        [constants.JULES_PARAM_DRIVE_Z1_UV_IN, "10.0"],
        [constants.JULES_PARAM_INPUT_GRID_IS_1D, ".false."],
        [constants.JULES_PARAM_INPUT_GRID_NX, "720"],
        [constants.JULES_PARAM_INPUT_GRID_NY, "280"],
        [constants.JULES_PARAM_INPUT_GRID_X_DIM_NAME, "'Longitude'"],
        [constants.JULES_PARAM_INPUT_GRID_Y_DIM_NAME, "'Latitude'"],
        [constants.JULES_PARAM_INPUT_TIME_DIM_NAME, "'Time'"],
        [constants.JULES_PARAM_INPUT_TYPE_DIM_NAME, "'pseudo'"],
        [constants.JULES_PARAM_LATLON_FILE, ("'%s'" % WATCH_LATLON_FILE)],
        [constants.JULES_PARAM_LATLON_LAT_NAME, "'Grid_lat'"],
        [constants.JULES_PARAM_LATLON_LON_NAME, "'Grid_lon'"],
        [constants.JULES_PARAM_LAND_FRAC_FILE, ("'%s'" % WATCH_LAND_FRAC_FILE)],
        [constants.JULES_PARAM_LAND_FRAC_LAND_FRAC_NAME, "'land'"],
        [constants.JULES_PARAM_SURF_HGT_ZERO_HEIGHT, ".true."],
        [constants.JULES_PARAM_FRAC_FILE, ("'%s'" % WATCH_FRAC_FILE)],
        [constants.JULES_PARAM_FRAC_NAME, "'frac'"],
        [constants.JULES_PARAM_SOIL_PROPS_CONST_Z, ".true."],
        [constants.JULES_PARAM_SOIL_PROPS_FILE, ("'%s'" % WATCH_SOIL_PROPS_FILE)],
        [constants.JULES_PARAM_SOIL_PROPS_NVARS, "9"],
        [constants.JULES_PARAM_SOIL_PROPS_VAR,
         "'b' 'sathh' 'satcon' 'sm_sat' 'sm_crit' 'sm_wilt' 'hcap' 'hcon' 'albsoil'"],
        [constants.JULES_PARAM_SOIL_PROPS_VAR_NAME,
         "'bexp' 'sathh' 'satcon' 'vsat' 'vcrit' 'vwilt' 'hcap' 'hcon' 'albsoil'"],
        [constants.JULES_PARAM_INITIAL_NVARS, "10"],
        [constants.JULES_PARAM_INITIAL_VAR,
         "'sthuf' 'canopy' 'snow_tile' 'rgrain' 'tstar_tile' 't_soil' 'cs' 'gs' 'lai' 'canht'"],
        [constants.JULES_PARAM_INITIAL_USE_FILE,
         ".false. .false. .false. .false. .false. .false. .false. .false. .false. .false."],
        [constants.JULES_PARAM_INITIAL_CONST_VAL,
         "0.9 0.0 0.0 50.0 275.0 278.0 10.0 0.0 1.0 2.0"],
        [constants.JULES_PARAM_POST_PROCESSING_ID, "1"]
    ]
    # the drive data end date depends on whether the full range is exposed
    if conf['full_data_range'].lower() == "true":
        watch_jules_parameters.append([constants.JULES_PARAM_DRIVE_DATA_END, "'2001-12-31 21:00:00'"])
    else:
        watch_jules_parameters.append([constants.JULES_PARAM_DRIVE_DATA_END, "'1901-01-31 21:00:00'"])
    model_run_service = ModelRunService()
    for constant, value in watch_jules_parameters:
        log.info("    adding parameter {}::{}".format(constant[0], constant[1]))
        DrivingDatasetParameterValue(model_run_service, watch_driving_dataset, constant, value)
def create_watch_driving_data(conf, cover_dst, land_cover_frac_dst, soil_prop_dst, regions_csv_file):
    """
    Create the watch driving data
    :param conf: configuration
    :param cover_dst: cover data set type
    :param land_cover_frac_dst: land cover fraction data set type
    :param soil_prop_dst: soil property data set type
    :param regions_csv_file: csv file defining land cover region categories and mask files
    :return: name of dataset
    """
    log.info("Creating Watch Driving Dataset:")
    with session_scope(Session) as session:
        watch_driving_dataset = _create_watch_driving_data_basic(conf)
        _create_watch_regions(watch_driving_dataset, regions_csv_file)
        _create_watch_parameters_and_locations(
            cover_dst,
            land_cover_frac_dst,
            soil_prop_dst,
            watch_driving_dataset,
            conf)
        # dataset is persisted when the session scope exits
        session.add(watch_driving_dataset)
    return watch_driving_dataset.name
|
gpl-2.0
|
pmisik/buildbot
|
master/buildbot/process/build.py
|
2
|
32806
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from functools import reduce
from twisted.internet import defer
from twisted.internet import error
from twisted.python import failure
from twisted.python import log
from twisted.python.failure import Failure
from buildbot import interfaces
from buildbot.process import buildstep
from buildbot.process import metrics
from buildbot.process import properties
from buildbot.process.results import CANCELLED
from buildbot.process.results import EXCEPTION
from buildbot.process.results import FAILURE
from buildbot.process.results import RETRY
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.process.results import computeResultAndTermination
from buildbot.process.results import statusToString
from buildbot.process.results import worst_status
from buildbot.reporters.utils import getURLForBuild
from buildbot.util import Notifier
from buildbot.util import bytes2unicode
from buildbot.util.eventual import eventually
class Build(properties.PropertiesMixin):
"""I represent a single build by a single worker. Specialized Builders can
use subclasses of Build to hold status information unique to those build
processes.
I control B{how} the build proceeds. The actual build is broken up into a
series of steps, saved in the .buildSteps[] array as a list of
L{buildbot.process.step.BuildStep} objects. Each step is a single remote
command, possibly a shell command.
After the build, I go away.
I can be used by a factory by setting buildClass on
L{buildbot.process.factory.BuildFactory}
@ivar requests: the list of L{BuildRequest}s that triggered me
"""
VIRTUAL_BUILDERNAME_PROP = "virtual_builder_name"
VIRTUAL_BUILDERDESCRIPTION_PROP = "virtual_builder_description"
VIRTUAL_BUILDERTAGS_PROP = "virtual_builder_tags"
workdir = "build"
reason = "changes"
finished = False
results = None
stopped = False
set_runtime_properties = True
subs = None
_sentinel = [] # used as a sentinel to indicate unspecified initial_value
    def __init__(self, requests):
        """Create a build for the given list of L{BuildRequest}s.

        The requests are merged into a single set of source stamps and a
        single reason string.
        """
        self.requests = requests
        self.locks = []
        # build a source stamp
        self.sources = requests[0].mergeSourceStampsWith(requests[1:])
        self.reason = requests[0].mergeReasons(requests[1:])
        self.currentStep = None
        self.workerEnvironment = {}
        self.buildid = None
        self._buildid_notifier = Notifier()
        self.number = None
        # bookkeeping for steps executed so far (filled in as steps run)
        self.executedSteps = []
        self.stepnames = {}
        self.terminate = False
        self._acquiringLock = None
        self._builderid = None
        # overall results, may downgrade after each step
        self.results = SUCCESS
        self.properties = properties.Properties()
        # tracks execution during the build finish phase
        self._locks_released = False
        self._build_finished = False
        # tracks execution during substantiation
        self._is_substantiating = False
        # tracks the config version for locks
        self.config_version = None
    def getProperties(self):
        """Return this build's L{Properties} object."""
        return self.properties
    def setBuilder(self, builder):
        """
        Set the given builder as our builder.
        @type builder: L{buildbot.process.builder.Builder}
        """
        self.builder = builder
        self.master = builder.master
        # snapshot the lock config version; setLocks() resolves against it
        self.config_version = builder.config_version
    @defer.inlineCallbacks
    def setLocks(self, lockList):
        # Resolve the configured lock accesses into real lock objects via the
        # botmaster, using the config version captured in setBuilder().
        self.locks = yield self.builder.botmaster.getLockFromLockAccesses(lockList,
                                                                          self.config_version)
    def setWorkerEnvironment(self, env):
        # Store the environment dict used for worker-side commands.
        # TODO: remove once we don't have anything depending on this method or attribute
        # e.g., old-style steps (ShellMixin pulls the environment out of the
        # builder directly)
        self.workerEnvironment = env
def getSourceStamp(self, codebase=''):
for source in self.sources:
if source.codebase == codebase:
return source
return None
    def getAllSourceStamps(self):
        # Return a shallow copy so callers cannot mutate our internal list.
        return list(self.sources)
    @staticmethod
    def allChangesFromSources(sources):
        # Generator over every Change in every source stamp, in order.
        for s in sources:
            for c in s.changes:
                yield c
    def allChanges(self):
        # All changes across this build's merged source stamps.
        return Build.allChangesFromSources(self.sources)
def allFiles(self):
# return a list of all source files that were changed
files = []
for c in self.allChanges():
for f in c.files:
files.append(f)
return files
    def __repr__(self):
        # e.g. <Build mybuilder number:12 results:success>
        return "<Build {} number:{} results:{}>".format(self.builder.name, repr(self.number),
                                                        statusToString(self.results))
    def blamelist(self):
        """Return a sorted list of authors responsible for this build's changes."""
        # Note that this algorithm is also implemented in
        # buildbot.reporters.utils.getResponsibleUsersForBuild, but using the data api.
        # it is important for the UI to have the blamelist easily available.
        # The best way is to make sure the owners property is set to full blamelist
        blamelist = []
        for c in self.allChanges():
            if c.who not in blamelist:
                blamelist.append(c.who)
        # NOTE(review): patch authors are appended without the duplicate check
        # applied to change authors — confirm this is intended.
        for source in self.sources:
            if source.patch:  # Add patch author to blamelist
                blamelist.append(source.patch_info[0])
        blamelist.sort()
        return blamelist
def changesText(self):
changetext = ""
for c in self.allChanges():
changetext += "-" * 60 + "\n\n" + c.asText() + "\n"
# consider sorting these by number
return changetext
def setStepFactories(self, step_factories):
"""Set a list of 'step factories', which are tuples of (class,
kwargs), where 'class' is generally a subclass of step.BuildStep .
These are used to create the Steps themselves when the Build starts
(as opposed to when it is first created). By creating the steps
later, their __init__ method will have access to things like
build.allFiles() ."""
self.stepFactories = list(step_factories)
useProgress = True
def getWorkerCommandVersion(self, command, oldversion=None):
return self.workerforbuilder.getWorkerCommandVersion(command, oldversion)
def getWorkerName(self):
return self.workername
    @staticmethod
    def setupPropertiesKnownBeforeBuildStarts(props, requests, builder,
                                              workerforbuilder=None):
        """Accumulate into *props* every property knowable before the build
        runs, merging in increasing priority: global config, change
        properties, build-request properties, builder properties, worker
        properties.

        Note that this function does not setup the 'builddir' worker property:
        it's not possible to know it until before the actual worker has
        attached.
        """
        # start with global properties from the configuration
        props.updateFromProperties(builder.master.config.properties)
        # from the SourceStamps, which have properties via Change
        sources = requests[0].mergeSourceStampsWith(requests[1:])
        for change in Build.allChangesFromSources(sources):
            props.updateFromProperties(change.properties)
        # get any properties from requests (this is the path through which
        # schedulers will send us properties)
        for rq in requests:
            props.updateFromProperties(rq.properties)
        # get builder properties
        builder.setupProperties(props)
        # get worker properties
        # navigate our way back to the L{buildbot.worker.Worker}
        # object that came from the config, and get its properties
        if workerforbuilder is not None:
            workerforbuilder.worker.setupProperties(props)
    @staticmethod
    def setupBuildProperties(props, requests, sources=None, number=None):
        """Set the build-identity properties: 'buildnumber' plus, for
        single-codebase builds only, branch/revision/repository/codebase/
        project (the old single-source interface)."""
        # now set some properties of our own, corresponding to the
        # build itself
        props.setProperty("buildnumber", number, "Build")
        if sources is None:
            sources = requests[0].mergeSourceStampsWith(requests[1:])
        if sources and len(sources) == 1:
            # old interface for backwards compatibility
            source = sources[0]
            props.setProperty("branch", source.branch, "Build")
            props.setProperty("revision", source.revision, "Build")
            props.setProperty("repository", source.repository, "Build")
            props.setProperty("codebase", source.codebase, "Build")
            props.setProperty("project", source.project, "Build")
def setupWorkerBuildirProperty(self, workerforbuilder):
path_module = workerforbuilder.worker.path_module
# navigate our way back to the L{buildbot.worker.Worker}
# object that came from the config, and get its properties
if workerforbuilder.worker.worker_basedir:
builddir = path_module.join(
bytes2unicode(workerforbuilder.worker.worker_basedir),
bytes2unicode(self.builder.config.workerbuilddir))
self.setProperty("builddir", builddir, "Worker")
def setupWorkerForBuilder(self, workerforbuilder):
self.path_module = workerforbuilder.worker.path_module
self.workername = workerforbuilder.worker.workername
self.worker_info = workerforbuilder.worker.info
@defer.inlineCallbacks
def getBuilderId(self):
if self._builderid is None:
if self.hasProperty(self.VIRTUAL_BUILDERNAME_PROP):
self._builderid = yield self.builder.getBuilderIdForName(
self.getProperty(self.VIRTUAL_BUILDERNAME_PROP))
description = self.getProperty(
self.VIRTUAL_BUILDERDESCRIPTION_PROP,
self.builder.config.description)
tags = self.getProperty(
self.VIRTUAL_BUILDERTAGS_PROP,
self.builder.config.tags)
if type(tags) == type([]) and '_virtual_' not in tags:
tags.append('_virtual_')
self.master.data.updates.updateBuilderInfo(self._builderid,
description,
tags)
else:
self._builderid = yield self.builder.getBuilderId()
return self._builderid
    @defer.inlineCallbacks
    def startBuild(self, workerforbuilder):
        """This method sets up the build, then starts it by invoking the
        first Step. It returns a Deferred which will fire when the build
        finishes. This Deferred is guaranteed to never errback."""
        self.workerforbuilder = workerforbuilder
        self.conn = None
        worker = workerforbuilder.worker
        # Cache the worker information as variables instead of accessing via worker, as the worker
        # will disappear during disconnection and some of these properties may still be needed.
        self.workername = worker.workername
        self.worker_info = worker.info
        log.msg("{}.startBuild".format(self))
        # TODO: this will go away when build collapsing is implemented; until
        # then we just assign the build to the first buildrequest
        brid = self.requests[0].id
        builderid = yield self.getBuilderId()
        # register the build with the data API; this assigns our build number
        self.buildid, self.number = \
            yield self.master.data.updates.addBuild(
                builderid=builderid,
                buildrequestid=brid,
                workerid=worker.workerid)
        # wake anyone blocked in get_buildid()
        self._buildid_notifier.notify(self.buildid)
        # subscribe to 'stop' control messages addressed to this build
        self.stopBuildConsumer = yield self.master.mq.startConsuming(self.controlStopBuild,
                                                                     ("control", "builds",
                                                                      str(self.buildid),
                                                                      "stop"))
        # the preparation step counts the time needed for preparing the worker and getting the
        # locks.
        # we cannot use a real step as we don't have a worker yet.
        self.preparation_step = buildstep.BuildStep(name="worker_preparation")
        self.preparation_step.setBuild(self)
        yield self.preparation_step.addStep()
        Build.setupBuildProperties(self.getProperties(), self.requests,
                                   self.sources, self.number)
        # then narrow WorkerLocks down to the right worker
        self.locks = [(l.getLockForWorker(self.workername),
                       a)
                      for l, a in self.locks]
        metrics.MetricCountEvent.log('active_builds', 1)
        # make sure properties are available to people listening on 'new'
        # events
        yield self.master.data.updates.setBuildProperties(self.buildid, self)
        yield self.master.data.updates.setBuildStateString(self.buildid, 'starting')
        yield self.master.data.updates.generateNewBuildEvent(self.buildid)
        try:
            self.setupBuild()  # create .steps
        except Exception:
            yield self.buildPreparationFailure(Failure(), "setupBuild")
            yield self.buildFinished(['Build.setupBuild', 'failed'], EXCEPTION)
            return
        # flush properties in the beginning of the build
        yield self.master.data.updates.setBuildProperties(self.buildid, self)
        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           'preparing worker')
        try:
            ready_or_failure = False
            if workerforbuilder.worker and workerforbuilder.worker.acquireLocks():
                # latent workers are started on demand; flag the pending
                # substantiation so stopBuild() can abort it
                self._is_substantiating = True
                ready_or_failure = yield workerforbuilder.substantiate_if_needed(self)
        except Exception:
            ready_or_failure = Failure()
        finally:
            self._is_substantiating = False
        # If prepare returns True then it is ready and we start a build
        # If it returns failure then we don't start a new build.
        if ready_or_failure is not True:
            yield self.buildPreparationFailure(ready_or_failure, "worker_prepare")
            if self.stopped:
                yield self.buildFinished(["worker", "cancelled"], self.results)
            elif isinstance(ready_or_failure, Failure) and \
                    ready_or_failure.check(interfaces.LatentWorkerCannotSubstantiate):
                yield self.buildFinished(["worker", "cannot", "substantiate"], EXCEPTION)
            else:
                yield self.buildFinished(["worker", "not", "available"], RETRY)
            return
        # ping the worker to make sure they're still there. If they've
        # fallen off the map (due to a NAT timeout or something), this
        # will fail in a couple of minutes, depending upon the TCP
        # timeout.
        #
        # TODO: This can unnecessarily suspend the starting of a build, in
        # situations where the worker is live but is pushing lots of data to
        # us in a build.
        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           'pinging worker')
        log.msg("starting build {}.. pinging the worker {}".format(self, workerforbuilder))
        try:
            ping_success_or_failure = yield workerforbuilder.ping()
        except Exception:
            ping_success_or_failure = Failure()
        if ping_success_or_failure is not True:
            yield self.buildPreparationFailure(ping_success_or_failure, "worker_ping")
            yield self.buildFinished(["worker", "not", "pinged"], RETRY)
            return
        self.conn = workerforbuilder.worker.conn
        # To retrieve the builddir property, the worker must be attached as we
        # depend on its path_module. Latent workers become attached only after
        # preparing them, so we can't setup the builddir property earlier like
        # the rest of properties
        self.setupWorkerBuildirProperty(workerforbuilder)
        self.setupWorkerForBuilder(workerforbuilder)
        self.subs = self.conn.notifyOnDisconnect(self.lostRemote)
        # tell the remote that it's starting a build, too
        try:
            yield self.conn.remoteStartBuild(self.builder.name)
        except Exception:
            yield self.buildPreparationFailure(Failure(), "start_build")
            yield self.buildFinished(["worker", "not", "building"], RETRY)
            return
        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           'acquiring locks')
        yield self.acquireLocks()
        readymsg = "worker {} ready".format(self.getWorkerName())
        yield self.master.data.updates.setStepStateString(self.preparation_step.stepid, readymsg)
        yield self.master.data.updates.finishStep(self.preparation_step.stepid, SUCCESS, False)
        yield self.master.data.updates.setBuildStateString(self.buildid,
                                                           'building')
        # start the sequence of steps
        self.startNextStep()
    @defer.inlineCallbacks
    def buildPreparationFailure(self, why, state_string):
        """Record a failure that happened while preparing the worker,
        finishing the synthetic 'worker_preparation' step accordingly."""
        if self.stopped:
            # if self.stopped, then this failure is a LatentWorker's failure to substantiate
            # which we triggered on purpose in stopBuild()
            log.msg("worker stopped while " + state_string, why)
            yield self.master.data.updates.finishStep(self.preparation_step.stepid,
                                                      CANCELLED, False)
        else:
            log.err(why, "while " + state_string)
            # a worker that failed preparation is suspect; quarantine it
            self.workerforbuilder.worker.putInQuarantine()
            if isinstance(why, failure.Failure):
                yield self.preparation_step.addLogWithFailure(why)
            yield self.master.data.updates.setStepStateString(self.preparation_step.stepid,
                                                              "error while " + state_string)
            yield self.master.data.updates.finishStep(self.preparation_step.stepid,
                                                      EXCEPTION, False)
@staticmethod
def _canAcquireLocks(lockList, workerforbuilder):
for lock, access in lockList:
worker_lock = lock.getLockForWorker(
workerforbuilder.worker.workername)
if not worker_lock.isAvailable(None, access):
return False
return True
    def acquireLocks(self, res=None):
        """Claim all of self.locks, waiting asynchronously for busy ones.

        A pending wait is recorded in self._acquiringLock so that
        stopBuild()/lostRemote() can abort it.
        """
        self._acquiringLock = None
        if not self.locks:
            return defer.succeed(None)
        if self.stopped:
            # the build was cancelled while we were waiting; claim nothing
            return defer.succeed(None)
        log.msg("acquireLocks(build {}, locks {})".format(self, self.locks))
        for lock, access in self.locks:
            if not lock.isAvailable(self, access):
                # wait for the first busy lock, then re-run this whole check
                log.msg("Build {} waiting for lock {}".format(self, lock))
                d = lock.waitUntilMaybeAvailable(self, access)
                d.addCallback(self.acquireLocks)
                self._acquiringLock = (lock, access, d)
                return d
        # all locks are available, claim them all
        for lock, access in self.locks:
            lock.claim(self, access)
        return defer.succeed(None)
def setUniqueStepName(self, step):
# If there are any name collisions, we add a count to the loser
# until it is unique.
name = step.name
if name in self.stepnames:
count = self.stepnames[name]
count += 1
self.stepnames[name] = count
name = "{}_{}".format(step.name, count)
else:
self.stepnames[name] = 0
step.name = name
def setupBuildSteps(self, step_factories):
steps = []
for factory in step_factories:
step = buildstep.create_step_from_step_or_factory(factory)
step.setBuild(self)
step.setWorker(self.workerforbuilder.worker)
steps.append(step)
if self.useProgress:
step.setupProgress()
return steps
def setupBuild(self):
# create the actual BuildSteps.
self.steps = self.setupBuildSteps(self.stepFactories)
owners = set(self.blamelist())
# gather owners from build requests
owners.update({r.properties['owner'] for r in self.requests
if "owner" in r.properties})
if owners:
self.setProperty('owners', sorted(owners), 'Build')
self.text = [] # list of text string lists (text2)
def addStepsAfterCurrentStep(self, step_factories):
# Add the new steps after the step that is running.
# The running step has already been popped from self.steps
self.steps[0:0] = self.setupBuildSteps(step_factories)
def addStepsAfterLastStep(self, step_factories):
# Add the new steps to the end.
self.steps.extend(self.setupBuildSteps(step_factories))
def getNextStep(self):
"""This method is called to obtain the next BuildStep for this build.
When it returns None (or raises a StopIteration exception), the build
is complete."""
if not self.steps:
return None
if not self.conn:
return None
if self.terminate or self.stopped:
# Run any remaining alwaysRun steps, and skip over the others
while True:
s = self.steps.pop(0)
if s.alwaysRun:
return s
if not self.steps:
return None
else:
return self.steps.pop(0)
def startNextStep(self):
try:
s = self.getNextStep()
except StopIteration:
s = None
if not s:
return self.allStepsDone()
self.executedSteps.append(s)
self.currentStep = s
# the following function returns a deferred, but we don't wait for it
self._start_next_step_impl(s)
return defer.succeed(None)
    @defer.inlineCallbacks
    def _start_next_step_impl(self, step):
        """Run *step*, merge its results, then chain to the next step; any
        exception here ends the whole build with EXCEPTION."""
        try:
            results = yield step.startStep(self.conn)
            # persist any properties the step may have set
            yield self.master.data.updates.setBuildProperties(self.buildid, self)
            self.currentStep = None
            if self.finished:
                return  # build was interrupted, don't keep building
            terminate = yield self.stepDone(results, step)  # interpret/merge results
            if terminate:
                self.terminate = True
            yield self.startNextStep()
        except Exception as e:
            log.msg("{} build got exception when running step {}".format(self, step))
            log.err(e)
            yield self.master.data.updates.setBuildProperties(self.buildid, self)
            # Note that buildFinished can't throw exception
            yield self.buildFinished(["build", "exception"], EXCEPTION)
    @defer.inlineCallbacks
    def stepDone(self, results, step):
        """This method is called when the BuildStep completes. It is passed a
        status object from the BuildStep and is responsible for merging the
        Step's results into those of the overall Build."""
        terminate = False
        text = None
        if isinstance(results, tuple):
            # steps may return a (results, text) pair
            results, text = results
        assert isinstance(results, type(SUCCESS)), "got %r" % (results,)
        summary = yield step.getBuildResultSummary()
        if 'build' in summary:
            # the step contributed a summary line for the whole build
            text = [summary['build']]
        log.msg(" step '{}' complete: {} ({})".format(step.name, statusToString(results), text))
        if text:
            self.text.extend(text)
            self.master.data.updates.setBuildStateString(self.buildid,
                                                         bytes2unicode(" ".join(self.text)))
        # fold this step's result into the overall build result
        self.results, terminate = computeResultAndTermination(step, results,
                                                              self.results)
        if not self.conn:
            # force the results to retry if the connection was lost
            self.results = RETRY
            terminate = True
        return terminate
def lostRemote(self, conn=None):
# the worker went away. There are several possible reasons for this,
# and they aren't necessarily fatal. For now, kill the build, but
# TODO: see if we can resume the build when it reconnects.
log.msg("{}.lostRemote".format(self))
self.conn = None
self.text = ["lost", "connection"]
self.results = RETRY
if self.currentStep and self.currentStep.results is None:
# this should cause the step to finish.
log.msg(" stopping currentStep", self.currentStep)
self.currentStep.interrupt(Failure(error.ConnectionLost()))
else:
self.text = ["lost", "connection"]
self.stopped = True
if self._acquiringLock:
lock, access, d = self._acquiringLock
lock.stopWaitingUntilAvailable(self, access, d)
def controlStopBuild(self, key, params):
return self.stopBuild(**params)
    def stopBuild(self, reason="<no reason given>", results=CANCELLED):
        # the idea here is to let the user cancel a build because, e.g.,
        # they realized they committed a bug and they don't want to waste
        # the time building something that they know will fail. Another
        # reason might be to abandon a stuck build. We want to mark the
        # build as failed quickly rather than waiting for the worker's
        # timeout to kill it on its own.
        log.msg(" {}: stopping build: {} {}".format(self, reason, results))
        if self.finished:
            # already done; nothing to stop
            return
        # TODO: include 'reason' in this point event
        self.stopped = True
        if self.currentStep and self.currentStep.results is None:
            # interrupt the running step so it finishes promptly
            self.currentStep.interrupt(reason)
        self.results = results
        if self._acquiringLock:
            # abort a pending lock wait so the build can end now
            lock, access, d = self._acquiringLock
            lock.stopWaitingUntilAvailable(self, access, d)
        elif self._is_substantiating:
            # We're having a latent worker that hasn't been substantiated yet. We need to abort
            # that to not have a latent worker without an associated build
            self.workerforbuilder.insubstantiate_if_needed()
def allStepsDone(self):
if self.results == FAILURE:
text = ["failed"]
elif self.results == WARNINGS:
text = ["warnings"]
elif self.results == EXCEPTION:
text = ["exception"]
elif self.results == RETRY:
text = ["retry"]
elif self.results == CANCELLED:
text = ["cancelled"]
else:
text = ["build", "successful"]
text.extend(self.text)
return self.buildFinished(text, self.results)
    @defer.inlineCallbacks
    def buildFinished(self, text, results):
        """This method must be called when the last Step has completed. It
        marks the Build as complete and returns the Builder to the 'idle'
        state.
        It takes two arguments which describe the overall build status:
        text, results. 'results' is one of the possible results (see buildbot.process.results).
        If 'results' is SUCCESS or WARNINGS, we will permit any dependent
        builds to start. If it is 'FAILURE', those builds will be
        abandoned.
        This method never throws."""
        try:
            self.stopBuildConsumer.stopConsuming()
            self.finished = True
            if self.conn:
                # drop the disconnect subscription before releasing the conn
                self.subs.unsubscribe()
                self.subs = None
                self.conn = None
            log.msg(" {}: build finished".format(self))
            self.results = worst_status(self.results, results)
            # release locks via the reactor so pending callbacks run first
            eventually(self.releaseLocks)
            metrics.MetricCountEvent.log('active_builds', -1)
            yield self.master.data.updates.setBuildStateString(self.buildid,
                                                               bytes2unicode(" ".join(text)))
            yield self.master.data.updates.finishBuild(self.buildid, self.results)
            if self.results == EXCEPTION:
                # When a build has an exception, put the worker in quarantine for a few seconds
                # to make sure we try next build with another worker
                self.workerforbuilder.worker.putInQuarantine()
            elif self.results != RETRY:
                # This worker looks sane if status is neither retry or exception
                # Avoid a race in case the build step reboot the worker
                if self.workerforbuilder.worker is not None:
                    self.workerforbuilder.worker.resetQuarantine()
            # mark the build as finished
            self.workerforbuilder.buildFinished()
            self.builder.buildFinished(self, self.workerforbuilder)
            self._tryScheduleBuildsAfterLockUnlock(build_finished=True)
        except Exception:
            log.err(None, 'from finishing a build; this is a '
                          'serious error - please file a bug at http://buildbot.net')
def releaseLocks(self):
if self.locks:
log.msg("releaseLocks({}): {}".format(self, self.locks))
for lock, access in self.locks:
if lock.isOwner(self, access):
lock.release(self, access)
self._tryScheduleBuildsAfterLockUnlock(locks_released=True)
    def _tryScheduleBuildsAfterLockUnlock(self, locks_released=False,
                                          build_finished=False):
        # we need to inform the botmaster to attempt to schedule any pending
        # build request if we released any locks. This is because buildrequest
        # may be started for a completely unrelated builder and yet depend on
        # a lock released by this build.
        #
        # TODO: the current approach is dumb as we just attempt to schedule
        # all buildrequests. A much better idea would be to record the reason
        # of why a buildrequest was not scheduled in the BuildRequestDistributor
        # and then attempt to schedule only these buildrequests which may have
        # had that reason resolved.
        # this function is complicated by the fact that the botmaster must be
        # informed only when all locks have been released and the actions in
        # buildFinished have concluded. Since releaseLocks is called using
        # eventually this may happen in any order.
        self._locks_released = self._locks_released or locks_released
        self._build_finished = self._build_finished or build_finished
        # this build held no locks, so it cannot have unblocked anything
        if not self.locks:
            return
        # only notify once both the lock release and buildFinished happened
        if self._locks_released and self._build_finished:
            self.builder.botmaster.maybeStartBuildsForAllBuilders()
def getSummaryStatistic(self, name, summary_fn, initial_value=_sentinel):
step_stats_list = [
st.getStatistic(name)
for st in self.executedSteps
if st.hasStatistic(name)]
if initial_value is self._sentinel:
return reduce(summary_fn, step_stats_list)
return reduce(summary_fn, step_stats_list, initial_value)
@defer.inlineCallbacks
def getUrl(self):
builder_id = yield self.getBuilderId()
return getURLForBuild(self.master, builder_id, self.number)
@defer.inlineCallbacks
def get_buildid(self):
if self.buildid is not None:
return self.buildid
buildid = yield self._buildid_notifier.wait()
return buildid
@defer.inlineCallbacks
def waitUntilFinished(self):
buildid = yield self.get_buildid()
yield self.master.mq.waitUntilEvent(('builds', str(buildid), 'finished'),
lambda: self.finished)
def getWorkerInfo(self):
return self.worker_info
|
gpl-2.0
|
hackshel/py-aluminium
|
src/__furture__/simplepool.py
|
2
|
3289
|
#!/usr/bin/env python
"""simple thread pool
@author: dn13(dn13@gmail.com)
@author: Fibrizof(dfang84@gmail.com)
"""
import threading
import Queue
import new
class WorkerPoolError( Exception ):
    """Raised on worker-pool misuse, e.g. submitting work to a joined pool.

    Fix: this was declared with 'def' instead of 'class', which made
    WorkerPoolError a plain function — every 'raise WorkerPoolError ...'
    in this module would have failed with a TypeError.
    """
    pass
class Task(threading.Thread):
    """Worker thread: pulls callables off *queue* and pushes their return
    values onto *result_queue*.  A None item is a wake-up sentinel used by
    cancel() to unblock a thread waiting in get()."""

    def __init__(self, queue, result_queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.result_queue = result_queue
        self.running = True

    def cancel(self):
        # flip the flag first, then wake the thread with a sentinel so a
        # blocked get() returns and the loop condition is re-checked
        self.running = False
        self.queue.put(None)

    def run(self):
        while self.running:
            call = self.queue.get()
            try:
                if call:
                    try:
                        result = call()  # fixed 'reslut' typo
                        self.result_queue.put(result)
                    except Exception:
                        # best-effort: a failing task must not kill the worker
                        pass
            finally:
                # always balance get() with task_done(), even for the
                # sentinel, so queue.join() can never deadlock
                self.queue.task_done()
class WorkerPool( object ):
    """Thread pool running queued callables on *threadnum* Task threads.

    The pool object is itself callable: pool(callable) enqueues work.
    Functions may also be wrapped (runwithpool) so calls are enqueued, or
    registered by name (registtopool) and reached as pool attributes.

    Fix: the py2-only 'raise Exc, msg' statements were rewritten in call
    form, which is valid on both Python 2 and Python 3.
    """

    def __init__( self, threadnum ):
        self.threadnum = threadnum
        self.q = Queue.Queue()
        self.result_q = Queue.Queue()
        self.ts = [ Task(self.q, self.result_q) for i in range(threadnum) ]
        self._registfunctions = {}
        self.is_in_join = False
        for t in self.ts :
            t.setDaemon(True)
            t.start()

    def __del__(self):
        try:
            # cancel() is called twice on purpose: the first pass flips every
            # thread's 'running' flag to False, the second makes sure each
            # thread receives a queue wake-up signal.  (Kept as one interface
            # out of laziness — translated from the original Chinese note.)
            for t in self.ts:
                t.cancel()
            for t in self.ts:
                t.cancel()
        except Exception:
            # best-effort cleanup during interpreter shutdown
            pass

    def __call__( self, work ):
        """Enqueue *work* (a no-argument callable) for execution."""
        if not self.is_in_join:
            self.q.put( work )
        else:
            raise WorkerPoolError('Pool has been joined')

    def join( self ):
        """Block until every queued task has been processed."""
        self.is_in_join = True
        self.q.join()
        self.is_in_join = False
        return

    def runwithpool( self, _old ):
        """Decorator: calls to the wrapped function are enqueued in the pool."""
        def _new( *args, **kwargs ):
            self.q.put( lambda : _old( *args, **kwargs ) )
        return _new

    def registtopool( self, _old ):
        """Register *_old* so it is reachable as an attribute of the pool."""
        if _old.__name__ in self._registfunctions :
            raise WorkerPoolError('function name exists')
        self._registfunctions[_old.__name__] = _old
        return _old

    def get_all_result(self):
        """Drain and return every result currently in the result queue."""
        result_list = []
        while True:
            try:
                result_list.append(self.result_q.get_nowait())
            except Queue.Empty:
                # narrowed from a blanket 'except Exception': get_nowait()
                # only raises Empty; retry if items raced in meanwhile
                if 0 == self.result_q.qsize():
                    break
                else:
                    continue
        return result_list

    def __getattr__( self, name ):
        if name in self._registfunctions :
            return self._registfunctions[name]
        raise AttributeError('%s not found' % name)
if __name__ == '__main__' :
    # Smoke test (Python 2): exercises the decorator, the registry and the
    # direct-call interfaces of WorkerPool.
    import thread
    p = WorkerPool(5)
    @p.runwithpool
    def foo( a ):
        print 'foo>', thread.get_ident(), '>', a
        return
    @p.registtopool
    def bar( b ):
        print 'bar>', thread.get_ident(), '>', b
    for i in range(10):
        # foo(i) is enqueued; p.bar(...) runs bar synchronously via the
        # registry; p(lambda ...) enqueues a raw callable
        foo(i)
        p.bar(i+100)
        p( lambda : bar(200) )
    p.join()
|
bsd-3-clause
|
hyowon/servo
|
tests/wpt/web-platform-tests/html/infrastructure/urls/resolving-urls/query-encoding/resources/resource.py
|
113
|
6305
|
import os
import re
def main(request, response):
    """wptserve handler: serve a body of the kind named by the 'type' query
    parameter, echoing the raw (still percent-encoded) 'q' parameter so the
    query-encoding tests can observe how the URL was encoded.

    Fixes: local 'type' renamed (shadowed the builtin); file reads use
    context managers so handles are not leaked; the one branch that re-read
    request.GET['encoding'] now uses the 'encoding' local consistently.
    """
    resource_type = request.GET['type']
    encoding = request.GET['encoding']
    # We want the raw input for 'q'
    q = re.search(r'q=([^&]+)', request.url_parts.query).groups()[0]
    if resource_type == 'html':
        return [("Content-Type", "text/html; charset=utf-8")], q
    elif resource_type == 'css':
        return [("Content-Type", "text/css; charset=utf-8")], "#test::before { content:'%s' }" % q
    elif resource_type == 'js':
        return [("Content-Type", "text/javascript; charset=utf-8")], "%s = '%s';" % (request.GET['var'], q)
    elif resource_type == 'worker':
        return [("Content-Type", "text/javascript")], "postMessage('%s'); close();" % q
    elif resource_type == 'sharedworker':
        return [("Content-Type", "text/javascript")], "onconnect = function(e) { e.source.postMessage('%s'); close(); };" % q
    elif resource_type == 'worker_importScripts':
        return ([("Content-Type", "text/javascript; charset=%s" % encoding)], # charset should be ignored for workers
                """try {
                  var x = 'importScripts failed to run';
                  importScripts('?q=\\u00E5&type=js&var=x&encoding=%s');
                  postMessage(x);
                  close();
                } catch(ex) {
                  postMessage(String(ex));
                }""" % encoding)
    elif resource_type == 'worker_worker':
        return ([("Content-Type", "text/javascript; charset=%s" % encoding)], # charset should be ignored for workers
                """try {
                  var worker = new Worker('?q=\\u00E5&type=worker&encoding=%s');
                  worker.onmessage = function(e) {
                    postMessage(e.data);
                    close();
                  };
                } catch(ex) {
                  postMessage(String(ex));
                }""" % encoding)
    elif resource_type =='worker_sharedworker':
        return ([("Content-Type", "text/javascript; charset=%s" % encoding)], # charset should be ignored for workers
                """try {
                  var worker = new SharedWorker('?q=\\u00E5&type=sharedworker&encoding=%s');
                  worker.port.onmessage = function(e) {
                    postMessage(e.data);
                    close();
                  };
                } catch(ex) {
                  postMessage(String(ex));
                }""" % encoding)
    elif resource_type == 'sharedworker_importScripts':
        return ([("Content-Type", "text/javascript; charset=%s" % encoding)], # charset should be ignored for workers
                """onconnect = function(e) {
                  var connect_port = e.source;
                  try {
                    var x = 'importScripts failed to run';
                    importScripts('?q=\\u00E5&type=js&var=x&encoding=%s');
                    connect_port.postMessage(x);
                    close();
                  } catch(ex) {
                    connect_port.postMessage(String(ex));
                  }
                };""" % encoding)
    elif resource_type == 'sharedworker_worker':
        return ([("Content-Type", "text/javascript; charset=%s" % encoding)], # charset should be ignored for workers
                """onconnect = function(e) {
                  var connect_port = e.source;
                  try {
                    var worker = new Worker('?q=\\u00E5&type=worker&encoding=%s');
                    worker.onmessage = function(e) {
                      connect_port.postMessage(e.data);
                      close();
                    };
                  } catch(ex) {
                    connect_port.postMessage(String(ex));
                  }
                };""" % encoding)
    elif resource_type == 'sharedworker_sharedworker':
        return ([("Content-Type", "text/javascript; charset=%s" % encoding)], # charset should be ignored for workers
                """onconnect = function(e) {
                  var connect_port = e.source;
                  try {
                    onerror = function(msg) {
                      connect_port.postMessage(msg);
                      close();
                      return false;
                    };
                    var worker = new SharedWorker('?q=\\u00E5&type=sharedworker&encoding=%s');
                    worker.port.onmessage = function(e) {
                      connect_port.postMessage(e.data);
                      close();
                    };
                  } catch(ex) {
                    connect_port.postMessage(String(ex));
                  }
                };""" % encoding)
    elif resource_type == 'eventstream':
        return [("Content-Type", "text/event-stream")], "data: %s\n\n" % q
    elif resource_type == 'svg':
        return [("Content-Type", "image/svg+xml")], "<svg xmlns='http://www.w3.org/2000/svg'>%s</svg>" % q
    elif resource_type == 'xmlstylesheet_css':
        return ([("Content-Type", "application/xhtml+xml; charset=%s" % encoding)],
                (u"""<?xml-stylesheet href="?q=å&type=css&encoding=%s"?><html xmlns="http://www.w3.org/1999/xhtml"/>""" % encoding)
                .encode(encoding))
    elif resource_type == 'png':
        # q's encoding selects which fixture image (and thus size) is served
        if q == '%E5':
            image = 'green-1x1.png'
        elif q == '%C3%A5':
            image = 'green-2x2.png'
        elif q == '%3F':
            image = 'green-16x16.png'
        else:
            image = 'green-256x256.png'
        # use a context manager so the file handle is not leaked
        with open(os.path.join(request.doc_root, "images", image)) as f:
            rv = f.read()
        return [("Content-Type", "image/png")], rv
    elif resource_type == 'video':
        ext = request.GET['ext']
        # q's encoding selects which fixture video (and thus duration) is served
        if q == '%E5':
            video = 'A4' # duration: 3
        elif q == '%C3%A5':
            video = 'movie_5' # duration: 5
        elif q == '%3F':
            video = 'green-at-15' # duration: 30
        else:
            video = 'movie_300' # duration: 300
        with open(os.path.join(request.doc_root, "media", "%s.%s" % (video, ext))) as f:
            rv = f.read()
        if ext == 'ogv':
            ext = 'ogg'
        return [("Content-Type", "video/%s" % ext)], rv
    elif resource_type == 'webvtt':
        return [("Content-Type", "text/vtt")], "WEBVTT\n\n00:00:00.000 --> 00:00:01.000\n%s" % q
|
mpl-2.0
|
deKupini/erp
|
addons/l10n_in_hr_payroll/report/payslip_report.py
|
8
|
3911
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class payslip_report(osv.osv):
    """Read-only payslip analysis model backed by a SQL view, with one row
    per (slip, line name, employee, category) and the line totals summed."""
    _name = "payslip.report"
    _description = "Payslip Analysis"
    # no table is created for this model; init() builds a view instead
    _auto = False
    _columns = {
        'name':fields.char('Name', readonly=True),
        'date_from': fields.date('Date From', readonly=True,),
        'date_to': fields.date('Date To', readonly=True,),
        'year': fields.char('Year', size=4, readonly=True),
        'month': fields.selection([('01', 'January'), ('02', 'February'), ('03', 'March'), ('04', 'April'),
            ('05', 'May'), ('06', 'June'), ('07', 'July'), ('08', 'August'), ('09', 'September'),
            ('10', 'October'), ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
        'day': fields.char('Day', size=128, readonly=True),
        'state': fields.selection([
            ('draft', 'Draft'),
            ('done', 'Done'),
            ('cancel', 'Rejected'),
        ], 'Status', readonly=True),
        'employee_id': fields.many2one('hr.employee', 'Employee', readonly=True),
        'nbr': fields.integer('# Payslip lines', readonly=True),
        'number': fields.char('Number', readonly=True),
        'struct_id': fields.many2one('hr.payroll.structure', 'Structure', readonly=True),
        'company_id':fields.many2one('res.company', 'Company', readonly=True),
        'paid': fields.boolean('Made Payment Order ? ', readonly=True),
        'total': fields.float('Total', readonly=True),
        'category_id':fields.many2one('hr.salary.rule.category', 'Category', readonly=True),
    }
    def init(self, cr):
        # (re)create the reporting view joining payslips with their lines;
        # year/month/day columns are derived from date_from for grouping
        tools.drop_view_if_exists(cr, 'payslip_report')
        cr.execute("""
            create or replace view payslip_report as (
                select
                    min(l.id) as id,
                    l.name,
                    p.struct_id,
                    p.state,
                    p.date_from,
                    p.date_to,
                    p.number,
                    p.company_id,
                    p.paid,
                    l.category_id,
                    l.employee_id,
                    sum(l.total) as total,
                    to_char(p.date_from, 'YYYY') as year,
                    to_char(p.date_from, 'MM') as month,
                    to_char(p.date_from, 'YYYY-MM-DD') as day,
                    to_char(p.date_to, 'YYYY') as to_year,
                    to_char(p.date_to, 'MM') as to_month,
                    to_char(p.date_to, 'YYYY-MM-DD') as to_day,
                    1 AS nbr
                from
                    hr_payslip as p
                    left join hr_payslip_line as l on (p.id=l.slip_id)
                where
                    l.employee_id IS NOT NULL
                group by
                    p.number,l.name,p.date_from,p.date_to,p.state,p.company_id,p.paid,
                    l.employee_id,p.struct_id,l.category_id
            )
        """)
|
agpl-3.0
|
indie1982/osmc-fixes
|
package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x054.py
|
251
|
4583
|
# Transliteration lookup table (unidecode): data[i] is the ASCII
# replacement for the i-th code point of this 256-entry Unicode block
# (the file name x054 suggests U+5400..U+54FF, CJK ideographs --
# TODO confirm against the unidecode table index).
# '[?] ' marks code points with no known transliteration.
data = (
'Mie ', # 0x00
'Xu ', # 0x01
'Mang ', # 0x02
'Chi ', # 0x03
'Ge ', # 0x04
'Xuan ', # 0x05
'Yao ', # 0x06
'Zi ', # 0x07
'He ', # 0x08
'Ji ', # 0x09
'Diao ', # 0x0a
'Cun ', # 0x0b
'Tong ', # 0x0c
'Ming ', # 0x0d
'Hou ', # 0x0e
'Li ', # 0x0f
'Tu ', # 0x10
'Xiang ', # 0x11
'Zha ', # 0x12
'Xia ', # 0x13
'Ye ', # 0x14
'Lu ', # 0x15
'A ', # 0x16
'Ma ', # 0x17
'Ou ', # 0x18
'Xue ', # 0x19
'Yi ', # 0x1a
'Jun ', # 0x1b
'Chou ', # 0x1c
'Lin ', # 0x1d
'Tun ', # 0x1e
'Yin ', # 0x1f
'Fei ', # 0x20
'Bi ', # 0x21
'Qin ', # 0x22
'Qin ', # 0x23
'Jie ', # 0x24
'Bu ', # 0x25
'Fou ', # 0x26
'Ba ', # 0x27
'Dun ', # 0x28
'Fen ', # 0x29
'E ', # 0x2a
'Han ', # 0x2b
'Ting ', # 0x2c
'Hang ', # 0x2d
'Shun ', # 0x2e
'Qi ', # 0x2f
'Hong ', # 0x30
'Zhi ', # 0x31
'Shen ', # 0x32
'Wu ', # 0x33
'Wu ', # 0x34
'Chao ', # 0x35
'Ne ', # 0x36
'Xue ', # 0x37
'Xi ', # 0x38
'Chui ', # 0x39
'Dou ', # 0x3a
'Wen ', # 0x3b
'Hou ', # 0x3c
'Ou ', # 0x3d
'Wu ', # 0x3e
'Gao ', # 0x3f
'Ya ', # 0x40
'Jun ', # 0x41
'Lu ', # 0x42
'E ', # 0x43
'Ge ', # 0x44
'Mei ', # 0x45
'Ai ', # 0x46
'Qi ', # 0x47
'Cheng ', # 0x48
'Wu ', # 0x49
'Gao ', # 0x4a
'Fu ', # 0x4b
'Jiao ', # 0x4c
'Hong ', # 0x4d
'Chi ', # 0x4e
'Sheng ', # 0x4f
'Ne ', # 0x50
'Tun ', # 0x51
'Fu ', # 0x52
'Yi ', # 0x53
'Dai ', # 0x54
'Ou ', # 0x55
'Li ', # 0x56
'Bai ', # 0x57
'Yuan ', # 0x58
'Kuai ', # 0x59
'[?] ', # 0x5a
'Qiang ', # 0x5b
'Wu ', # 0x5c
'E ', # 0x5d
'Shi ', # 0x5e
'Quan ', # 0x5f
'Pen ', # 0x60
'Wen ', # 0x61
'Ni ', # 0x62
'M ', # 0x63
'Ling ', # 0x64
'Ran ', # 0x65
'You ', # 0x66
'Di ', # 0x67
'Zhou ', # 0x68
'Shi ', # 0x69
'Zhou ', # 0x6a
'Tie ', # 0x6b
'Xi ', # 0x6c
'Yi ', # 0x6d
'Qi ', # 0x6e
'Ping ', # 0x6f
'Zi ', # 0x70
'Gu ', # 0x71
'Zi ', # 0x72
'Wei ', # 0x73
'Xu ', # 0x74
'He ', # 0x75
'Nao ', # 0x76
'Xia ', # 0x77
'Pei ', # 0x78
'Yi ', # 0x79
'Xiao ', # 0x7a
'Shen ', # 0x7b
'Hu ', # 0x7c
'Ming ', # 0x7d
'Da ', # 0x7e
'Qu ', # 0x7f
'Ju ', # 0x80
'Gem ', # 0x81
'Za ', # 0x82
'Tuo ', # 0x83
'Duo ', # 0x84
'Pou ', # 0x85
'Pao ', # 0x86
'Bi ', # 0x87
'Fu ', # 0x88
'Yang ', # 0x89
'He ', # 0x8a
'Zha ', # 0x8b
'He ', # 0x8c
'Hai ', # 0x8d
'Jiu ', # 0x8e
'Yong ', # 0x8f
'Fu ', # 0x90
'Que ', # 0x91
'Zhou ', # 0x92
'Wa ', # 0x93
'Ka ', # 0x94
'Gu ', # 0x95
'Ka ', # 0x96
'Zuo ', # 0x97
'Bu ', # 0x98
'Long ', # 0x99
'Dong ', # 0x9a
'Ning ', # 0x9b
'Tha ', # 0x9c
'Si ', # 0x9d
'Xian ', # 0x9e
'Huo ', # 0x9f
'Qi ', # 0xa0
'Er ', # 0xa1
'E ', # 0xa2
'Guang ', # 0xa3
'Zha ', # 0xa4
'Xi ', # 0xa5
'Yi ', # 0xa6
'Lie ', # 0xa7
'Zi ', # 0xa8
'Mie ', # 0xa9
'Mi ', # 0xaa
'Zhi ', # 0xab
'Yao ', # 0xac
'Ji ', # 0xad
'Zhou ', # 0xae
'Ge ', # 0xaf
'Shuai ', # 0xb0
'Zan ', # 0xb1
'Xiao ', # 0xb2
'Ke ', # 0xb3
'Hui ', # 0xb4
'Kua ', # 0xb5
'Huai ', # 0xb6
'Tao ', # 0xb7
'Xian ', # 0xb8
'E ', # 0xb9
'Xuan ', # 0xba
'Xiu ', # 0xbb
'Wai ', # 0xbc
'Yan ', # 0xbd
'Lao ', # 0xbe
'Yi ', # 0xbf
'Ai ', # 0xc0
'Pin ', # 0xc1
'Shen ', # 0xc2
'Tong ', # 0xc3
'Hong ', # 0xc4
'Xiong ', # 0xc5
'Chi ', # 0xc6
'Wa ', # 0xc7
'Ha ', # 0xc8
'Zai ', # 0xc9
'Yu ', # 0xca
'Di ', # 0xcb
'Pai ', # 0xcc
'Xiang ', # 0xcd
'Ai ', # 0xce
'Hen ', # 0xcf
'Kuang ', # 0xd0
'Ya ', # 0xd1
'Da ', # 0xd2
'Xiao ', # 0xd3
'Bi ', # 0xd4
'Yue ', # 0xd5
'[?] ', # 0xd6
'Hua ', # 0xd7
'Sasou ', # 0xd8
'Kuai ', # 0xd9
'Duo ', # 0xda
'[?] ', # 0xdb
'Ji ', # 0xdc
'Nong ', # 0xdd
'Mou ', # 0xde
'Yo ', # 0xdf
'Hao ', # 0xe0
'Yuan ', # 0xe1
'Long ', # 0xe2
'Pou ', # 0xe3
'Mang ', # 0xe4
'Ge ', # 0xe5
'E ', # 0xe6
'Chi ', # 0xe7
'Shao ', # 0xe8
'Li ', # 0xe9
'Na ', # 0xea
'Zu ', # 0xeb
'He ', # 0xec
'Ku ', # 0xed
'Xiao ', # 0xee
'Xian ', # 0xef
'Lao ', # 0xf0
'Bo ', # 0xf1
'Zhe ', # 0xf2
'Zha ', # 0xf3
'Liang ', # 0xf4
'Ba ', # 0xf5
'Mie ', # 0xf6
'Le ', # 0xf7
'Sui ', # 0xf8
'Fou ', # 0xf9
'Bu ', # 0xfa
'Han ', # 0xfb
'Heng ', # 0xfc
'Geng ', # 0xfd
'Shuo ', # 0xfe
'Ge ', # 0xff
)
|
gpl-2.0
|
dgrat/ardupilot
|
Tools/LogAnalyzer/LogAnalyzer.py
|
63
|
12567
|
#!/usr/bin/env python
#
# A module to analyze and identify any common problems which can be determined from log files
#
# Initial code by Andrew Chapman (amchapman@gmail.com), 16th Jan 2014
#
# some logging oddities noticed while doing this, to be followed up on:
# - tradheli MOT labels Mot1,Mot2,Mot3,Mot4,GGain
# - Pixhawk doesn't output one of the FMT labels... forget which one
# - MAG offsets seem to be constant (only seen data on Pixhawk)
# - MAG offsets seem to be cast to int before being output? (param is -84.67, logged as -84)
# - copter+plane use 'V' in their vehicle type/version/build line, rover uses lower case 'v'. Copter+Rover give a build number, plane does not
# - CTUN.ThrOut on copter is 0-1000, on plane+rover it is 0-100
# TODO: add test for noisy baro values
# TODO: support loading binary log files (use Tridge's mavlogdump?)
from __future__ import print_function
import DataflashLog
import pprint # temp
import imp
import glob
import inspect
import os, sys
import argparse
import datetime
import time
from xml.sax.saxutils import escape
from VehicleType import VehicleType
class TestResult(object):
    """Standardized result object returned by every log test."""

    class StatusType:
        # NA means not applicable for this log (e.g. copter tests against a
        # plane log), UNKNOWN means it is missing data required for the test.
        GOOD = 0
        FAIL = 1
        WARN = 2
        UNKNOWN = 3
        NA = 4

    # One of the StatusType values; None until the test has run.
    status = None
    # Human-readable summary of the outcome; may span multiple lines.
    statusMessage = ""
class Test(object):
    """Base class to be inherited by log tests.

    Each test should be quite granular so we have lots of small tests with
    clear results.
    """

    def __init__(self):
        self.enable = True    # set to False to skip this test
        self.name = ""
        self.execTime = None  # filled in (ms) by the suite after run()
        self.result = None    # a TestResult instance once the test has run

    def run(self, logdata, verbose=False):
        # Subclasses override this with the actual analysis.
        pass
class TestSuite(object):
    '''registers test classes, loading using a basic plugin architecture, and can run them all in one run() operation'''

    def __init__(self):
        self.tests = []
        self.logfile = None
        self.logdata = None
        # dynamically load in Test subclasses from the 'tests' folder
        # to prevent one being loaded, move it out of that folder, or set
        # that test's .enable attribute to False
        dirName = os.path.dirname(os.path.abspath(__file__))
        testScripts = glob.glob(dirName + '/tests/*.py')
        testClasses = []
        for script in testScripts:
            m = imp.load_source("m", script)
            for name, obj in inspect.getmembers(m, inspect.isclass):
                # only register classes actually defined in this script,
                # and only once per class name
                if name not in testClasses and inspect.getsourcefile(obj) == script:
                    testClasses.append(name)
                    self.tests.append(obj())

        # and here's an example of explicitly loading a Test class if you wanted to do that
        # m = imp.load_source("m", dirName + '/tests/TestBadParams.py')
        # self.tests.append(m.TestBadParams())

    def run(self, logdata, verbose):
        '''run all registered tests in a single call, gathering execution timing info'''
        self.logdata = logdata
        if 'GPS' not in self.logdata.channels and 'GPS2' in self.logdata.channels:
            # *cough* - fall back to the secondary GPS when the primary
            # channel is absent so GPS-based tests still run
            self.logdata.channels['GPS'] = self.logdata.channels['GPS2']
        self.logfile = logdata.filename
        for test in self.tests:
            # run each test in turn, gathering timing info
            if test.enable:
                startTime = time.time()
                test.run(self.logdata, verbose)  # RUN THE TEST
                endTime = time.time()
                test.execTime = 1000 * (endTime - startTime)

    def outputPlainText(self, outputStats):
        '''output test results in plain text; outputStats adds per-test timing'''
        print('Dataflash log analysis report for file: ' + self.logfile)
        print('Log size: %.2fmb (%d lines)' % (self.logdata.filesizeKB / 1024.0, self.logdata.lineCount))
        print('Log duration: %s' % str(datetime.timedelta(seconds=self.logdata.durationSecs)) + '\n')

        if self.logdata.vehicleType == VehicleType.Copter and self.logdata.getCopterType():
            print('Vehicle Type: %s (%s)' % (self.logdata.vehicleTypeString, self.logdata.getCopterType()))
        else:
            print('Vehicle Type: %s' % self.logdata.vehicleTypeString)
        print('Firmware Version: %s (%s)' % (self.logdata.firmwareVersion, self.logdata.firmwareHash))
        print('Hardware: %s' % self.logdata.hardwareType)
        print('Free RAM: %s' % self.logdata.freeRAM)
        if self.logdata.skippedLines:
            print("\nWARNING: %d malformed log lines skipped during read" % self.logdata.skippedLines)
        print('\n')

        print("Test Results:")
        for test in self.tests:
            if not test.enable:
                continue
            statusMessageFirstLine = test.result.statusMessage.strip('\n\r').split('\n')[0]
            statusMessageExtra = test.result.statusMessage.strip('\n\r').split('\n')[1:]
            execTime = ""
            if outputStats:
                execTime = " (%6.2fms)" % (test.execTime)
            if test.result.status == TestResult.StatusType.GOOD:
                print("  %20s:  GOOD       %-55s%s" % (test.name, statusMessageFirstLine, execTime))
            elif test.result.status == TestResult.StatusType.FAIL:
                print("  %20s:  FAIL       %-55s%s    [GRAPH]" % (test.name, statusMessageFirstLine, execTime))
            elif test.result.status == TestResult.StatusType.WARN:
                print("  %20s:  WARN       %-55s%s    [GRAPH]" % (test.name, statusMessageFirstLine, execTime))
            elif test.result.status == TestResult.StatusType.NA:
                # skip any that aren't relevant for this vehicle/hardware/etc
                continue
            else:
                print("  %20s:  UNKNOWN    %-55s%s" % (test.name, statusMessageFirstLine, execTime))
            # continuation lines of a multi-line status message, indented
            # under the first line
            for line in statusMessageExtra:
                print("  %29s %s" % ("", line))

        print('\n')
        print('The Log Analyzer is currently BETA code.\nFor any support or feedback on the log analyzer please email Andrew Chapman (amchapman@gmail.com)')
        print('\n')

    def outputXML(self, xmlFile):
        '''output test results to an XML file, or to stdout when xmlFile is '-' '''

        # open the file for writing
        xml = None
        try:
            if xmlFile == '-':
                xml = sys.stdout
            else:
                xml = open(xmlFile, 'w')
        except Exception:
            sys.stderr.write("Error opening output xml file: %s" % xmlFile)
            sys.exit(1)

        # output header info
        xml.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
        xml.write("<loganalysis>\n")
        xml.write("<header>\n")
        xml.write("  <logfile>" + escape(self.logfile) + "</logfile>\n")
        xml.write("  <sizekb>" + escape(repr(self.logdata.filesizeKB)) + "</sizekb>\n")
        xml.write("  <sizelines>" + escape(repr(self.logdata.lineCount)) + "</sizelines>\n")
        xml.write("  <duration>" + escape(str(datetime.timedelta(seconds=self.logdata.durationSecs))) + "</duration>\n")
        xml.write("  <vehicletype>" + escape(self.logdata.vehicleTypeString) + "</vehicletype>\n")
        if self.logdata.vehicleType == VehicleType.Copter and self.logdata.getCopterType():
            xml.write("  <coptertype>" + escape(self.logdata.getCopterType()) + "</coptertype>\n")
        xml.write("  <firmwareversion>" + escape(self.logdata.firmwareVersion) + "</firmwareversion>\n")
        xml.write("  <firmwarehash>" + escape(self.logdata.firmwareHash) + "</firmwarehash>\n")
        xml.write("  <hardwaretype>" + escape(self.logdata.hardwareType) + "</hardwaretype>\n")
        xml.write("  <freemem>" + escape(repr(self.logdata.freeRAM)) + "</freemem>\n")
        xml.write("  <skippedlines>" + escape(repr(self.logdata.skippedLines)) + "</skippedlines>\n")
        xml.write("</header>\n")

        # output parameters
        xml.write("<params>\n")
        for param, value in self.logdata.parameters.items():
            xml.write("  <param name=\"%s\" value=\"%s\" />\n" % (param, escape(repr(value))))
        xml.write("</params>\n")

        # output test results
        xml.write("<results>\n")
        for test in self.tests:
            if not test.enable:
                continue
            xml.write("  <result>\n")
            if test.result.status == TestResult.StatusType.GOOD:
                xml.write("    <name>" + escape(test.name) + "</name>\n")
                xml.write("    <status>GOOD</status>\n")
                xml.write("    <message>" + escape(test.result.statusMessage) + "</message>\n")
            elif test.result.status == TestResult.StatusType.FAIL:
                xml.write("    <name>" + escape(test.name) + "</name>\n")
                xml.write("    <status>FAIL</status>\n")
                xml.write("    <message>" + escape(test.result.statusMessage) + "</message>\n")
                xml.write("    <data>(test data will be embedded here at some point)</data>\n")
            elif test.result.status == TestResult.StatusType.WARN:
                xml.write("    <name>" + escape(test.name) + "</name>\n")
                xml.write("    <status>WARN</status>\n")
                xml.write("    <message>" + escape(test.result.statusMessage) + "</message>\n")
                xml.write("    <data>(test data will be embedded here at some point)</data>\n")
            elif test.result.status == TestResult.StatusType.NA:
                xml.write("    <name>" + escape(test.name) + "</name>\n")
                xml.write("    <status>NA</status>\n")
            else:
                xml.write("    <name>" + escape(test.name) + "</name>\n")
                xml.write("    <status>UNKNOWN</status>\n")
                xml.write("    <message>" + escape(test.result.statusMessage) + "</message>\n")
            xml.write("  </result>\n")
        xml.write("</results>\n")

        xml.write("</loganalysis>\n")
        # BUGFIX: do not close sys.stdout when writing to '-'; the caller
        # (and any later prints) still need it open.
        if xml is not sys.stdout:
            xml.close()
def main():
    """Command-line entry point: parse arguments, load the log, run the
    test suite and emit plain-text and/or XML reports."""
    # NOTE(review): dirName is computed but never used in this function.
    dirName = os.path.dirname(os.path.abspath(__file__))

    # deal with command line arguments
    parser = argparse.ArgumentParser(description='Analyze an APM Dataflash log for known issues')
    parser.add_argument('logfile', type=argparse.FileType('r'), help='path to Dataflash log file (or - for stdin)')
    parser.add_argument('-f', '--format', metavar='', type=str, action='store', choices=['bin', 'log', 'auto'], default='auto', help='log file format: \'bin\',\'log\' or \'auto\'')
    parser.add_argument('-q', '--quiet', metavar='', action='store_const', const=True, help='quiet mode, do not print results')
    parser.add_argument('-p', '--profile', metavar='', action='store_const', const=True, help='output performance profiling data')
    parser.add_argument('-s', '--skip_bad', metavar='', action='store_const', const=True, help='skip over corrupt dataflash lines')
    parser.add_argument('-e', '--empty', metavar='', action='store_const', const=True, help='run an initial check for an empty log')
    parser.add_argument('-x', '--xml', type=str, metavar='XML file', nargs='?', const='', default='', help='write output to specified XML file (or - for stdout)')
    parser.add_argument('-v', '--verbose', metavar='', action='store_const', const=True, help='verbose output')
    args = parser.parse_args()

    # load the log; only the file *name* is passed on -- DataflashLog does
    # its own reading
    startTime = time.time()
    logdata = DataflashLog.DataflashLog(args.logfile.name, format=args.format, ignoreBadlines=args.skip_bad)  # read log
    endTime = time.time()
    if args.profile:
        print("Log file read time: %.2f seconds" % (endTime - startTime))

    # check for empty log if requested
    if args.empty:
        emptyErr = DataflashLog.DataflashLogHelper.isLogEmpty(logdata)
        if emptyErr:
            sys.stderr.write("Empty log file: %s, %s" % (logdata.filename, emptyErr))
            sys.exit(1)

    # run the tests, and gather timings
    testSuite = TestSuite()
    startTime = time.time()
    testSuite.run(logdata, args.verbose)  # run tests
    endTime = time.time()
    if args.profile:
        print("Test suite run time: %.2f seconds" % (endTime - startTime))

    # deal with output: plain text unless quiet, XML only when -x was given
    if not args.quiet:
        testSuite.outputPlainText(args.profile)
    if args.xml:
        testSuite.outputXML(args.xml)
        if not args.quiet:
            print("XML output written to file: %s\n" % args.xml)
# Standard script entry-point guard.
if __name__ == "__main__":
    main()
|
gpl-3.0
|
fbossy/SickRage
|
tornado/test/twisted_test.py
|
18
|
25239
|
# Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unittest for the twisted-style reactor.
"""
from __future__ import absolute_import, division, print_function, with_statement
import logging
import os
import shutil
import signal
import sys
import tempfile
import threading
import warnings
try:
import fcntl
from twisted.internet.defer import Deferred, inlineCallbacks, returnValue
from twisted.internet.interfaces import IReadDescriptor, IWriteDescriptor
from twisted.internet.protocol import Protocol
from twisted.python import log
from tornado.platform.twisted import TornadoReactor, TwistedIOLoop
from zope.interface import implementer
have_twisted = True
except ImportError:
have_twisted = False
# The core of Twisted 12.3.0 is available on python 3, but twisted.web is not
# so test for it separately.
try:
from twisted.web.client import Agent, readBody
from twisted.web.resource import Resource
from twisted.web.server import Site
# As of Twisted 15.0.0, twisted.web is present but fails our
# tests due to internal str/bytes errors.
have_twisted_web = sys.version_info < (3,)
except ImportError:
have_twisted_web = False
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.escape import utf8
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.platform.select import SelectIOLoop
from tornado.testing import bind_unused_port
from tornado.test.util import unittest
from tornado.util import import_object
from tornado.web import RequestHandler, Application
# Decorators used throughout this module to skip tests whose optional
# dependencies (twisted, functools.singledispatch backport) are absent.
skipIfNoTwisted = unittest.skipUnless(have_twisted,
                                      "twisted module not present")

skipIfNoSingleDispatch = unittest.skipIf(
    gen.singledispatch is None, "singledispatch module not present")
def save_signal_handlers():
    """Snapshot the current SIGINT/SIGTERM/SIGCHLD handlers.

    Raises if twisted appears to have left its handlers installed while a
    non-twisted IOLoop is configured, since that indicates improper cleanup.
    """
    snapshot = {
        sig: signal.getsignal(sig)
        for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGCHLD)
    }
    if "twisted" in repr(snapshot):
        if not issubclass(IOLoop.configured_class(), TwistedIOLoop):
            # when the global ioloop is twisted, we expect the signal
            # handlers to be installed.  Otherwise, it means we're not
            # cleaning up after twisted properly.
            raise Exception("twisted signal handlers already installed")
    return snapshot
def restore_signal_handlers(saved):
    """Reinstall every handler captured by save_signal_handlers()."""
    for signum, handler in saved.items():
        signal.signal(signum, handler)
class ReactorTestCase(unittest.TestCase):
    """Base fixture: a TornadoReactor on a private IOLoop, with process
    signal handlers saved in setUp and restored in tearDown."""

    def setUp(self):
        self._saved_signals = save_signal_handlers()
        self._io_loop = IOLoop()
        self._reactor = TornadoReactor(self._io_loop)

    def tearDown(self):
        # close the loop's fds so nothing leaks between tests
        self._io_loop.close(all_fds=True)
        restore_signal_handlers(self._saved_signals)
@skipIfNoTwisted
class ReactorWhenRunningTest(ReactorTestCase):
    """callWhenRunning must fire callbacks, including one registered while
    the reactor is already running."""

    def test_whenRunning(self):
        self._whenRunningCalled = False
        self._anotherWhenRunningCalled = False
        self._reactor.callWhenRunning(self.whenRunningCallback)
        self._reactor.run()
        self.assertTrue(self._whenRunningCalled)
        self.assertTrue(self._anotherWhenRunningCalled)

    def whenRunningCallback(self):
        self._whenRunningCalled = True
        # register a second callback from inside the first; it must still
        # run even though stop() is requested immediately afterwards
        self._reactor.callWhenRunning(self.anotherWhenRunningCallback)
        self._reactor.stop()

    def anotherWhenRunningCallback(self):
        self._anotherWhenRunningCalled = True
@skipIfNoTwisted
class ReactorCallLaterTest(ReactorTestCase):
    """callLater must schedule a delayed call, expose it through
    getDelayedCalls(), and fire it no earlier than the requested timeout."""

    def test_callLater(self):
        self._laterCalled = False
        self._now = self._reactor.seconds()
        self._timeout = 0.001
        dc = self._reactor.callLater(self._timeout, self.callLaterCallback)
        self.assertEqual(self._reactor.getDelayedCalls(), [dc])
        self._reactor.run()
        self.assertTrue(self._laterCalled)
        # the callback may not fire before the timeout has elapsed
        self.assertTrue(self._called - self._now > self._timeout)
        # once fired, the delayed call is no longer pending
        self.assertEqual(self._reactor.getDelayedCalls(), [])

    def callLaterCallback(self):
        self._laterCalled = True
        self._called = self._reactor.seconds()
        self._reactor.stop()
@skipIfNoTwisted
class ReactorTwoCallLaterTest(ReactorTestCase):
    """Two overlapping callLater timeouts must both fire, each after its
    own delay, with the second (longer) one stopping the reactor."""

    def test_callLater(self):
        self._later1Called = False
        self._later2Called = False
        self._now = self._reactor.seconds()
        self._timeout1 = 0.0005
        dc1 = self._reactor.callLater(self._timeout1, self.callLaterCallback1)
        self._timeout2 = 0.001
        dc2 = self._reactor.callLater(self._timeout2, self.callLaterCallback2)
        # getDelayedCalls() makes no ordering guarantee, so accept both orders
        self.assertTrue(self._reactor.getDelayedCalls() == [dc1, dc2] or
                        self._reactor.getDelayedCalls() == [dc2, dc1])
        self._reactor.run()
        self.assertTrue(self._later1Called)
        self.assertTrue(self._later2Called)
        self.assertTrue(self._called1 - self._now > self._timeout1)
        self.assertTrue(self._called2 - self._now > self._timeout2)
        self.assertEqual(self._reactor.getDelayedCalls(), [])

    def callLaterCallback1(self):
        self._later1Called = True
        self._called1 = self._reactor.seconds()

    def callLaterCallback2(self):
        # the longer timeout fires last and ends the test
        self._later2Called = True
        self._called2 = self._reactor.seconds()
        self._reactor.stop()
@skipIfNoTwisted
class ReactorCallFromThreadTest(ReactorTestCase):
    """callFromThread must marshal a callable from a worker thread back
    onto the reactor (main) thread."""

    def setUp(self):
        super(ReactorCallFromThreadTest, self).setUp()
        self._mainThread = thread.get_ident()

    def tearDown(self):
        self._thread.join()
        super(ReactorCallFromThreadTest, self).tearDown()

    def _newThreadRun(self):
        # runs in the worker thread
        self.assertNotEqual(self._mainThread, thread.get_ident())
        if hasattr(self._thread, 'ident'):  # new in python 2.6
            self.assertEqual(self._thread.ident, thread.get_ident())
        self._reactor.callFromThread(self._fnCalledFromThread)

    def _fnCalledFromThread(self):
        # must have been marshalled back onto the main thread
        self.assertEqual(self._mainThread, thread.get_ident())
        self._reactor.stop()

    def _whenRunningCallback(self):
        self._thread = threading.Thread(target=self._newThreadRun)
        self._thread.start()

    def testCallFromThread(self):
        self._reactor.callWhenRunning(self._whenRunningCallback)
        self._reactor.run()
@skipIfNoTwisted
class ReactorCallInThread(ReactorTestCase):
    """callInThread must run the callable off the main thread."""

    def setUp(self):
        super(ReactorCallInThread, self).setUp()
        self._mainThread = thread.get_ident()

    def _fnCalledInThread(self, *args, **kwargs):
        # runs in the reactor's thread pool, not the main thread
        self.assertNotEqual(thread.get_ident(), self._mainThread)
        self._reactor.callFromThread(lambda: self._reactor.stop())

    def _whenRunningCallback(self):
        self._reactor.callInThread(self._fnCalledInThread)

    def testCallInThread(self):
        self._reactor.callWhenRunning(self._whenRunningCallback)
        self._reactor.run()
class Reader(object):
    """Minimal read-descriptor wrapper: exposes a file object's fileno to
    the reactor and invokes *callback* with the file object on doRead."""

    def __init__(self, fd, callback):
        self._callback = callback
        self._fd = fd

    def fileno(self):
        return self._fd.fileno()

    def logPrefix(self):
        return "Reader"

    def doRead(self):
        # the reactor reports readability; hand the file object to the callback
        self._callback(self._fd)

    def close(self):
        self._fd.close()

    def connectionLost(self, reason):
        self.close()

    def readConnectionLost(self, reason):
        self.close()
# Apply the zope.interface decorator only when twisted could be imported;
# done post-hoc so the class definition itself has no hard dependency.
if have_twisted:
    Reader = implementer(IReadDescriptor)(Reader)
class Writer(object):
    """Minimal write-descriptor wrapper: exposes a file object's fileno to
    the reactor and invokes *callback* with the file object on doWrite."""

    def __init__(self, fd, callback):
        self._callback = callback
        self._fd = fd

    def fileno(self):
        return self._fd.fileno()

    def logPrefix(self):
        return "Writer"

    def doWrite(self):
        # the reactor reports writability; hand the file object to the callback
        self._callback(self._fd)

    def close(self):
        self._fd.close()

    def connectionLost(self, reason):
        self.close()
# Same post-hoc interface declaration as for Reader above.
if have_twisted:
    Writer = implementer(IWriteDescriptor)(Writer)
@skipIfNoTwisted
class ReactorReaderWriterTest(ReactorTestCase):
    """Exercise addReader/addWriter/removeWriter over an os.pipe pair."""

    def _set_nonblocking(self, fd):
        # the reactor polls these fds; blocking reads/writes would hang
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)

    def setUp(self):
        super(ReactorReaderWriterTest, self).setUp()
        r, w = os.pipe()
        self._set_nonblocking(r)
        self._set_nonblocking(w)
        set_close_exec(r)
        set_close_exec(w)
        # unbuffered binary file objects wrapping the raw pipe fds
        self._p1 = os.fdopen(r, "rb", 0)
        self._p2 = os.fdopen(w, "wb", 0)

    def tearDown(self):
        super(ReactorReaderWriterTest, self).tearDown()
        self._p1.close()
        self._p2.close()

    def _testReadWrite(self):
        """
        In this test the writer writes an 'x' to its fd. The reader
        reads it, check the value and ends the test.
        """
        self.shouldWrite = True

        def checkReadInput(fd):
            self.assertEquals(fd.read(1), b'x')
            self._reactor.stop()

        def writeOnce(fd):
            # only write the first time the fd reports writable
            if self.shouldWrite:
                self.shouldWrite = False
                fd.write(b'x')
        self._reader = Reader(self._p1, checkReadInput)
        self._writer = Writer(self._p2, writeOnce)

        self._reactor.addWriter(self._writer)

        # Test that adding the reader twice adds it only once to
        # IOLoop.
        self._reactor.addReader(self._reader)
        self._reactor.addReader(self._reader)

    def testReadWrite(self):
        self._reactor.callWhenRunning(self._testReadWrite)
        self._reactor.run()

    def _testNoWriter(self):
        """
        In this test we have no writer. Make sure the reader doesn't
        read anything.
        """
        def checkReadInput(fd):
            self.fail("Must not be called.")

        def stopTest():
            # Close the writer here since the IOLoop doesn't know
            # about it.
            self._writer.close()
            self._reactor.stop()
        self._reader = Reader(self._p1, checkReadInput)

        # We create a writer, but it should never be invoked.
        self._writer = Writer(self._p2, lambda fd: fd.write('x'))

        # Test that adding and removing the writer leaves us with no writer.
        self._reactor.addWriter(self._writer)
        self._reactor.removeWriter(self._writer)

        # Test that adding and removing the reader doesn't cause
        # unintended effects.
        self._reactor.addReader(self._reader)

        # Wake up after a moment and stop the test
        self._reactor.callLater(0.001, stopTest)

    def testNoWriter(self):
        self._reactor.callWhenRunning(self._testNoWriter)
        self._reactor.run()
# Test various combinations of twisted and tornado http servers,
# http clients, and event loop interfaces.
@skipIfNoTwisted
@unittest.skipIf(not have_twisted_web, 'twisted web not present')
class CompatibilityTests(unittest.TestCase):
    """Cross-test tornado and twisted HTTP servers/clients sharing one
    TornadoReactor, driving the shared loop either through the IOLoop
    interface or through the reactor interface."""

    def setUp(self):
        self.saved_signals = save_signal_handlers()
        self.io_loop = IOLoop()
        self.io_loop.make_current()
        self.reactor = TornadoReactor(self.io_loop)

    def tearDown(self):
        self.reactor.disconnectAll()
        self.io_loop.clear_current()
        self.io_loop.close(all_fds=True)
        restore_signal_handlers(self.saved_signals)

    def start_twisted_server(self):
        # minimal twisted.web site on an ephemeral localhost port
        class HelloResource(Resource):
            isLeaf = True

            def render_GET(self, request):
                return "Hello from twisted!"
        site = Site(HelloResource())
        port = self.reactor.listenTCP(0, site, interface='127.0.0.1')
        self.twisted_port = port.getHost().port

    def start_tornado_server(self):
        # minimal tornado app on an ephemeral port, with logging silenced
        class HelloHandler(RequestHandler):
            def get(self):
                self.write("Hello from tornado!")
        app = Application([('/', HelloHandler)],
                          log_function=lambda x: None)
        server = HTTPServer(app, io_loop=self.io_loop)
        sock, self.tornado_port = bind_unused_port()
        server.add_sockets([sock])

    def run_ioloop(self):
        # drive the shared loop through the tornado interface
        self.stop_loop = self.io_loop.stop
        self.io_loop.start()
        self.reactor.fireSystemEvent('shutdown')

    def run_reactor(self):
        # drive the shared loop through the twisted interface
        self.stop_loop = self.reactor.stop
        self.stop = self.reactor.stop
        self.reactor.run()

    def tornado_fetch(self, url, runner):
        # fetch with tornado's AsyncHTTPClient, using runner() to spin
        # the loop until the callback stops it
        responses = []
        client = AsyncHTTPClient(self.io_loop)

        def callback(response):
            responses.append(response)
            self.stop_loop()
        client.fetch(url, callback=callback)
        runner()
        self.assertEqual(len(responses), 1)
        responses[0].rethrow()
        return responses[0]

    def twisted_fetch(self, url, runner):
        # http://twistedmatrix.com/documents/current/web/howto/client.html
        chunks = []
        client = Agent(self.reactor)
        d = client.request(b'GET', utf8(url))

        class Accumulator(Protocol):
            def __init__(self, finished):
                self.finished = finished

            def dataReceived(self, data):
                chunks.append(data)

            def connectionLost(self, reason):
                self.finished.callback(None)

        def callback(response):
            finished = Deferred()
            response.deliverBody(Accumulator(finished))
            return finished
        d.addCallback(callback)

        def shutdown(failure):
            if hasattr(self, 'stop_loop'):
                self.stop_loop()
            elif failure is not None:
                # loop hasn't been initialized yet; try our best to
                # get an error message out. (the runner() interaction
                # should probably be refactored).
                try:
                    failure.raiseException()
                except:
                    logging.error('exception before starting loop', exc_info=True)
        d.addBoth(shutdown)
        runner()
        self.assertTrue(chunks)
        return ''.join(chunks)

    def twisted_coroutine_fetch(self, url, runner):
        body = [None]

        @gen.coroutine
        def f():
            # This is simpler than the non-coroutine version, but it cheats
            # by reading the body in one blob instead of streaming it with
            # a Protocol.
            client = Agent(self.reactor)
            response = yield client.request(b'GET', utf8(url))
            with warnings.catch_warnings():
                # readBody has a buggy DeprecationWarning in Twisted 15.0:
                # https://twistedmatrix.com/trac/changeset/43379
                warnings.simplefilter('ignore', category=DeprecationWarning)
                body[0] = yield readBody(response)
            self.stop_loop()
        self.io_loop.add_callback(f)
        runner()
        return body[0]

    def testTwistedServerTornadoClientIOLoop(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://127.0.0.1:%d' % self.twisted_port, self.run_ioloop)
        self.assertEqual(response.body, 'Hello from twisted!')

    def testTwistedServerTornadoClientReactor(self):
        self.start_twisted_server()
        response = self.tornado_fetch(
            'http://127.0.0.1:%d' % self.twisted_port, self.run_reactor)
        self.assertEqual(response.body, 'Hello from twisted!')

    def testTornadoServerTwistedClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, 'Hello from tornado!')

    def testTornadoServerTwistedClientReactor(self):
        self.start_tornado_server()
        response = self.twisted_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_reactor)
        self.assertEqual(response, 'Hello from tornado!')

    @skipIfNoSingleDispatch
    def testTornadoServerTwistedCoroutineClientIOLoop(self):
        self.start_tornado_server()
        response = self.twisted_coroutine_fetch(
            'http://127.0.0.1:%d' % self.tornado_port, self.run_ioloop)
        self.assertEqual(response, 'Hello from tornado!')
@skipIfNoTwisted
@skipIfNoSingleDispatch
class ConvertDeferredTest(unittest.TestCase):
    """gen.convert_yielded must wrap a twisted Deferred, propagating both
    its result and its exception."""

    def test_success(self):
        @inlineCallbacks
        def fn():
            if False:
                # inlineCallbacks doesn't work with regular functions;
                # must have a yield even if it's unreachable.
                yield
            returnValue(42)
        f = gen.convert_yielded(fn())
        self.assertEqual(f.result(), 42)

    def test_failure(self):
        @inlineCallbacks
        def fn():
            if False:
                yield
            1 / 0
        f = gen.convert_yielded(fn())
        # the ZeroDivisionError raised inside the Deferred must surface
        # from the converted Future's result()
        with self.assertRaises(ZeroDivisionError):
            f.result()
if have_twisted:
    # Import and run as much of twisted's test suite as possible.
    # This is unfortunately rather dependent on implementation details,
    # but there doesn't appear to be a clean all-in-one conformance test
    # suite for reactors.
    #
    # This is a list of all test suites using the ReactorBuilder
    # available in Twisted 11.0.0 and 11.1.0 (and a blacklist of
    # specific test methods to be disabled).
    twisted_tests = {
        'twisted.internet.test.test_core.ObjectModelIntegrationTest': [],
        'twisted.internet.test.test_core.SystemEventTestsBuilder': [
            'test_iterate',  # deliberately not supported
            # Fails on TwistedIOLoop and AsyncIOLoop.
            'test_runAfterCrash',
        ],
        'twisted.internet.test.test_fdset.ReactorFDSetTestsBuilder': [
            "test_lostFileDescriptor",  # incompatible with epoll and kqueue
        ],
        'twisted.internet.test.test_process.ProcessTestsBuilder': [
            # Only work as root.  Twisted's "skip" functionality works
            # with py27+, but not unittest2 on py26.
            'test_changeGID',
            'test_changeUID',
        ],
        # Process tests appear to work on OSX 10.7, but not 10.6
        # 'twisted.internet.test.test_process.PTYProcessTestsBuilder': [
        #    'test_systemCallUninterruptedByChildExit',
        #    ],
        'twisted.internet.test.test_tcp.TCPClientTestsBuilder': [
            'test_badContext',  # ssl-related; see also SSLClientTestsMixin
        ],
        'twisted.internet.test.test_tcp.TCPPortTestsBuilder': [
            # These use link-local addresses and cause firewall prompts on mac
            'test_buildProtocolIPv6AddressScopeID',
            'test_portGetHostOnIPv6ScopeID',
            'test_serverGetHostOnIPv6ScopeID',
            'test_serverGetPeerOnIPv6ScopeID',
        ],
        'twisted.internet.test.test_tcp.TCPConnectionTestsBuilder': [],
        'twisted.internet.test.test_tcp.WriteSequenceTests': [],
        'twisted.internet.test.test_tcp.AbortConnectionTestCase': [],
        'twisted.internet.test.test_threads.ThreadTestsBuilder': [],
        'twisted.internet.test.test_time.TimeTestsBuilder': [],
        # Extra third-party dependencies (pyOpenSSL)
        # 'twisted.internet.test.test_tls.SSLClientTestsMixin': [],
        'twisted.internet.test.test_udp.UDPServerTestsBuilder': [],
        'twisted.internet.test.test_unix.UNIXTestsBuilder': [
            # Platform-specific.  These tests would be skipped automatically
            # if we were running twisted's own test runner.
            'test_connectToLinuxAbstractNamespace',
            'test_listenOnLinuxAbstractNamespace',
            # These tests use twisted's sendmsg.c extension and sometimes
            # fail with what looks like uninitialized memory errors
            # (more common on pypy than cpython, but I've seen it on both)
            'test_sendFileDescriptor',
            'test_sendFileDescriptorTriggersPauseProducing',
            'test_descriptorDeliveredBeforeBytes',
            'test_avoidLeakingFileDescriptors',
        ],
        'twisted.internet.test.test_unix.UNIXDatagramTestsBuilder': [
            'test_listenOnLinuxAbstractNamespace',
        ],
        'twisted.internet.test.test_unix.UNIXPortTestsBuilder': [],
    }
    for test_name, blacklist in twisted_tests.items():
        try:
            test_class = import_object(test_name)
        except (ImportError, AttributeError):
            # suite not present in this twisted version; skip it
            continue
        for test_func in blacklist:
            if hasattr(test_class, test_func):
                # The test_func may be defined in a mixin, so clobber
                # it instead of delattr()
                setattr(test_class, test_func, lambda self: None)

        def make_test_subclass(test_class):
            class TornadoTest(test_class):
                _reactors = ["tornado.platform.twisted._TestReactor"]

                def setUp(self):
                    # Twisted's tests expect to be run from a temporary
                    # directory; they create files in their working directory
                    # and don't always clean up after themselves.
                    self.__curdir = os.getcwd()
                    self.__tempdir = tempfile.mkdtemp()
                    os.chdir(self.__tempdir)
                    super(TornadoTest, self).setUp()

                def tearDown(self):
                    super(TornadoTest, self).tearDown()
                    os.chdir(self.__curdir)
                    shutil.rmtree(self.__tempdir)

                def buildReactor(self):
                    self.__saved_signals = save_signal_handlers()
                    return test_class.buildReactor(self)

                def unbuildReactor(self, reactor):
                    test_class.unbuildReactor(self, reactor)
                    # Clean up file descriptors (especially epoll/kqueue
                    # objects) eagerly instead of leaving them for the
                    # GC.  Unfortunately we can't do this in reactor.stop
                    # since twisted expects to be able to unregister
                    # connections in a post-shutdown hook.
                    reactor._io_loop.close(all_fds=True)
                    restore_signal_handlers(self.__saved_signals)

            # keep the original name so test output stays readable
            TornadoTest.__name__ = test_class.__name__
            return TornadoTest
        test_subclass = make_test_subclass(test_class)
        # inject the generated TestCases into this module's namespace so
        # the test runner discovers them
        globals().update(test_subclass.makeTestCaseClasses())

    # Since we're not using twisted's test runner, it's tricky to get
    # logging set up well.  Most of the time it's easiest to just
    # leave it turned off, but while working on these tests you may want
    # to uncomment one of the other lines instead.
    log.defaultObserver.stop()
    # import sys; log.startLogging(sys.stderr, setStdout=0)
    # log.startLoggingWithObserver(log.PythonLoggingObserver().emit, setStdout=0)
    # import logging; logging.getLogger('twisted').setLevel(logging.WARNING)
if have_twisted:
    class LayeredTwistedIOLoop(TwistedIOLoop):
        """Layers a TwistedIOLoop on top of a TornadoReactor on a SelectIOLoop.
        This is of course silly, but is useful for testing purposes to make
        sure we're implementing both sides of the various interfaces
        correctly. In some tests another TornadoReactor is layered on top
        of the whole stack.
        """
        def initialize(self):
            # When configured to use LayeredTwistedIOLoop we can't easily
            # get the next-best IOLoop implementation, so use the lowest common
            # denominator.
            self.real_io_loop = SelectIOLoop()
            reactor = TornadoReactor(io_loop=self.real_io_loop)
            super(LayeredTwistedIOLoop, self).initialize(reactor=reactor)
            # Re-assert this loop as the current IOLoop once it starts running.
            self.add_callback(self.make_current)
        def close(self, all_fds=False):
            super(LayeredTwistedIOLoop, self).close(all_fds=all_fds)
            # HACK: This is the same thing that test_class.unbuildReactor does.
            for reader in self.reactor._internalReaders:
                self.reactor.removeReader(reader)
                reader.connectionLost(None)
            # Also shut down the select-based loop that backs the reactor.
            self.real_io_loop.close(all_fds=all_fds)
        def stop(self):
            # One of twisted's tests fails if I don't delay crash()
            # until the reactor has started, but if I move this to
            # TwistedIOLoop then the tests fail when I'm *not* running
            # tornado-on-twisted-on-tornado. I'm clearly missing something
            # about the startup/crash semantics, but since stop and crash
            # are really only used in tests it doesn't really matter.
            self.reactor.callWhenRunning(self.reactor.crash)
if __name__ == "__main__":
    # Script entry point: run all test cases defined/collected above.
    unittest.main()
|
gpl-3.0
|
pedropena/iteexe
|
twisted/test/test_defer.py
|
14
|
20998
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for defer module.
"""
from __future__ import nested_scopes
from twisted.trial import unittest, util
from twisted.internet import reactor, defer
from twisted.python import failure, log
class GenericError(Exception): pass
class DeferredTestCase(unittest.TestCase):
    """Exercises the core Deferred machinery: callback/errback chains,
    DeferredList aggregation, timeouts, pause/unpause, chained Deferreds,
    and the gatherResults/maybeDeferred helpers."""
    def setUp(self):
        # Per-test slots recording what the shared callbacks below received.
        self.callback_results = None
        self.errback_results = None
        self.callback2_results = None
    def _callback(self, *args, **kw):
        # Record the invocation and pass the first positional argument
        # on down the callback chain.
        self.callback_results = args, kw
        return args[0]
    def _callback2(self, *args, **kw):
        self.callback2_results = args, kw
    def _errback(self, *args, **kw):
        self.errback_results = args, kw
    def testCallbackWithoutArgs(self):
        deferred = defer.Deferred()
        deferred.addCallback(self._callback)
        deferred.callback("hello")
        self.failUnlessEqual(self.errback_results, None)
        self.failUnlessEqual(self.callback_results, (('hello',), {}))
    def testCallbackWithArgs(self):
        # Extra positional args given to addCallback are appended after
        # the fired result.
        deferred = defer.Deferred()
        deferred.addCallback(self._callback, "world")
        deferred.callback("hello")
        self.failUnlessEqual(self.errback_results, None)
        self.failUnlessEqual(self.callback_results, (('hello', 'world'), {}))
    def testCallbackWithKwArgs(self):
        deferred = defer.Deferred()
        deferred.addCallback(self._callback, world="world")
        deferred.callback("hello")
        self.failUnlessEqual(self.errback_results, None)
        self.failUnlessEqual(self.callback_results,
                             (('hello',), {'world': 'world'}))
    def testTwoCallbacks(self):
        # _callback returns its first arg, so _callback2 sees the same value.
        deferred = defer.Deferred()
        deferred.addCallback(self._callback)
        deferred.addCallback(self._callback2)
        deferred.callback("hello")
        self.failUnlessEqual(self.errback_results, None)
        self.failUnlessEqual(self.callback_results,
                             (('hello',), {}))
        self.failUnlessEqual(self.callback2_results,
                             (('hello',), {}))
    def testDeferredList(self):
        defr1 = defer.Deferred()
        defr2 = defer.Deferred()
        defr3 = defer.Deferred()
        dl = defer.DeferredList([defr1, defr2, defr3])
        result = []
        def cb(resultList, result=result):
            result.extend(resultList)
        def catch(err):
            return None
        dl.addCallbacks(cb, cb)
        defr1.callback("1")
        defr2.addErrback(catch)
        # "catch" is added to eat the GenericError that will be passed on by
        # the DeferredList's callback on defr2. If left unhandled, the
        # Failure object would cause a log.err() warning about "Unhandled
        # error in Deferred". Twisted's pyunit watches for log.err calls and
        # treats them as failures. So "catch" must eat the error to prevent
        # it from flunking the test.
        defr2.errback(GenericError("2"))
        defr3.callback("3")
        self.failUnlessEqual([result[0],
                              #result[1][1] is now a Failure instead of an Exception
                              (result[1][0], str(result[1][1].value)),
                              result[2]],
                             [(defer.SUCCESS, "1"),
                              (defer.FAILURE, "2"),
                              (defer.SUCCESS, "3")])
    def testEmptyDeferredList(self):
        # An empty DeferredList fires immediately with [] ...
        result = []
        def cb(resultList, result=result):
            result.append(resultList)
        dl = defer.DeferredList([])
        dl.addCallbacks(cb)
        self.failUnlessEqual(result, [[]])
        result[:] = []
        # ... unless fireOnOneCallback is set, in which case it never fires.
        dl = defer.DeferredList([], fireOnOneCallback=1)
        dl.addCallbacks(cb)
        self.failUnlessEqual(result, [])
    def testDeferredListFireOnOneError(self):
        defr1 = defer.Deferred()
        defr2 = defer.Deferred()
        defr3 = defer.Deferred()
        dl = defer.DeferredList([defr1, defr2, defr3], fireOnOneErrback=1)
        result = []
        dl.addErrback(result.append)
        # consume errors after they pass through the DeferredList (to avoid
        # 'Unhandled error in Deferred'.
        def catch(err):
            return None
        defr2.addErrback(catch)
        # fire one Deferred's callback, no result yet
        defr1.callback("1")
        self.failUnlessEqual(result, [])
        # fire one Deferred's errback -- now we have a result
        defr2.errback(GenericError("from def2"))
        self.failUnlessEqual(len(result), 1)
        # extract the result from the list
        failure = result[0]
        # the type of the failure is a FirstError
        self.failUnless(issubclass(failure.type, defer.FirstError),
            'issubclass(failure.type, defer.FirstError) failed: '
            'failure.type is %r' % (failure.type,)
        )
        # FirstErrors act like tuples for backwards compatibility :(
        firstError = failure.value
        self.failUnlessEqual(firstError[0], firstError.subFailure)
        self.failUnlessEqual(firstError[1], firstError.index)
        # check that the GenericError("2") from the deferred at index 1
        # (defr2) is intact inside failure.value
        self.failUnlessEqual(firstError.subFailure.type, GenericError)
        self.failUnlessEqual(firstError.subFailure.value.args, ("from def2",))
        self.failUnlessEqual(firstError.index, 1)
    def testDeferredListDontConsumeErrors(self):
        # Without consumeErrors the original Deferred's errback chain
        # still sees the failure.
        d1 = defer.Deferred()
        dl = defer.DeferredList([d1])
        errorTrap = []
        d1.addErrback(errorTrap.append)
        result = []
        dl.addCallback(result.append)
        d1.errback(GenericError('Bang'))
        self.failUnlessEqual('Bang', errorTrap[0].value.args[0])
        self.failUnlessEqual(1, len(result))
        self.failUnlessEqual('Bang', result[0][0][1].value.args[0])
    def testDeferredListConsumeErrors(self):
        # With consumeErrors=True the DeferredList swallows the failure,
        # so errbacks added to the original Deferred see nothing.
        d1 = defer.Deferred()
        dl = defer.DeferredList([d1], consumeErrors=True)
        errorTrap = []
        d1.addErrback(errorTrap.append)
        result = []
        dl.addCallback(result.append)
        d1.errback(GenericError('Bang'))
        self.failUnlessEqual([], errorTrap)
        self.failUnlessEqual(1, len(result))
        self.failUnlessEqual('Bang', result[0][0][1].value.args[0])
    def testDeferredListFireOnOneErrorWithAlreadyFiredDeferreds(self):
        # Create some deferreds, and errback one
        d1 = defer.Deferred()
        d2 = defer.Deferred()
        d1.errback(GenericError('Bang'))
        # *Then* build the DeferredList, with fireOnOneErrback=True
        dl = defer.DeferredList([d1, d2], fireOnOneErrback=True)
        result = []
        dl.addErrback(result.append)
        self.failUnlessEqual(1, len(result))
        d1.addErrback(lambda e: None)  # Swallow error
    def testDeferredListWithAlreadyFiredDeferreds(self):
        # Create some deferreds, and err one, call the other
        d1 = defer.Deferred()
        d2 = defer.Deferred()
        d1.errback(GenericError('Bang'))
        d2.callback(2)
        # *Then* build the DeferredList
        dl = defer.DeferredList([d1, d2])
        result = []
        dl.addCallback(result.append)
        self.failUnlessEqual(1, len(result))
        d1.addErrback(lambda e: None)  # Swallow error
    def testTimeOut(self):
        # setTimeout arranges for a TimeoutError failure; the reactor is
        # crashed from the Deferred's chain so run() returns.
        d = defer.Deferred()
        d.setTimeout(1.0)
        l = []
        d.addErrback(l.append)
        # Make sure the reactor is shutdown
        d.addBoth(lambda x, r=reactor: r.crash())
        self.assertEquals(l, [])
        reactor.run()
        self.assertEquals(len(l), 1)
        self.assertEquals(l[0].type, defer.TimeoutError)
    def testImmediateSuccess(self):
        l = []
        d = defer.succeed("success")
        d.addCallback(l.append)
        self.assertEquals(l, ["success"])
    def testImmediateSuccess2(self):
        l = []
        d = defer.succeed("success")
        # this is how trial.util.deferredResult works
        # NOTE: never actually use trial.util.deferredResult. It doesn't work.
        d.setTimeout(1.0)
        d.addCallback(l.append)
        self.assertEquals(l, ["success"])
    def testImmediateFailure(self):
        l = []
        d = defer.fail(GenericError("fail"))
        d.addErrback(l.append)
        self.assertEquals(str(l[0].value), "fail")
    def testPausedFailure(self):
        # A paused Deferred holds its failure until unpause().
        l = []
        d = defer.fail(GenericError("fail"))
        d.pause()
        d.addErrback(l.append)
        self.assertEquals(l, [])
        d.unpause()
        self.assertEquals(str(l[0].value), "fail")
    def testCallbackErrors(self):
        # An exception raised in a callback, or a returned Failure, both
        # divert the chain to the errback side.
        l = []
        d = defer.Deferred().addCallback(lambda _: 1/0).addErrback(l.append)
        d.callback(1)
        self.assert_(isinstance(l[0].value, ZeroDivisionError))
        l = []
        d = defer.Deferred().addCallback(
            lambda _: failure.Failure(ZeroDivisionError())).addErrback(l.append)
        d.callback(1)
        self.assert_(isinstance(l[0].value, ZeroDivisionError))
    def testUnpauseBeforeCallback(self):
        d = defer.Deferred()
        d.pause()
        d.addCallback(self._callback)
        d.unpause()
    def testReturnDeferred(self):
        # A callback returning a Deferred suspends the outer chain until
        # the inner Deferred produces a result.
        d = defer.Deferred()
        d2 = defer.Deferred()
        d2.pause()
        d.addCallback(lambda r, d2=d2: d2)
        d.addCallback(self._callback)
        d.callback(1)
        assert self.callback_results is None, "Should not have been called yet."
        d2.callback(2)
        assert self.callback_results is None, "Still should not have been called yet."
        d2.unpause()
        assert self.callback_results[0][0] == 2, "Result should have been from second deferred:%s"% (self.callback_results,)
    def testGatherResults(self):
        # test successful list of deferreds
        l = []
        defer.gatherResults([defer.succeed(1), defer.succeed(2)]).addCallback(l.append)
        self.assertEquals(l, [[1, 2]])
        # test failing list of deferreds
        l = []
        dl = [defer.succeed(1), defer.fail(ValueError)]
        defer.gatherResults(dl).addErrback(l.append)
        self.assertEquals(len(l), 1)
        self.assert_(isinstance(l[0], failure.Failure))
        # get rid of error
        dl[1].addErrback(lambda e: 1)
    def testMaybeDeferred(self):
        # maybeDeferred wraps a synchronous result in a fired Deferred...
        S, E = [], []
        d = defer.maybeDeferred((lambda x: x + 5), 10)
        d.addCallbacks(S.append, E.append)
        self.assertEquals(E, [])
        self.assertEquals(S, [15])
        S, E = [], []
        # ...and converts a synchronous exception into a Failure.
        try:
            '10' + 5
        except TypeError, e:
            expected = str(e)
        d = defer.maybeDeferred((lambda x: x + 5), '10')
        d.addCallbacks(S.append, E.append)
        self.assertEquals(S, [])
        self.assertEquals(len(E), 1)
        self.assertEquals(str(E[0].value), expected)
        # Returning the Deferred lets trial wait for the async part.
        d = defer.Deferred()
        reactor.callLater(0.2, d.callback, 'Success')
        d.addCallback(self.assertEquals, 'Success')
        d.addCallback(self._testMaybeError)
        return d
    def _testMaybeError(self, ignored):
        d = defer.Deferred()
        reactor.callLater(0.2, d.errback, failure.Failure(RuntimeError()))
        self.assertFailure(d, RuntimeError)
        return d
class AlreadyCalledTestCase(unittest.TestCase):
    """Verifies that firing a Deferred a second time raises
    AlreadyCalledError, and that with debugging enabled the error message
    identifies the creator and the first invoker of the Deferred."""
    def setUp(self):
        # Save the global debug flag so tearDown can restore it.
        self._deferredWasDebugging = defer.getDebugging()
        defer.setDebugging(True)
    def tearDown(self):
        defer.setDebugging(self._deferredWasDebugging)
    def _callback(self, *args, **kw):
        pass
    def _errback(self, *args, **kw):
        pass
    # Helper invokers whose names are looked for in the debug traceback.
    def _call_1(self, d):
        d.callback("hello")
    def _call_2(self, d):
        d.callback("twice")
    def _err_1(self, d):
        d.errback(failure.Failure(RuntimeError()))
    def _err_2(self, d):
        d.errback(failure.Failure(RuntimeError()))
    def testAlreadyCalled_CC(self):
        d = defer.Deferred()
        d.addCallbacks(self._callback, self._errback)
        self._call_1(d)
        self.failUnlessRaises(defer.AlreadyCalledError, self._call_2, d)
    def testAlreadyCalled_CE(self):
        d = defer.Deferred()
        d.addCallbacks(self._callback, self._errback)
        self._call_1(d)
        self.failUnlessRaises(defer.AlreadyCalledError, self._err_2, d)
    def testAlreadyCalled_EE(self):
        d = defer.Deferred()
        d.addCallbacks(self._callback, self._errback)
        self._err_1(d)
        self.failUnlessRaises(defer.AlreadyCalledError, self._err_2, d)
    def testAlreadyCalled_EC(self):
        d = defer.Deferred()
        d.addCallbacks(self._callback, self._errback)
        self._err_1(d)
        self.failUnlessRaises(defer.AlreadyCalledError, self._call_2, d)
    def _count(self, linetype, func, lines, expected):
        # Count debug-trace lines of the given type ('C'reator/'I'nvoker)
        # that mention the given function name.
        count = 0
        for line in lines:
            if (line.startswith(' %s:' % linetype) and
                line.endswith(' %s' % func)):
                count += 1
        self.failUnless(count == expected)
    def _check(self, e, caller, invoker1, invoker2):
        # make sure the debugging information is vaguely correct
        lines = e.args[0].split("\n")
        # the creator should list the creator (testAlreadyCalledDebug) but not
        # _call_1 or _call_2 or other invokers
        self._count('C', caller, lines, 1)
        self._count('C', '_call_1', lines, 0)
        self._count('C', '_call_2', lines, 0)
        self._count('C', '_err_1', lines, 0)
        self._count('C', '_err_2', lines, 0)
        # invoker should list the first invoker but not the second
        self._count('I', invoker1, lines, 1)
        self._count('I', invoker2, lines, 0)
    def testAlreadyCalledDebug_CC(self):
        d = defer.Deferred()
        d.addCallbacks(self._callback, self._errback)
        self._call_1(d)
        try:
            self._call_2(d)
        except defer.AlreadyCalledError, e:
            self._check(e, "testAlreadyCalledDebug_CC", "_call_1", "_call_2")
        else:
            self.fail("second callback failed to raise AlreadyCalledError")
    def testAlreadyCalledDebug_CE(self):
        d = defer.Deferred()
        d.addCallbacks(self._callback, self._errback)
        self._call_1(d)
        try:
            self._err_2(d)
        except defer.AlreadyCalledError, e:
            self._check(e, "testAlreadyCalledDebug_CE", "_call_1", "_err_2")
        else:
            self.fail("second errback failed to raise AlreadyCalledError")
    def testAlreadyCalledDebug_EC(self):
        d = defer.Deferred()
        d.addCallbacks(self._callback, self._errback)
        self._err_1(d)
        try:
            self._call_2(d)
        except defer.AlreadyCalledError, e:
            self._check(e, "testAlreadyCalledDebug_EC", "_err_1", "_call_2")
        else:
            self.fail("second callback failed to raise AlreadyCalledError")
    def testAlreadyCalledDebug_EE(self):
        d = defer.Deferred()
        d.addCallbacks(self._callback, self._errback)
        self._err_1(d)
        try:
            self._err_2(d)
        except defer.AlreadyCalledError, e:
            self._check(e, "testAlreadyCalledDebug_EE", "_err_1", "_err_2")
        else:
            self.fail("second errback failed to raise AlreadyCalledError")
    def testNoDebugging(self):
        # With debugging off the exception carries no creator/invoker info.
        defer.setDebugging(False)
        d = defer.Deferred()
        d.addCallbacks(self._callback, self._errback)
        self._call_1(d)
        try:
            self._call_2(d)
        except defer.AlreadyCalledError, e:
            self.failIf(e.args)
        else:
            self.fail("second callback failed to raise AlreadyCalledError")
    def testSwitchDebugging(self):
        # Make sure Deferreds can deal with debug state flipping
        # around randomly.  This is covering a particular fixed bug.
        defer.setDebugging(False)
        d = defer.Deferred()
        d.addBoth(lambda ign: None)
        defer.setDebugging(True)
        d.callback(None)
        defer.setDebugging(False)
        d = defer.Deferred()
        d.callback(None)
        defer.setDebugging(True)
        d.addBoth(lambda ign: None)
class LogTestCase(unittest.TestCase):
    """Checks that a failure which falls off the end of a callback chain is
    eventually reported through twisted.python.log (presumably when the
    orphaned Deferred is collected -- see the comment below)."""
    def setUp(self):
        # Capture every log event emitted during the test.
        self.c = []
        log.addObserver(self.c.append)
    def tearDown(self):
        log.removeObserver(self.c.append)
    def testErrorLog(self):
        c = self.c
        defer.Deferred().addCallback(lambda x: 1/0).callback(1)
        # do you think it is rad to have memory leaks glyph
        ## d = defer.Deferred()
        ## d.addCallback(lambda x: 1/0)
        ## d.callback(1)
        ## del d
        c2 = [e for e in c if e["isError"]]
        self.assertEquals(len(c2), 2)
        c2[1]["failure"].trap(ZeroDivisionError)
        # Flush the logged error so trial doesn't fail the test for it.
        log.flushErrors(ZeroDivisionError)
class DeferredTestCaseII(unittest.TestCase):
    """Checks that an empty DeferredList fires synchronously; tearDown
    asserts the callback actually ran."""
    def setUp(self):
        self.callbackRan = 0
    def testDeferredListEmpty(self):
        """Testing empty DeferredList."""
        dl = defer.DeferredList([])
        dl.addCallback(self.cb_empty)
    def cb_empty(self, res):
        self.callbackRan = 1
        self.failUnlessEqual([], res)
    def tearDown(self):
        self.failUnless(self.callbackRan, "Callback was never run.")
class OtherPrimitives(unittest.TestCase):
    """Tests for the synchronization helpers built on Deferreds:
    DeferredLock, DeferredSemaphore and DeferredQueue."""
    def _incr(self, result):
        # Shared callback counting how many acquisitions have completed.
        self.counter += 1
    def setUp(self):
        self.counter = 0
    def testLock(self):
        lock = defer.DeferredLock()
        lock.acquire().addCallback(self._incr)
        self.failUnless(lock.locked)
        self.assertEquals(self.counter, 1)
        # Second acquire queues up until release() hands the lock over.
        lock.acquire().addCallback(self._incr)
        self.failUnless(lock.locked)
        self.assertEquals(self.counter, 1)
        lock.release()
        self.failUnless(lock.locked)
        self.assertEquals(self.counter, 2)
        lock.release()
        self.failIf(lock.locked)
        self.assertEquals(self.counter, 2)
        # run() requires a callable.
        self.assertRaises(TypeError, lock.run)
        firstUnique = object()
        secondUnique = object()
        controlDeferred = defer.Deferred()
        # helper returns an unfired Deferred, so the lock stays held until
        # controlDeferred fires.
        def helper(self, b):
            self.b = b
            return controlDeferred
        resultDeferred = lock.run(helper, self=self, b=firstUnique)
        self.failUnless(lock.locked)
        self.assertEquals(self.b, firstUnique)
        resultDeferred.addCallback(lambda x: setattr(self, 'result', x))
        lock.acquire().addCallback(self._incr)
        self.failUnless(lock.locked)
        self.assertEquals(self.counter, 2)
        controlDeferred.callback(secondUnique)
        self.assertEquals(self.result, secondUnique)
        self.failUnless(lock.locked)
        self.assertEquals(self.counter, 3)
        lock.release()
        self.failIf(lock.locked)
    def testSemaphore(self):
        N = 13
        sem = defer.DeferredSemaphore(N)
        controlDeferred = defer.Deferred()
        def helper(self, arg):
            self.arg = arg
            return controlDeferred
        results = []
        uniqueObject = object()
        resultDeferred = sem.run(helper, self=self, arg=uniqueObject)
        resultDeferred.addCallback(results.append)
        resultDeferred.addCallback(self._incr)
        self.assertEquals(results, [])
        self.assertEquals(self.arg, uniqueObject)
        controlDeferred.callback(None)
        self.assertEquals(results.pop(), None)
        self.assertEquals(self.counter, 1)
        self.counter = 0
        # The first N acquisitions succeed immediately...
        for i in range(1, 1 + N):
            sem.acquire().addCallback(self._incr)
            self.assertEquals(self.counter, i)
        # ...the (N+1)th waits until a token is released.
        sem.acquire().addCallback(self._incr)
        self.assertEquals(self.counter, N)
        sem.release()
        self.assertEquals(self.counter, N + 1)
        for i in range(1, 1 + N):
            sem.release()
            self.assertEquals(self.counter, N + 1)
    def testQueue(self):
        # N = max queued objects, M = max waiting getters.
        N, M = 2, 2
        queue = defer.DeferredQueue(N, M)
        gotten = []
        for i in range(M):
            queue.get().addCallback(gotten.append)
        # An (M+1)th waiting get() overflows the backlog.
        self.assertRaises(defer.QueueUnderflow, queue.get)
        for i in range(M):
            queue.put(i)
            self.assertEquals(gotten, range(i + 1))
        for i in range(N):
            queue.put(N + i)
            self.assertEquals(gotten, range(M))
        self.assertRaises(defer.QueueOverflow, queue.put, None)
        gotten = []
        for i in range(N):
            queue.get().addCallback(gotten.append)
            self.assertEquals(gotten, range(N, N + i + 1))
        # Unbounded queue: pending gets are satisfied in FIFO order.
        queue = defer.DeferredQueue()
        gotten = []
        for i in range(N):
            queue.get().addCallback(gotten.append)
        for i in range(N):
            queue.put(i)
        self.assertEquals(gotten, range(N))
        # Degenerate limits: zero size / zero backlog fail immediately.
        queue = defer.DeferredQueue(size=0)
        self.assertRaises(defer.QueueOverflow, queue.put, None)
        queue = defer.DeferredQueue(backlog=0)
        self.assertRaises(defer.QueueUnderflow, queue.get)
|
gpl-2.0
|
mozilla/normandy
|
normandy/studies/migrations/0004_auto_20190115_0812.py
|
1
|
1814
|
# Generated by Django 2.0.9 on 2019-01-15 08:12
import hashlib
import json
import untangle
import zipfile
from django.core.files.storage import default_storage
from django.db import migrations
def populate_metadata(apps, schema_editor):
    """Backfill extension_id, version, is_legacy and hash for every stored
    Extension by inspecting its XPI archive.

    WebExtensions carry a manifest.json (gecko id + version); legacy add-ons
    carry an install.rdf. Raises if the XPI has neither, or if the id or
    version comes out empty, which aborts the migration.
    """
    Extension = apps.get_model("studies", "Extension")
    for extension in Extension.objects.all():
        with default_storage.open(extension.xpi.name) as f:
            with zipfile.ZipFile(f) as zf:
                files = set(zf.namelist())
                if "manifest.json" in files:
                    # WebExtension: id lives under applications.gecko.id.
                    with zf.open("manifest.json") as manifest_file:
                        data = json.load(manifest_file)
                    extension.extension_id = (
                        data.get("applications", {}).get("gecko", {}).get("id", None)
                    )
                    extension.version = data.get("version")
                elif "install.rdf" in files:
                    # Legacy add-on: parse the RDF description.
                    extension.is_legacy = True
                    with zf.open("install.rdf", "r") as rdf_file:
                        data = untangle.parse(rdf_file.read().decode())
                    extension.extension_id = data.RDF.Description.em_id.cdata
                    extension.version = data.RDF.Description.em_version.cdata
                else:
                    raise Exception("Invalid XPI.")
            if not extension.extension_id or not extension.version:
                raise Exception("Extension ID or version not set.")
            # Rewind and hash the whole XPI file, not just the metadata.
            f.seek(0)
            extension.hash = hashlib.sha256(f.read()).hexdigest()
        extension.save()
def noop(*args, **kwargs):
    """Reverse-migration hook: accept anything, do nothing."""
    return None
class Migration(migrations.Migration):
    # Data migration: populate extension metadata for existing rows.
    # Reversing is a no-op (the populated columns are simply left in place).
    dependencies = [("studies", "0003_auto_20190115_0812")]
    operations = [migrations.RunPython(populate_metadata, noop)]
|
mpl-2.0
|
PrashantKuntala/Machine-Learning
|
Feature Selection/scripts/pearsonFilter.py
|
1
|
1377
|
#!/usr/bin/python
#
# author : prashant kuntala
# date : 31th October,2016
# last modified : 11/4/16
# script that takes feature file and ranks the features based on Pearson Correlation.
from __future__ import division
import os
import numpy as np
import scipy.stats
# reading the data file into an array
# reading the data file into an array
data = np.genfromtxt("features.dat", delimiter = ',')
# Keep the first 20000 feature columns as a matrix.
data = np.matrix(data[:,[range(0,20000)]])
print type(data)
print "dimensions of your feature vector : " + str(data.shape)
#reading the train labels
trainlabels = np.matrix(np.genfromtxt("dextertrainlabels.dat",delimiter = ','))
trainlabels = trainlabels.T
print "dimensions of your trainlabels vector : " + str(trainlabels.shape)
# Label statistics shared by every feature's correlation computation.
ymean = np.mean(trainlabels)
ystd = np.std(trainlabels)
pstRank = []
# Score each feature by |sum((x - xmean)(y - ymean))| / (xstd * ystd).
# NOTE(review): the 1/n factor of the true Pearson coefficient is omitted;
# it is a constant across features, so the relative ranking is unaffected.
for i in range(0,20000):
    numerator = 0
    xmean = np.mean(data[:,i])
    xstd = np.std(data[:,i])
    if((xstd*ystd) == 0):
        # Constant feature (or constant labels): zero correlation by fiat.
        pstRank.append(0)
    else:
        # Accumulate over the 300 training samples.
        for j in range(0,300):
            numerator = numerator + ((data[j,i] - xmean)*(trainlabels[j] - ymean))
        pcc = abs(numerator.item(0)/(xstd*ystd))
        #print type(numerator)
        #print type(pcc)
        pstRank.append(pcc.item(0))
#print pstRank
# NOTE(review): np.argsort is ascending, so the least-correlated feature
# index comes first -- confirm this is the intended ranking direction.
pstRank = list(np.argsort(pstRank))
#print pstRank
string = ""
# Emit 1-based feature indices, one per line.
for i in range(0,len(pstRank)):
    string = string + str(pstRank[i]+1)+"\n"
openfile = open(os.getcwd()+"/PearsonRanking.dat","w")
openfile.write(string)
openfile.close()
|
mit
|
IronLanguages/ironpython2
|
Src/StdLib/Lib/test/test_glob.py
|
4
|
6944
|
import glob
import os
import shutil
import sys
import unittest
from test.test_support import run_unittest, TESTFN
def fsdecode(s):
    # Decode a byte-string filename to text using the filesystem encoding
    # (Python 2: ``unicode`` builtin).
    return unicode(s, sys.getfilesystemencoding())
class GlobTests(unittest.TestCase):
    """Tests for glob.glob/glob.iglob against a small temporary directory
    tree (plus symlinks where the platform supports them)."""
    def norm(self, *parts):
        # Absolute, normalized path of *parts* inside the temp tree.
        return os.path.normpath(os.path.join(self.tempdir, *parts))
    def mktemp(self, *parts):
        # Create an empty file (and any missing parent dirs) in the tree.
        filename = self.norm(*parts)
        base, file = os.path.split(filename)
        if not os.path.exists(base):
            os.makedirs(base)
        f = open(filename, 'w')
        f.close()
    def setUp(self):
        self.tempdir = TESTFN + "_dir"
        self.mktemp('a', 'D')
        self.mktemp('aab', 'F')
        self.mktemp('.aa', 'G')
        self.mktemp('.bb', 'H')
        self.mktemp('aaa', 'zzzF')
        self.mktemp('ZZZ')
        self.mktemp('a', 'bcd', 'EF')
        self.mktemp('a', 'bcd', 'efg', 'ha')
        if hasattr(os, 'symlink'):
            # sym1/sym2 dangle deliberately; sym3 points at a real directory.
            os.symlink(self.norm('broken'), self.norm('sym1'))
            os.symlink('broken', self.norm('sym2'))
            os.symlink(os.path.join('a', 'bcd'), self.norm('sym3'))
    def tearDown(self):
        shutil.rmtree(self.tempdir)
    def glob(self, *parts):
        # Run the pattern through glob/iglob in both str and unicode form
        # and check all four agree before returning the str results.
        if len(parts) == 1:
            pattern = parts[0]
        else:
            pattern = os.path.join(*parts)
        p = os.path.join(self.tempdir, pattern)
        res = glob.glob(p)
        self.assertItemsEqual(glob.iglob(p), res)
        ures = [fsdecode(x) for x in res]
        self.assertItemsEqual(glob.glob(fsdecode(p)), ures)
        self.assertItemsEqual(glob.iglob(fsdecode(p)), ures)
        return res
    def assertSequencesEqual_noorder(self, l1, l2):
        # Equal as multisets and as sorted sequences, ignoring order.
        l1 = list(l1)
        l2 = list(l2)
        self.assertEqual(set(l1), set(l2))
        self.assertEqual(sorted(l1), sorted(l2))
    def test_glob_literal(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('a'), [self.norm('a')])
        eq(self.glob('a', 'D'), [self.norm('a', 'D')])
        eq(self.glob('aab'), [self.norm('aab')])
        eq(self.glob('zymurgy'), [])
        res = glob.glob('*')
        self.assertEqual({type(r) for r in res}, {str})
        res = glob.glob(os.path.join(os.curdir, '*'))
        self.assertEqual({type(r) for r in res}, {str})
        # test return types are unicode, but only if os.listdir
        # returns unicode filenames
        tmp = os.listdir(fsdecode(os.curdir))
        if {type(x) for x in tmp} == {unicode}:
            res = glob.glob(u'*')
            self.assertEqual({type(r) for r in res}, {unicode})
            res = glob.glob(os.path.join(fsdecode(os.curdir), u'*'))
            self.assertEqual({type(r) for r in res}, {unicode})
    def test_glob_one_directory(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa']))
        eq(self.glob('*a'), map(self.norm, ['a', 'aaa']))
        # Hidden entries only match patterns that start with a dot.
        eq(self.glob('.*'), map(self.norm, ['.aa', '.bb']))
        eq(self.glob('?aa'), map(self.norm, ['aaa']))
        eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab']))
        eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab']))
        eq(self.glob('*q'), [])
    def test_glob_nested_directory(self):
        eq = self.assertSequencesEqual_noorder
        if os.path.normcase("abCD") == "abCD":
            # case-sensitive filesystem
            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')])
        else:
            # case insensitive filesystem
            eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'),
                                             self.norm('a', 'bcd', 'efg')])
        eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')])
    def test_glob_directory_names(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('*', 'D'), [self.norm('a', 'D')])
        eq(self.glob('*', '*a'), [])
        eq(self.glob('a', '*', '*', '*a'),
           [self.norm('a', 'bcd', 'efg', 'ha')])
        eq(self.glob('?a?', '*F'), [self.norm('aaa', 'zzzF'),
                                    self.norm('aab', 'F')])
    def test_glob_directory_with_trailing_slash(self):
        # Patterns ending with a slash shouldn't match non-dirs
        res = glob.glob(self.norm('Z*Z') + os.sep)
        self.assertEqual(res, [])
        res = glob.glob(self.norm('ZZZ') + os.sep)
        self.assertEqual(res, [])
        # When there is a wildcard pattern which ends with os.sep, glob()
        # doesn't blow up.
        res = glob.glob(self.norm('aa*') + os.sep)
        self.assertEqual(len(res), 2)
        # either of these results is reasonable
        self.assertIn(set(res), [
                      {self.norm('aaa'), self.norm('aab')},
                      {self.norm('aaa') + os.sep, self.norm('aab') + os.sep},
                      ])
    def test_glob_unicode_directory_with_trailing_slash(self):
        # Same as test_glob_directory_with_trailing_slash, but with an
        # unicode argument.
        res = glob.glob(fsdecode(self.norm('Z*Z') + os.sep))
        self.assertEqual(res, [])
        res = glob.glob(fsdecode(self.norm('ZZZ') + os.sep))
        self.assertEqual(res, [])
        res = glob.glob(fsdecode(self.norm('aa*') + os.sep))
        self.assertEqual(len(res), 2)
        # either of these results is reasonable
        self.assertIn(set(res), [
                      {fsdecode(self.norm('aaa')), fsdecode(self.norm('aab'))},
                      {fsdecode(self.norm('aaa') + os.sep),
                       fsdecode(self.norm('aab') + os.sep)},
                      ])
    @unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
    def test_glob_symlinks(self):
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('sym3'), [self.norm('sym3')])
        eq(self.glob('sym3', '*'), [self.norm('sym3', 'EF'),
                                    self.norm('sym3', 'efg')])
        self.assertIn(self.glob('sym3' + os.sep),
                      [[self.norm('sym3')], [self.norm('sym3') + os.sep]])
        eq(self.glob('*', '*F'),
           [self.norm('aaa', 'zzzF'), self.norm('aab', 'F'),
            self.norm('sym3', 'EF')])
    @unittest.skipUnless(hasattr(os, 'symlink'), "Requires symlink support")
    def test_glob_broken_symlinks(self):
        # Dangling symlinks should still be listed by name.
        eq = self.assertSequencesEqual_noorder
        eq(self.glob('sym*'), [self.norm('sym1'), self.norm('sym2'),
                               self.norm('sym3')])
        eq(self.glob('sym1'), [self.norm('sym1')])
        eq(self.glob('sym2'), [self.norm('sym2')])
    @unittest.skipUnless(sys.platform == "win32", "Win32 specific test")
    def test_glob_magic_in_drive(self):
        # Wildcards in the drive portion must not match anything.
        eq = self.assertSequencesEqual_noorder
        eq(glob.glob('*:'), [])
        eq(glob.glob(u'*:'), [])
        eq(glob.glob('?:'), [])
        eq(glob.glob(u'?:'), [])
def test_main():
    # Entry point used by Python 2's regrtest machinery.
    run_unittest(GlobTests)
if __name__ == "__main__":
    test_main()
|
apache-2.0
|
anthonyalmarza/hendrix
|
setup.py
|
1
|
1886
|
from hendrix import __version__
import errno
import os
import sys
from setuptools import setup, find_packages
def file_name(rel_path):
    """Resolve *rel_path* relative to the directory containing this file."""
    return os.path.join(os.path.dirname(__file__), rel_path)
def read(rel_path):
    """Return the entire contents of the repository file at *rel_path*."""
    f = open(file_name(rel_path))
    try:
        return f.read()
    finally:
        f.close()
def readlines(rel_path):
    """Return the list of (newline-terminated) lines of the file at *rel_path*."""
    with open(file_name(rel_path)) as f:
        return f.readlines()
def mkdir_p(path):
    """Create *path* like ``mkdir -p``: build missing parents, and treat an
    already-existing directory as success rather than an error."""
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # Re-raise anything except "directory already exists".
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
# Data directory for the init.d template, placed next to the interpreter.
share_path = os.path.join(
    os.path.dirname(sys.executable),
    'share/hendrix'
)
# NOTE(review): this runs on every import of setup.py, creating the share
# directory even for commands that don't install anything.
mkdir_p(share_path)
setup(
    author="hangarunderground",
    author_email="hendrix@reelio.com",
    name="hendrix",
    packages=find_packages(),
    version=__version__,
    url="https://github.com/hangarunderground/hendrix",
    download_url=(
        "https://github.com/hangarunderground/hendrix/tarball/"
        "v"+__version__+"-beta"
    ),
    description="A deployment module for Django that uses Twisted.",
    long_description=read('docs/long_desc.rst'),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
    ],
    keywords=["django", "twisted", "async", "logging"],
    scripts=[
        'hendrix/utils/scripts/hx',
        'hendrix/utils/scripts/install-hendrix-service'
    ],
    data_files=[
        (share_path, ['hendrix/utils/templates/init.d.j2', ]),
    ],
    # Runtime dependencies come straight from the 'requirements' file.
    install_requires=readlines('requirements'),
    extras_require={'ssl': ['pyopenssl', ]}
)
|
mit
|
t794104/ansible
|
test/units/modules/remote_management/lxca/test_lxca_cmms.py
|
43
|
4394
|
import json
import pytest
from units.compat import mock
from ansible.modules.remote_management.lxca import lxca_cmms
@pytest.fixture(scope='module')
@mock.patch("ansible.module_utils.remote_management.lxca.common.close_conn", autospec=True)
def setup_module(close_conn):
    # Module-scoped fixture: stub close_conn so no real LXCA connection
    # teardown is attempted by any test in this module.
    close_conn.return_value = True
class TestMyModule():
    """Unit tests for the lxca_cmms Ansible module: required-argument
    validation, argument_spec wiring, and the cmms_by_uuid command path.
    All LXCA connections and module execution are mocked."""
    @pytest.mark.parametrize('patch_ansible_module',
                             [
                                 # Each case omits at least one required
                                 # argument (auth_url / login_user / login_password).
                                 {},
                                 {
                                     "auth_url": "https://10.240.14.195",
                                     "login_user": "USERID",
                                 },
                                 {
                                     "auth_url": "https://10.240.14.195",
                                     "login_password": "Password",
                                 },
                                 {
                                     "login_user": "USERID",
                                     "login_password": "Password",
                                 },
                             ],
                             indirect=['patch_ansible_module'])
    @pytest.mark.usefixtures('patch_ansible_module')
    @mock.patch("ansible.module_utils.remote_management.lxca.common.setup_conn", autospec=True)
    @mock.patch("ansible.modules.remote_management.lxca.lxca_cmms.execute_module", autospec=True)
    def test_without_required_parameters(self, _setup_conn, _execute_module,
                                         mocker, capfd, setup_module):
        """Failure must occurs when all parameters are missing"""
        # AnsibleModule exits via SystemExit and reports via stdout JSON.
        with pytest.raises(SystemExit):
            _setup_conn.return_value = "Fake connection"
            _execute_module.return_value = "Fake execution"
            lxca_cmms.main()
        out, err = capfd.readouterr()
        results = json.loads(out)
        assert results['failed']
        assert 'missing required arguments' in results['msg']
    @mock.patch("ansible.module_utils.remote_management.lxca.common.setup_conn", autospec=True)
    @mock.patch("ansible.modules.remote_management.lxca.lxca_cmms.execute_module", autospec=True)
    @mock.patch("ansible.modules.remote_management.lxca.lxca_cmms.AnsibleModule", autospec=True)
    def test__argument_spec(self, ansible_mod_cls, _execute_module, _setup_conn, setup_module):
        # Verify main() constructs AnsibleModule with exactly this spec.
        expected_arguments_spec = dict(
            login_user=dict(required=True),
            login_password=dict(required=True, no_log=True),
            command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
                                                          'cmms_by_chassis_uuid']),
            auth_url=dict(required=True),
            uuid=dict(default=None),
            chassis=dict(default=None),
        )
        _setup_conn.return_value = "Fake connection"
        _execute_module.return_value = []
        mod_obj = ansible_mod_cls.return_value
        args = {
            "auth_url": "https://10.243.30.195",
            "login_user": "USERID",
            "login_password": "password",
            "command_options": "cmms",
        }
        mod_obj.params = args
        lxca_cmms.main()
        assert(mock.call(argument_spec=expected_arguments_spec,
                         supports_check_mode=False) == ansible_mod_cls.call_args)
    @mock.patch("ansible.module_utils.remote_management.lxca.common.setup_conn", autospec=True)
    @mock.patch("ansible.modules.remote_management.lxca.lxca_cmms._cmms_by_uuid",
                autospec=True)
    @mock.patch("ansible.modules.remote_management.lxca.lxca_cmms.AnsibleModule",
                autospec=True)
    def test__cmms_empty_list(self, ansible_mod_cls, _get_cmms, _setup_conn, setup_module):
        # The by-uuid helper should be invoked with the module and its
        # params, and its (empty) return value passed through unchanged.
        mod_obj = ansible_mod_cls.return_value
        args = {
            "auth_url": "https://10.243.30.195",
            "login_user": "USERID",
            "login_password": "password",
            "uuid": "3C737AA5E31640CE949B10C129A8B01F",
            "command_options": "cmms_by_uuid",
        }
        mod_obj.params = args
        _setup_conn.return_value = "Fake connection"
        empty_nodes_list = []
        _get_cmms.return_value = empty_nodes_list
        ret_cmms = _get_cmms(mod_obj, args)
        assert mock.call(mod_obj, mod_obj.params) == _get_cmms.call_args
        assert _get_cmms.return_value == ret_cmms
|
gpl-3.0
|
pdeesawat/PSIT58_test_01
|
Test_Python_code/final_code/Malaysia/earthquake.py
|
1
|
2089
|
import plotly.plotly as py
import plotly.graph_objs as go
# Load the CSV database.  FIX: the original opened the file without ever
# closing it and reused the name `data` for both the file handle and the
# trace list; `with` closes the handle and the rows get their own name.
records = []
with open('Real_Final_database_02.csv') as csvfile:
    for line in csvfile:
        records.append(line.strip().split(','))

# Separate the Malaysia earthquake rows into one series per metric.
# Row layout: country, year, disaster type, affected, damage, deaths.
year = []
affect = []
damage = []
death = []
for row in records:
    if row[0] == 'Malaysia' and row[2] == 'Earthquake':
        year.append(int(row[1]))
        affect.append(int(row[3]))
        damage.append(int(row[4]))
        death.append(int(row[5]))

# Create and style traces.
# FIXES: the first trace name carried stray literal quotes ("'Total
# Affected'"); `hoverinfo` was given free text, but plotly only accepts
# flag combinations such as 'y+name'; the hex colours were missing the
# leading '#', so plotly could not parse them.
trace1 = go.Scatter(
    x=year,
    y=affect,
    mode='lines+markers',
    name='Total Affected',
    hoverinfo='y+name',
    line=dict(
        shape='spline',
        color='#00CC00',
        width=1.5),
)
trace2 = go.Scatter(
    x=year,
    y=damage,
    mode='lines+markers',
    name='Total Damage \'000 US',
    hoverinfo='y+name',
    line=dict(
        shape='spline',
        color='#3399FF',
        width=1.5),
    yaxis='y2'
)
trace3 = go.Scatter(
    x=year,
    y=death,
    mode='lines+markers',
    name='Total Death',
    hoverinfo='y+name',
    line=dict(
        shape='spline',
        color='#FF3300',
        width=1.5),
    yaxis='y3'
)

data = [trace1, trace2, trace3]

# Three y-axes, one per metric, colour-matched to their traces.
layout = go.Layout(
    title='Earthquake in Malaysia',
    yaxis=dict(
        title='Total affected',
        titlefont=dict(
            color='#00CC00'
        ),
        tickfont=dict(
            color='#00CC00'
        )
    ),
    yaxis2=dict(
        title='Total Damage \'000 US',
        titlefont=dict(
            color='#3399FF'
        ),
        tickfont=dict(
            color='#3399FF'
        ),
        anchor='free',
        overlaying='y',
        side='left',
        position=0.15
    ),
    yaxis3=dict(
        title='Total Death',
        titlefont=dict(
            color='#FF3300'
        ),
        tickfont=dict(
            color='#FF3300'
        ),
        anchor='x',
        overlaying='y',
        side='right'
    )
)

# Uploads the figure to the plotly service (network side effect).
fig = go.Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='bennyy')
|
apache-2.0
|
scrollback/kuma
|
vendor/packages/sqlalchemy/test/perf/masseagerload.py
|
7
|
1564
|
from sqlalchemy import *
from sqlalchemy.orm import *
from sqlalchemy.test import profiling
# Workload size: NUM total subitems, spread over NUM/DIVISOR parent items
# (DIVISOR subitems per parent).
NUM = 500
DIVISOR = 50

# In-memory SQLite keeps the profile focused on ORM overhead, not disk I/O.
engine = create_engine('sqlite://')
meta = MetaData(engine)

# Simple one-to-many schema: items -> subitems.
items = Table('items', meta,
    Column('item_id', Integer, primary_key=True),
    Column('value', String(100)))

subitems = Table('subitems', meta,
    Column('sub_id', Integer, primary_key=True),
    Column('parent_id', Integer, ForeignKey('items.item_id')),
    Column('value', String(100)))

class Item(object):pass
class SubItem(object):pass

# Eager ("joined") loading of Item.subs is the behaviour being measured.
mapper(Item, items, properties={'subs':relationship(SubItem, lazy='joined')})
mapper(SubItem, subitems)
def load():
    """Bulk-insert NUM/DIVISOR parent rows and NUM child rows (executemany)."""
    global l
    l = []
    # Python 2 integer division: one dict per parent item.
    for x in range(1,NUM/DIVISOR + 1):
        l.append({'item_id':x, 'value':'this is item #%d' % x})
    #print l
    items.insert().execute(*l)
    for x in range(1, NUM/DIVISOR + 1):
        l = []
        for y in range(1, DIVISOR + 1):
            # z is the globally unique subitem id: DIVISOR children per parent x.
            z = ((x-1) * DIVISOR) + y
            l.append({'sub_id':z,'value':'this is item #%d' % z, 'parent_id':x})
        #print l
        subitems.insert().execute(*l)
@profiling.profiled('massjoinedload', always=True, sort=['cumulative'])
def massjoinedload(session):
    """Profile loading every Item; children arrive via the joined eager load."""
    session.begin()
    query = session.query(Item)
    l = query.all()
    # Python 2 print statement; touching l[0].subs proves the eager load ran.
    print "loaded ", len(l), " items each with ", len(l[0].subs), "subitems"
def all():
    """Create the schema, run the load + profiled query, always drop tables."""
    # NOTE(review): the name shadows the builtin all(); it is called from the
    # __main__ guard below, so renaming would need a coordinated change.
    meta.create_all()
    try:
        load()
        massjoinedload(create_session())
    finally:
        meta.drop_all()
if __name__ == '__main__':
    # Run the full create/load/profile/drop cycle when executed as a script.
    all()
|
mpl-2.0
|
scripteed/mtasa-blue
|
vendor/google-breakpad/src/third_party/protobuf/protobuf/gtest/test/gtest_xml_output_unittest.py
|
306
|
9711
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = 'eefacm@gmail.com (Sean Mcafee)'
import errno
import os
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
# Command-line flag and default file name understood by Google Test's XML
# report generator.
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
# Name of the C++ helper binary whose XML report is compared to the golden
# document below.
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"

# Whether this build embeds stack traces in failure messages; the golden XML
# is parameterized on it via %(stack)s.
SUPPORTS_STACK_TRACES = False

if SUPPORTS_STACK_TRACES:
  STACK_TRACE_TEMPLATE = "\nStack trace:\n*"
else:
  STACK_TRACE_TEMPLATE = ""

# Golden XML for the helper binary's full run.  '*' wildcards (e.g. in time
# attributes) are matched loosely by the comparison in gtest_xml_test_utils.
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="15" failures="4" disabled="2" errors="0" time="*" name="AllTests">
  <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
    <testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
  </testsuite>
  <testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
    <testcase name="Fails" status="run" time="*" classname="FailedTest">
      <failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
    </testcase>
  </testsuite>
  <testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
    <testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
    <testcase name="Fails" status="run" time="*" classname="MixedResultTest">
      <failure message="Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
      <failure message="Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
    </testcase>
    <testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
  </testsuite>
  <testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
    <testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
      <failure message="Failed
XML output: &lt;?xml encoding=&quot;utf-8&quot;&gt;&lt;top&gt;&lt;![CDATA[cdata text]]&gt;&lt;/top&gt;" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]>&lt;![CDATA[&lt;/top&gt;%(stack)s]]&gt;</failure>
    </testcase>
  </testsuite>
  <testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
    <testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
      <failure message="Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
    </testcase>
  </testsuite>
  <testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
    <testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
  </testsuite>
  <testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*">
    <testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
    <testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
    <testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
    <testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
  </testsuite>
  <testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
    <testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
    <testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
    <testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
  </testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}

# Golden XML for a program that defines no tests at all.
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*" name="AllTests">
</testsuites>"""
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput("gtest_no_test_unittest",
EXPECTED_EMPTY_XML, 0)
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
"gtest_no_test_unittest")
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, "%s=xml" % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + "out.xml")
if os.path.isfile(xml_path):
os.remove(xml_path)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
command = [gtest_prog_path,
"%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path),
"--shut_down_xml"]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
"%s was killed by signal %d" % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
"the expected exit code %s."
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def _TestXmlOutput(self, gtest_prog_name, expected_xml, expected_exit_code):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + "out.xml")
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = [gtest_prog_path, "%s=xml:%s" % (GTEST_OUTPUT_FLAG, xml_path)]
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
"%s was killed by signal %d" % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
"the expected exit code %s."
% (command, p.exit_code, expected_exit_code))
expected = minidom.parseString(expected_xml)
actual = minidom.parse(xml_path)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual .unlink()
if __name__ == '__main__':
  # Limit the stack trace depth so failure messages stay stable across runs.
  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
  gtest_test_utils.Main()
|
gpl-3.0
|
rubencabrera/odoo
|
addons/l10n_be/wizard/l10n_be_account_vat_declaration.py
|
309
|
10685
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Adapted by Noviat to
# - enforce correct vat number
# - support negative balance
# - assign amount of tax code 71-72 correclty to grid 71 or 72
# - support Noviat tax code scheme
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from openerp.osv import fields, osv
from openerp.tools.translate import _
class l10n_be_vat_declaration(osv.osv_memory):
    """Wizard that generates the Belgian (Intervat) periodical VAT
    declaration as an XML document and offers it for download."""
    # NOTE(review): "l1on_be" (digit one) looks like a typo for "l10n_be",
    # but the same string is reused for 'res_model' in create_xml() below
    # and may be referenced elsewhere in the addon, so it must not be
    # "fixed" in isolation.
    _name = "l1on_be.vat.declaration"
    _description = "Vat Declaration"

    def _get_xml_data(self, cr, uid, context=None):
        # Default for the binary 'file_save' field: re-encode the XML text
        # stashed in the context by create_xml().
        # NOTE(review): context=None would raise AttributeError on .get();
        # presumably this default is only evaluated with a real context.
        if context.get('file_save', False):
            return base64.encodestring(context['file_save'].encode('utf8'))
        return ''

    _columns = {
        'name': fields.char('File Name'),
        'period_id': fields.many2one('account.period','Period', required=True),
        # Root tax code (parent_id False) of the chart used for the declaration.
        'tax_code_id': fields.many2one('account.tax.code', 'Tax Code', domain=[('parent_id', '=', False)], required=True),
        'msg': fields.text('File created', readonly=True),
        'file_save': fields.binary('Save File'),
        'ask_restitution': fields.boolean('Ask Restitution',help='It indicates whether a restitution is to make or not?'),
        'ask_payment': fields.boolean('Ask Payment',help='It indicates whether a payment is to make or not?'),
        'client_nihil': fields.boolean('Last Declaration, no clients in client listing', help='Tick this case only if it concerns only the last statement on the civil or cessation of activity: ' \
            'no clients to be included in the client listing.'),
        'comments': fields.text('Comments'),
    }

    def _get_tax_code(self, cr, uid, context=None):
        # Default tax code: the root (parent-less) tax code of the current
        # user's company, if any.
        obj_tax_code = self.pool.get('account.tax.code')
        obj_user = self.pool.get('res.users')
        company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id
        tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', False)], context=context)
        return tax_code_ids and tax_code_ids[0] or False

    _defaults = {
        'msg': 'Save the File with '".xml"' extension.',
        'file_save': _get_xml_data,
        'name': 'vat_declaration.xml',
        'tax_code_id': _get_tax_code,
    }

    def create_xml(self, cr, uid, ids, context=None):
        """Build the Intervat VAT-declaration XML for the selected period and
        return an act_window that reopens this wizard on the save view with
        the document injected via the context (consumed by _get_xml_data)."""
        obj_tax_code = self.pool.get('account.tax.code')
        obj_acc_period = self.pool.get('account.period')
        obj_user = self.pool.get('res.users')
        obj_partner = self.pool.get('res.partner')
        mod_obj = self.pool.get('ir.model.data')
        if context is None:
            context = {}

        # Grid numbers allowed to appear in the <ns2:Data> section.
        list_of_tags = ['00','01','02','03','44','45','46','47','48','49','54','55','56','57','59','61','62','63','64','71','72','81','82','83','84','85','86','87','88','91']
        data_tax = self.browse(cr, uid, ids[0])
        if data_tax.tax_code_id:
            obj_company = data_tax.tax_code_id.company_id
        else:
            obj_company = obj_user.browse(cr, uid, uid, context=context).company_id
        vat_no = obj_company.partner_id.vat
        if not vat_no:
            raise osv.except_osv(_('Insufficient Data!'), _('No VAT number associated with your company.'))
        # Normalise the VAT number: strip blanks, uppercase; the first two
        # characters are the country prefix.
        vat_no = vat_no.replace(' ','').upper()
        vat = vat_no[2:]

        tax_code_ids = obj_tax_code.search(cr, uid, [('parent_id','child_of',data_tax.tax_code_id.id), ('company_id','=',obj_company.id)], context=context)
        ctx = context.copy()
        data = self.read(cr, uid, ids)[0]
        # sum_period is computed for the period carried in the context.
        ctx['period_id'] = data['period_id'][0]
        tax_info = obj_tax_code.read(cr, uid, tax_code_ids, ['code','sum_period'], context=ctx)

        default_address = obj_partner.address_get(cr, uid, [obj_company.partner_id.id])
        default_address_id = default_address.get("default", obj_company.partner_id.id)
        address_id= obj_partner.browse(cr, uid, default_address_id, context)

        account_period = obj_acc_period.browse(cr, uid, data['period_id'][0], context=context)
        issued_by = vat_no[:2]
        comments = data['comments'] or ''

        # DeclarantReference: partner id + declaration start month + end year.
        send_ref = str(obj_company.partner_id.id) + str(account_period.date_start[5:7]) + str(account_period.date_stop[:4])

        starting_month = account_period.date_start[5:7]
        ending_month = account_period.date_stop[5:7]
        # Python 2 integer division maps the start month onto its quarter.
        quarter = str(((int(starting_month) - 1) / 3) + 1)

        if not address_id.email:
            raise osv.except_osv(_('Insufficient Data!'),_('No email address associated with the company.'))
        if not address_id.phone:
            raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.'))

        # Values substituted into the XML template below.
        file_data = {
                        'issued_by': issued_by,
                        'vat_no': vat_no,
                        'only_vat': vat_no[2:],
                        'cmpny_name': obj_company.name,
                        'address': "%s %s"%(address_id.street or "",address_id.street2 or ""),
                        'post_code': address_id.zip or "",
                        'city': address_id.city or "",
                        'country_code': address_id.country_id and address_id.country_id.code or "",
                        'email': address_id.email or "",
                        'phone': address_id.phone.replace('.','').replace('/','').replace('(','').replace(')','').replace(' ',''),
                        'send_ref': send_ref,
                        'quarter': quarter,
                        'month': starting_month,
                        'year': str(account_period.date_stop[:4]),
                        'client_nihil': (data['client_nihil'] and 'YES' or 'NO'),
                        'ask_restitution': (data['ask_restitution'] and 'YES' or 'NO'),
                        'ask_payment': (data['ask_payment'] and 'YES' or 'NO'),
                        'comments': comments,
                     }

        data_of_file = """<?xml version="1.0"?>
<ns2:VATConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/VATConsignment" VATDeclarationsNbr="1">
<ns2:Representative>
<RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(only_vat)s</RepresentativeID>
<Name>%(cmpny_name)s</Name>
<Street>%(address)s</Street>
<PostCode>%(post_code)s</PostCode>
<City>%(city)s</City>
<CountryCode>%(country_code)s</CountryCode>
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Representative>
<ns2:VATDeclaration SequenceNumber="1" DeclarantReference="%(send_ref)s">
<ns2:Declarant>
<VATNumber xmlns="http://www.minfin.fgov.be/InputCommon">%(only_vat)s</VATNumber>
<Name>%(cmpny_name)s</Name>
<Street>%(address)s</Street>
<PostCode>%(post_code)s</PostCode>
<City>%(city)s</City>
<CountryCode>%(country_code)s</CountryCode>
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Declarant>
<ns2:Period>
""" % (file_data)

        # A period spanning more than one month is reported as a quarter.
        if starting_month != ending_month:
            #starting month and ending month of selected period are not the same
            #it means that the accounting isn't based on periods of 1 month but on quarters
            data_of_file += '\t\t<ns2:Quarter>%(quarter)s</ns2:Quarter>\n\t\t' % (file_data)
        else:
            data_of_file += '\t\t<ns2:Month>%(month)s</ns2:Month>\n\t\t' % (file_data)
        data_of_file += '\t<ns2:Year>%(year)s</ns2:Year>' % (file_data)
        data_of_file += '\n\t\t</ns2:Period>\n'
        data_of_file += '\t\t<ns2:Data>\t'
        cases_list = []
        for item in tax_info:
            # NOTE(review): ending_month is a *string* ('01'..'12') while 12
            # is an int, so this comparison is always True in Python 2 and
            # grid 91 is always skipped -- verify against the Intervat spec.
            if item['code'] == '91' and ending_month != 12:
                #the tax code 91 can only be send for the declaration of December
                continue
            if item['code'] and item['sum_period']:
                # Tax code VI maps to grid 71 (amount due) or 72 (credit)
                # depending on the sign of the balance.
                if item['code'] == 'VI':
                    if item['sum_period'] >= 0:
                        item['code'] = '71'
                    else:
                        item['code'] = '72'
                if item['code'] in list_of_tags:
                    cases_list.append(item)
        # NOTE(review): sorting a list of dicts relies on Python 2's
        # arbitrary dict ordering; the intent is presumably to sort by
        # 'code' -- confirm.
        cases_list.sort()

        for item in cases_list:
            grid_amount_data = {
                'code': str(int(item['code'])),
                # Amounts are reported as absolute values; sign is conveyed
                # by the grid number (e.g. 71 vs 72).
                'amount': '%.2f' % abs(item['sum_period']),
            }
            data_of_file += '\n\t\t\t<ns2:Amount GridNumber="%(code)s">%(amount)s</ns2:Amount''>' % (grid_amount_data)

        data_of_file += '\n\t\t</ns2:Data>'
        data_of_file += '\n\t\t<ns2:ClientListingNihil>%(client_nihil)s</ns2:ClientListingNihil>' % (file_data)
        data_of_file += '\n\t\t<ns2:Ask Restitution="%(ask_restitution)s" Payment="%(ask_payment)s"/>' % (file_data)
        data_of_file += '\n\t\t<ns2:Comment>%(comments)s</ns2:Comment>' % (file_data)
        data_of_file += '\n\t</ns2:VATDeclaration> \n</ns2:VATConsignment>'

        # Reopen this wizard on the 'view_vat_save' form; _get_xml_data will
        # pick the generated document out of the context.
        model_data_ids = mod_obj.search(cr, uid,[('model','=','ir.ui.view'),('name','=','view_vat_save')], context=context)
        resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
        context = dict(context or {})
        context['file_save'] = data_of_file
        return {
            'name': _('Save XML For Vat declaration'),
            'context': context,
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'l1on_be.vat.declaration',
            'views': [(resource_id,'form')],
            'view_id': 'view_vat_save',
            'type': 'ir.actions.act_window',
            'target': 'new',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
hhaoyan/keras
|
keras/layers/noise.py
|
66
|
1791
|
from __future__ import absolute_import
import numpy as np
from .core import MaskedLayer
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
class GaussianNoise(MaskedLayer):
    '''Corrupt the input with additive zero-mean Gaussian noise.

    Noise is injected only at training time; at test time, or when
    sigma is zero, the input passes through unchanged.
    '''
    def __init__(self, sigma):
        super(GaussianNoise, self).__init__()
        self.sigma = sigma
        self.srng = RandomStreams(seed=np.random.randint(10e6))

    def get_output(self, train=False):
        X = self.get_input(train)
        if train and self.sigma != 0:
            noise = self.srng.normal(size=X.shape, avg=0.0,
                                     std=self.sigma,
                                     dtype=theano.config.floatX)
            return X + noise
        return X

    def get_config(self):
        config = {"name": self.__class__.__name__,
                  "sigma": self.sigma}
        return config
class GaussianDropout(MaskedLayer):
    '''Multiplicative Gaussian noise (only active during training).

    Reference:
        Dropout: A Simple Way to Prevent Neural Networks from Overfitting
        Srivastava, Hinton, et al. 2014
        http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf
    '''
    def __init__(self, p):
        super(GaussianDropout, self).__init__()
        self.p = p
        self.srng = RandomStreams(seed=np.random.randint(10e6))

    def get_output(self, train):
        X = self.get_input(train)
        if not train:
            return X
        # self.p is the drop probability (matching the Dropout layer's
        # convention) rather than the retain probability used in the paper.
        stddev = T.sqrt(self.p / (1.0 - self.p))
        multiplier = self.srng.normal(size=X.shape, avg=1.0, std=stddev,
                                      dtype=theano.config.floatX)
        return X * multiplier

    def get_config(self):
        return dict(name=self.__class__.__name__, p=self.p)
|
mit
|
cxxgtxy/tensorflow
|
tensorflow/contrib/tensor_forest/hybrid/python/models/forest_to_data_then_nn_test.py
|
159
|
3275
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the hybrid tensor forest model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.hybrid.python.models import forest_to_data_then_nn
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.framework.ops import Operation
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import googletest
class ForestToDataThenNNTest(test_util.TensorFlowTestCase):
  # Smoke tests: constructing the inference and training graphs of the
  # forest-to-data-then-NN hybrid model must succeed and yield the expected
  # graph object types.

  def setUp(self):
    # Hyperparameters kept small so graph construction is fast; exact values
    # are otherwise arbitrary for these structural tests.
    self.params = tensor_forest.ForestHParams(
        num_classes=2,
        num_features=31,
        layer_size=11,
        num_layers=13,
        num_trees=3,
        connection_probability=0.1,
        hybrid_tree_depth=4,
        regularization_strength=0.01,
        regularization="",
        base_random_seed=10,
        feature_bagging_fraction=1.0,
        learning_rate=0.01,
        weight_init_mean=0.0,
        weight_init_std=0.1)
    self.params.regression = False
    # Derived sizes for a complete binary tree of the configured depth.
    self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
    self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
    self.params.num_features_per_node = (self.params.feature_bagging_fraction *
                                         self.params.num_features)

  def testInferenceConstruction(self):
    """Inference graph construction returns a Tensor."""
    # pylint: disable=W0612
    data = constant_op.constant(
        [[random.uniform(-1, 1) for i in range(self.params.num_features)]
         for _ in range(100)])

    with variable_scope.variable_scope(
        "ForestToDataThenNNTest_testInferenceContruction"):
      graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
      graph = graph_builder.inference_graph(data, None)

      self.assertTrue(isinstance(graph, Tensor))

  def testTrainingConstruction(self):
    """Training graph construction returns an Operation."""
    # pylint: disable=W0612
    data = constant_op.constant(
        [[random.uniform(-1, 1) for i in range(self.params.num_features)]
         for _ in range(100)])
    labels = [1 for _ in range(100)]

    with variable_scope.variable_scope(
        "ForestToDataThenNNTest.testTrainingContruction"):
      graph_builder = forest_to_data_then_nn.ForestToDataThenNN(self.params)
      graph = graph_builder.training_graph(data, labels, None)

      self.assertTrue(isinstance(graph, Operation))
if __name__ == "__main__":
  googletest.main()  # standard TensorFlow test entry point
|
apache-2.0
|
ldoktor/autotest
|
client/fsdev_disks.py
|
2
|
20751
|
import sys, os, re
from autotest.client import utils, fsinfo, fsdev_mgr, partition
from autotest.client.shared import error
# Site-configurable policy helper for partition filtering, drive-name mapping
# and mount-point validation.
fd_mgr = fsdev_mgr.FsdevManager()

# For unmounting / formatting file systems we may have to use a device name
# that is different from the real device name that we have to use to set I/O
# scheduler tunables.
_DISKPART_FILE = '/proc/partitions'  # kernel's view of block devices/partitions
def get_disk_list(std_mounts_only=True, get_all_disks=False):
    """
    Get a list of dictionaries with information about disks on this system.

    @param std_mounts_only: Whether the function should return only disks that
            have a mount point defined (True) or even devices that doesn't
            (False).
    @param get_all_disks: Whether the function should return only partitioned
            disks (False) or return every disk, regardless of being partitioned
            or not (True).
    @return: List of dictionaries with disk information (see more below).

    The 'disk_list' array returned by get_disk_list() has an entry for each
    disk drive we find on the box. Each of these entries is a map with the
    following 3 string values:

        'device'     disk device name (i.e. the part after /dev/)
        'mountpt'    disk mount path
        'tunable'    disk name for setting scheduler tunables (/sys/block/sd??)

    The last value is an integer that indicates the current mount status
    of the drive:

        'mounted'    0  = not currently mounted
                     1  = mounted r/w on the expected path
                    -1  = mounted readonly or at an unexpected path

    When the 'std_mounts_only' argument is True we don't include drives
    mounted on 'unusual' mount points in the result. If a given device is
    partitioned, it will return all partitions that exist on it. If it's not,
    it will return the device itself (ie, if there are /dev/sdb1 and /dev/sdb2,
    those will be returned but not /dev/sdb. if there is only a /dev/sdc, that
    one will be returned).
    """
    # Get hold of the currently mounted file systems
    mounts = utils.system_output('mount').splitlines()

    # Grab all the interesting disk partition names from /proc/partitions,
    # and build up the table of drives present in the system.
    hd_list = []

    # FIXES: the original opened _DISKPART_FILE without ever closing it and
    # compiled an 'hd_regexp' pattern that was never used anywhere; the file
    # is now closed deterministically and the dead regex has been dropped.
    partfile = open(_DISKPART_FILE)
    try:
        for partline in partfile:
            parts = partline.strip().split()
            # Skip the header line and anything that isn't a 4-column entry.
            if len(parts) != 4 or partline.startswith('major'):
                continue

            # Get hold of the partition name
            partname = parts[3]

            if not get_all_disks:
                # The partition name better end with a digit
                # (get only partitioned disks)
                if not partname[-1:].isdigit():
                    continue

            # Process any site-specific filters on the partition name
            if not fd_mgr.use_partition(partname):
                continue

            # We need to know the IDE/SATA/... device name for setting tunables
            tunepath = fd_mgr.map_drive_name(partname)

            # Check whether the device is mounted (and how)
            mstat = 0
            fstype = ''
            fsopts = ''
            fsmkfs = '?'

            # Prepare the full device path for matching
            chkdev = '/dev/' + partname

            mountpt = None
            for mln in mounts:
                splt = mln.split()
                # Typical 'mount' output line looks like this (indices
                # for the split() result shown below):
                #
                #   <device> on <mount_point> type <fstp> <options>
                #      0      1       2         3     4       5
                if splt[0].strip() == chkdev.strip():
                    # Make sure the mount point looks reasonable
                    mountpt = fd_mgr.check_mount_point(partname, splt[2])
                    if not mountpt:
                        mstat = -1
                        break

                    # Grab the file system type and mount options
                    fstype = splt[4]
                    fsopts = splt[5]

                    # Check for something other than a r/w mount
                    if fsopts[:3] != '(rw':
                        mstat = -1
                        break

                    # The drive is mounted at the 'normal' mount point
                    mstat = 1

            # Does the caller only want to allow 'standard' mount points?
            if std_mounts_only and mstat < 0:
                continue

            device_name = ''
            if not get_all_disks:
                # Was this partition mounted at all?
                if not mountpt:
                    mountpt = fd_mgr.check_mount_point(partname, None)
                    # Ask the client where we should mount this partition
                    if not mountpt:
                        continue
            else:
                # FIX: raw string for the regex ("\d" is an invalid escape
                # in modern Python); strips the partition digits to get the
                # whole-disk device path.
                if partname[-1:].isdigit():
                    device_name = re.sub(r"\d", "", "/dev/%s" % partname)

            # NOTE(review): with get_all_disks=True only names ending in a
            # digit ever set device_name, so undigited devices are skipped
            # here -- this seems to contradict the docstring; behaviour
            # preserved pending confirmation.
            if get_all_disks:
                if not device_name:
                    continue

            # Looks like we have a valid disk drive, add it to the list
            hd_list.append({ 'device' : partname,
                             'mountpt': mountpt,
                             'tunable': tunepath,
                             'fs_type': fstype,
                             'fs_opts': fsopts,
                             'fs_mkfs': fsmkfs,
                             'mounted': mstat })
    finally:
        partfile.close()

    return hd_list
def mkfs_all_disks(job, disk_list, fs_type, fs_makeopt, fs_mnt_opt):
"""
Prepare all the drives in 'disk_list' for testing. For each disk this means
unmounting any mount points that use the disk, running mkfs with 'fs_type'
as the file system type and 'fs_makeopt' as the 'mkfs' options, and finally
remounting the freshly formatted drive using the flags in 'fs_mnt_opt'.
"""
for disk in disk_list:
# For now, ext4 isn't quite ready for prime time
if fs_type == "ext4":
fs_type = "ext4dev"
# Grab the device and mount paths for the drive
dev_path = os.path.join('/dev', disk["device"])
mnt_path = disk['mountpt']
# Create a file system instance
try:
fs = job.filesystem(device=dev_path, mountpoint=mnt_path)
except Exception:
raise Exception("Could not create a filesystem on '%s'" % dev_path)
# Make sure the volume is unmounted
if disk["mounted"]:
try:
fs.unmount(mnt_path)
except Exception, info:
raise Exception("umount failed: exception = %s, args = %s" %
(sys.exc_info()[0], info.args))
except Exception:
raise Exception("Could not unmount device ", dev_path)
# Is the drive already formatted with the right file system?
skip_mkfs = match_fs(disk, dev_path, fs_type, fs_makeopt)
# Next step is to create a fresh file system (if we need to)
try:
if not skip_mkfs:
fs.mkfs(fstype = fs_type, args = fs_makeopt)
except Exception:
raise Exception("Could not 'mkfs " + "-t " + fs_type + " " +
fs_makeopt + " " + dev_path + "'")
# Mount the drive with the appropriate FS options
try:
opts = ""
if fs_mnt_opt != "":
opts += " -o " + fs_mnt_opt
fs.mount(mountpoint = mnt_path, fstype = fs_type, args = opts)
except NameError, info:
raise Exception("mount name error: %s" % info)
except Exception, info:
raise Exception("mount failed: exception = %s, args = %s" %
(type(info), info.args))
# If we skipped mkfs we need to wipe the partition clean
if skip_mkfs:
fs.wipe()
# Record the new file system type and options in the disk list
disk["mounted"] = True
disk["fs_type"] = fs_type
disk["fs_mkfs"] = fs_makeopt
disk["fs_opts"] = fs_mnt_opt
# Try to wipe the file system slate clean
utils.drop_caches()
# XXX(gps): Remove this code once refactoring is complete to get rid of these
# nasty test description strings.
def _legacy_str_to_test_flags(fs_desc_string):
    """Convert a legacy FS_LIST string into a partition.FsOptions instance."""
    parsed = re.search('(.*?)/(.*?)/(.*?)/(.*)$', fs_desc_string.strip())
    if parsed is None:
        raise ValueError('unrecognized FS list entry %r' % fs_desc_string)
    # Fields are '<fstype> / <mkfs opts> / <mount opts> / <short tag>'
    fstype, mkfs_flags, mount_options, fs_tag = [
        field.strip() for field in parsed.groups()]
    return partition.FsOptions(fstype=fstype,
                               mkfs_flags=mkfs_flags,
                               mount_options=mount_options,
                               fs_tag=fs_tag)
def prepare_disks(job, fs_desc, disk1_only=False, disk_list=None):
    """
    Prepare drive(s) to contain the file system type / options given in the
    description line 'fs_desc'. When 'disk_list' is not None, we prepare all
    the drives in that list; otherwise we pick the first available data drive
    (which is usually hdc3) and prepare just that one drive.

    Args:
        fs_desc: A partition.FsOptions instance describing the test -OR- a
            legacy string describing the same in '/' separated format:
            'fstype / mkfs opts / mount opts / short name'.
        disk1_only: Boolean, defaults to False. If True, only test the first
            disk.
        disk_list: A list of disks to prepare. If None is given we default to
            asking get_disk_list().
    Returns:
        (mount path of the first disk, short name of the test, list of disks)
        OR (None, '', None) if no fs_desc was given.
    """
    # Special case - do nothing if caller passes no description.
    if not fs_desc:
        return (None, '', None)
    if not isinstance(fs_desc, partition.FsOptions):
        fs_desc = _legacy_str_to_test_flags(fs_desc)
    # If no disk list was given, we'll get it ourselves
    if not disk_list:
        disk_list = get_disk_list()
    # Make sure we have the appropriate 'mkfs' binary for the file system
    mkfs_bin = 'mkfs.' + fs_desc.filesystem
    if fs_desc.filesystem == 'ext4':
        mkfs_bin = 'mkfs.ext4dev'
    try:
        utils.system('which ' + mkfs_bin)
    except Exception:
        try:
            mkfs_bin = os.path.join(job.toolsdir, mkfs_bin)
            utils.system('cp -ufp %s /sbin' % mkfs_bin)
        except Exception:
            raise error.TestError('No mkfs binary available for ' +
                                  fs_desc.filesystem)
    # For 'ext4' we need to add '-E test_fs' to the mkfs options.
    # Guard against appending it twice: fs_desc may be the caller's own
    # FsOptions object and this function can be invoked with it repeatedly
    # (e.g. via prepare_fsdev), which previously accumulated duplicates.
    if fs_desc.filesystem == 'ext4' and ' -E test_fs' not in fs_desc.mkfs_flags:
        fs_desc.mkfs_flags += ' -E test_fs'
    # If the caller only needs one drive, grab the first one only
    if disk1_only:
        disk_list = disk_list[0:1]
    # We have all the info we need to format the drives
    mkfs_all_disks(job, disk_list, fs_desc.filesystem,
                   fs_desc.mkfs_flags, fs_desc.mount_options)
    # Return(mount path of the first disk, test tag value, disk_list)
    return (disk_list[0]['mountpt'], fs_desc.fs_tag, disk_list)
def restore_disks(job, restore=False, disk_list=None):
    """
    Re-create a standard ext2 layout on every drive in 'disk_list' when
    'restore' is True. A disk_list of None turns this into a no-op.
    """
    if not restore or disk_list is None:
        return
    prepare_disks(job, 'ext2 / -q -i20480 -m1 / / restore_ext2',
                  disk1_only=False,
                  disk_list=disk_list)
def wipe_disks(job, disk_list):
    """
    Wipe every drive in 'disk_list' by delegating to the 'wipe'
    functionality in the filesystem class.
    """
    for entry in disk_list:
        partition.wipe_filesystem(job, entry['mountpt'])
def match_fs(disk, dev_path, fs_type, fs_makeopt):
    """
    Return True when 'disk' already carries the requested file system
    ('fs_type') created with options equivalent to 'fs_makeopt', meaning
    mkfs can be skipped; return False otherwise.

    Raises an Exception when the recorded mkfs options differ textually
    from 'fs_makeopt' yet the fsinfo auto-detection claims they match.
    """
    if disk["fs_type"] != fs_type:
        return False
    if disk["fs_mkfs"] == fs_makeopt:
        # No need to mkfs the volume, we only need to remount it
        return True
    if fsinfo.match_mkfs_option(fs_type, dev_path, fs_makeopt):
        if disk["fs_mkfs"] != '?':
            raise Exception("mkfs option strings differ but auto-detection"
                            " code thinks they're identical")
        return True
    return False
##############################################################################
# The following variables/methods are used to invoke fsdev in 'library' mode
#
# Module-level state shared between use_fsdev_lib(), prepare_fsdev() and
# finish_fsdev(); see those functions for how each value is read and written.
FSDEV_JOB = None         # autotest job object, saved by prepare_fsdev()
FSDEV_FS_DESC = None     # file system description set by use_fsdev_lib()
FSDEV_RESTORE = None     # whether finish_fsdev() should restore ext2
FSDEV_PREP_CNT = 0       # number of times prepare_fsdev() has been called
FSDEV_DISK1_ONLY = None  # restrict preparation to the first disk only
FSDEV_DISKLIST = None    # disk list produced by the first prepare_fsdev()
def use_fsdev_lib(fs_desc, disk1_only, reinit_disks):
    """
    Called from the control file to indicate that fsdev is to be used.
    """
    global FSDEV_FS_DESC, FSDEV_RESTORE, FSDEV_DISK1_ONLY, FSDEV_PREP_CNT
    # Stash the caller's choices in module globals for prepare_fsdev() /
    # finish_fsdev() to pick up later.
    FSDEV_FS_DESC = fs_desc
    FSDEV_DISK1_ONLY = disk1_only
    FSDEV_RESTORE = reinit_disks
    # Reset the counter that tracks how many times 'prepare' is called
    FSDEV_PREP_CNT = 0
def prepare_fsdev(job):
    """
    Called from the test file to get the necessary drive(s) ready; return
    a pair of values: the absolute path to the first drive's mount point
    plus the complete disk list (which is useful for tests that need to
    use more than one drive).
    """
    global FSDEV_JOB, FSDEV_DISKLIST, FSDEV_PREP_CNT
    # Nothing to do when use_fsdev_lib() never supplied a description
    if not FSDEV_FS_DESC:
        return (None, None)
    # Only the very first call actually prepares the drives
    FSDEV_PREP_CNT += 1
    if FSDEV_PREP_CNT == 1:
        FSDEV_JOB = job
        mount_path, _tag, disks = prepare_disks(job,
                                                fs_desc=FSDEV_FS_DESC,
                                                disk1_only=FSDEV_DISK1_ONLY,
                                                disk_list=None)
        FSDEV_DISKLIST = disks
        return (mount_path, disks)
    # Subsequent calls simply hand back what was prepared the first time
    return (FSDEV_DISKLIST[0]['mountpt'], FSDEV_DISKLIST)
def finish_fsdev(force_cleanup=False):
    """
    This method can be called from the test file to optionally restore
    all the drives used by the test to a standard ext2 format. Note that
    if use_fsdev_lib() was invoked with 'reinit_disks' not set to True,
    this method does nothing. Note also that only fsdev "server-side"
    dynamic control files should ever set force_cleanup to True.
    """
    if force_cleanup or FSDEV_PREP_CNT == 1:
        restore_disks(job=FSDEV_JOB,
                      restore=FSDEV_RESTORE,
                      disk_list=FSDEV_DISKLIST)
##############################################################################
class fsdev_disks:
    """
    Disk drive handling class used for file system development
    """

    def __init__(self, job):
        # Keep a reference to the autotest job so methods can use its services
        self.job = job

    # Some clients need to access the 'fsdev manager' instance directly
    def get_fsdev_mgr(self):
        """Return the module-level 'fsdev manager' instance (fd_mgr)."""
        return fd_mgr

    def config_sched_tunables(self, desc_file):
        """
        Load the scheduler-tunable path map for the running kernel.

        'desc_file' must contain a Python literal: a dict keyed by kernel
        version ('X.Y.Z'), mapping to per-scheduler arrays of tunable paths.
        Builds self.tune_loc as {tunable_name: full_path} and records
        self.kernel_ver. Raises when the running kernel has no entry.

        NOTE(review): the file content is eval()'d -- only trusted,
        locally-controlled description files should be passed here.
        """
        # Parse the file that describes the scheduler tunables and their paths
        self.tune_loc = eval(open(desc_file).read())
        # Figure out what kernel we're running on
        kver = utils.system_output('uname -r')
        kver = re.match("([0-9]+\.[0-9]+\.[0-9]+).*", kver)
        kver = kver.group(1)
        # Make sure we know how to handle the kernel we're running on
        tune_files = self.tune_loc[kver]
        if tune_files is None:
            raise Exception("Scheduler tunables not available for kernel " +
                            kver)
        # Save the kernel version for later
        self.kernel_ver = kver
        # For now we always use 'anticipatory'
        tune_paths = tune_files["anticipatory"]
        # Create a dictionary out of the tunables array
        self.tune_loc = {}
        for tx in range(len(tune_paths)):
            # Grab the next tunable path from the array
            tpath = tune_paths[tx]
            # Strip any leading directory names (keep only the basename)
            tuner = tpath
            while 1:
                slash = tuner.find("/")
                if slash < 0:
                    break
                tuner = tuner[slash+1:]
            # Add mapping to the dictionary
            self.tune_loc[tuner] = tpath

    def load_sched_tunable_values(self, val_file):
        """
        Parse 'val_file' into self.tune_list as (name, path, value) tuples.

        Each config line has the form 'tune[<kver-prefix>]: <name>=<value>';
        blank lines and '#' comments are skipped, and entries whose kernel
        version prefix does not match self.kernel_ver are ignored.
        Raises on malformed lines or names missing from self.tune_loc.
        Requires config_sched_tunables() to have run first.
        """
        # Prepare the array of tunable values
        self.tune_list = []
        # Read the config parameters and find the values that match our kernel
        for cfgline in open(val_file):
            cfgline = cfgline.strip()
            if len(cfgline) == 0:
                continue
            if cfgline.startswith("#"):
                continue
            # Every real entry must start with the literal 'tune['
            if cfgline.startswith("tune[") == 0:
                raise Exception("Config entry not recognized: " + cfgline)
            endKV = cfgline.find("]:")
            if endKV < 0:
                raise Exception("Config entry missing closing bracket: "
                                + cfgline)
            # Compare the bracketed version against a prefix of our kernel
            if cfgline[5:endKV] != self.kernel_ver[0:endKV-5]:
                continue
            tune_parm = cfgline[endKV+2:].strip()
            equal = tune_parm.find("=")
            if equal < 1 or equal == len(tune_parm) - 1:
                raise Exception("Config entry doesn't have 'parameter=value' :"
                                + cfgline)
            tune_name = tune_parm[:equal]
            tune_val = tune_parm[equal+1:]
            # See if we have a matching entry in the path dictionary
            try:
                tune_path = self.tune_loc[tune_name]
            except Exception:
                raise Exception("Unknown config entry: " + cfgline)
            self.tune_list.append((tune_name, tune_path, tune_val))

    def set_sched_tunables(self, disks):
        """
        Given a list of disks in the format returned by get_disk_list() above,
        set the I/O scheduler values on all the disks to the values loaded
        earlier by load_sched_tunables().
        """
        for dx in range(len(disks)):
            disk = disks[dx]['tunable']
            # Set the scheduler first before setting any other tunables
            self.set_tunable(disk, "scheduler",
                             self.tune_loc["scheduler"],
                             "anticipatory")
            # Now set all the tunable parameters we've been given
            for tune_desc in self.tune_list:
                self.set_tunable(disk, tune_desc[0],
                                 tune_desc[1],
                                 tune_desc[2])

    def set_tunable(self, disk, name, path, val):
        """
        Given a disk name, a path to a tunable value under _TUNE_PATH and the
        new value for the parameter, set the value and verify that the value
        has been successfully set.
        """
        fpath = partition.get_iosched_path(disk, path)
        # Things might go wrong so we'll catch exceptions; 'step' tracks the
        # last attempted action so failure messages can pinpoint it
        try:
            step = "open tunable path"
            tunef = open(fpath, 'w', buffering=-1)
            step = "write new tunable value"
            tunef.write(val)
            step = "close the tunable path"
            tunef.close()
            step = "read back new tunable value"
            nval = open(fpath, 'r', buffering=-1).read().strip()
            # For 'scheduler' we need to fish out the bracketed value
            # (the kernel reports e.g. 'noop [anticipatory] deadline')
            if name == "scheduler":
                nval = re.match(".*\[(.*)\].*", nval).group(1)
        except IOError, info:
            # Special case: for some reason 'max_sectors_kb' often doesn't work
            # with large values; try '128' if we haven't tried it already.
            if name == "max_sectors_kb" and info.errno == 22 and val != '128':
                self.set_tunable(disk, name, path, '128')
                return;
            # Something went wrong, probably a 'config' problem of some kind
            raise Exception("Unable to set tunable value '" + name +
                            "' at step '" + step + "': " + str(info))
        except Exception:
            # We should only ever see 'IOError' above, but just in case ...
            raise Exception("Unable to set tunable value for " + name)
        # Make sure the new value is what we expected
        if nval != val:
            raise Exception("Unable to correctly set tunable value for "
                            + name +": desired " + val + ", but found " + nval)
        return
|
gpl-2.0
|
chyeh727/django
|
django/db/__init__.py
|
376
|
2322
|
from django.core import signals
from django.db.utils import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, ConnectionHandler,
ConnectionRouter, DatabaseError, DataError, Error, IntegrityError,
InterfaceError, InternalError, NotSupportedError, OperationalError,
ProgrammingError,
)
# Public API of django.db re-exported for 'from django.db import ...' users.
__all__ = [
    'backend', 'connection', 'connections', 'router', 'DatabaseError',
    'IntegrityError', 'InternalError', 'ProgrammingError', 'DataError',
    'NotSupportedError', 'Error', 'InterfaceError', 'OperationalError',
    'DEFAULT_DB_ALIAS', 'DJANGO_VERSION_PICKLE_KEY'
]

# Process-wide connection handler and database router instances.
connections = ConnectionHandler()
router = ConnectionRouter()

# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
# for backend bits.
# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so we
# manually create the dictionary from the settings, passing only the settings
# that the database backends care about.
# We load all these up for backwards compatibility, you should use
# connections['default'] instead.
class DefaultConnectionProxy(object):
    """
    Attribute-level proxy onto the default DatabaseWrapper. Code that needs
    the actual DatabaseWrapper object itself should look it up directly via
    connections[DEFAULT_DB_ALIAS].
    """
    def __getattr__(self, attr):
        return getattr(connections[DEFAULT_DB_ALIAS], attr)

    def __setattr__(self, attr, value):
        return setattr(connections[DEFAULT_DB_ALIAS], attr, value)

    def __delattr__(self, attr):
        return delattr(connections[DEFAULT_DB_ALIAS], attr)

    def __eq__(self, other):
        return connections[DEFAULT_DB_ALIAS] == other

    def __ne__(self, other):
        return connections[DEFAULT_DB_ALIAS] != other
connection = DefaultConnectionProxy()


# Register an event to reset saved queries when a Django request is started.
def reset_queries(**kwargs):
    """Clear the query log of every configured connection."""
    for db_connection in connections.all():
        db_connection.queries_log.clear()


signals.request_started.connect(reset_queries)
# Register an event to reset transaction state and close connections past
# their lifetime.
def close_old_connections(**kwargs):
    """Close any connection that has become unusable or obsolete."""
    for db_connection in connections.all():
        db_connection.close_if_unusable_or_obsolete()


signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections)
|
bsd-3-clause
|
Ayrx/cryptography
|
src/_cffi_src/commoncrypto/common_digest.py
|
10
|
1574
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <CommonCrypto/CommonDigest.h>
"""
TYPES = """
typedef uint32_t CC_LONG;
typedef uint64_t CC_LONG64;
typedef struct CC_MD5state_st {
...;
} CC_MD5_CTX;
typedef struct CC_SHA1state_st {
...;
} CC_SHA1_CTX;
typedef struct CC_SHA256state_st {
...;
} CC_SHA256_CTX;
typedef struct CC_SHA512state_st {
...;
} CC_SHA512_CTX;
"""
FUNCTIONS = """
int CC_MD5_Init(CC_MD5_CTX *);
int CC_MD5_Update(CC_MD5_CTX *, const void *, CC_LONG);
int CC_MD5_Final(unsigned char *, CC_MD5_CTX *);
int CC_SHA1_Init(CC_SHA1_CTX *);
int CC_SHA1_Update(CC_SHA1_CTX *, const void *, CC_LONG);
int CC_SHA1_Final(unsigned char *, CC_SHA1_CTX *);
int CC_SHA224_Init(CC_SHA256_CTX *);
int CC_SHA224_Update(CC_SHA256_CTX *, const void *, CC_LONG);
int CC_SHA224_Final(unsigned char *, CC_SHA256_CTX *);
int CC_SHA256_Init(CC_SHA256_CTX *);
int CC_SHA256_Update(CC_SHA256_CTX *, const void *, CC_LONG);
int CC_SHA256_Final(unsigned char *, CC_SHA256_CTX *);
int CC_SHA384_Init(CC_SHA512_CTX *);
int CC_SHA384_Update(CC_SHA512_CTX *, const void *, CC_LONG);
int CC_SHA384_Final(unsigned char *, CC_SHA512_CTX *);
int CC_SHA512_Init(CC_SHA512_CTX *);
int CC_SHA512_Update(CC_SHA512_CTX *, const void *, CC_LONG);
int CC_SHA512_Final(unsigned char *, CC_SHA512_CTX *);
"""
MACROS = """
"""
CUSTOMIZATIONS = """
"""
|
bsd-3-clause
|
telwertowski/QGIS
|
tests/src/python/test_authmanager_pki_ows.py
|
15
|
7719
|
# -*- coding: utf-8 -*-
"""
Tests for auth manager WMS/WFS using QGIS Server through PKI
enabled qgis_wrapped_server.py.
This is an integration test for QGIS Desktop Auth Manager WFS and WMS provider
and QGIS Server WFS/WMS that check if QGIS can use a stored auth manager auth
configuration to access an HTTP Basic protected endpoint.
From build dir, run: ctest -R PyQgsAuthManagerPKIOWSTest -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import sys
import re
import subprocess
import tempfile
import urllib
import stat
__author__ = 'Alessandro Pasotti'
__date__ = '25/10/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
from shutil import rmtree
from utilities import unitTestDataPath, waitServer
from qgis.core import (
QgsApplication,
QgsAuthManager,
QgsAuthMethodConfig,
QgsVectorLayer,
QgsRasterLayer,
)
from qgis.PyQt.QtNetwork import QSslCertificate
from qgis.testing import (
start_app,
unittest,
)
# Read the desired server port from the environment; fall back to '0'
# (automatic port selection) when the variable is absent. Using .get()
# replaces a bare 'except:' that also swallowed SystemExit/KeyboardInterrupt.
QGIS_SERVER_ENDPOINT_PORT = os.environ.get('QGIS_SERVER_ENDPOINT_PORT',
                                           '0')  # '0' = Auto

# Point the auth manager at a throw-away database directory for this run.
QGIS_AUTH_DB_DIR_PATH = tempfile.mkdtemp()
os.environ['QGIS_AUTH_DB_DIR_PATH'] = QGIS_AUTH_DB_DIR_PATH

qgis_app = start_app()
class TestAuthManager(unittest.TestCase):
    """
    Integration test: drives a PKI-protected QGIS server subprocess and
    verifies that WFS/WMS layers can be loaded through a stored auth config.
    """

    @classmethod
    def setUpAuth(cls):
        """Run before all tests and set up authentication"""
        authm = QgsApplication.authManager()
        assert (authm.setMasterPassword('masterpassword', True))
        cls.sslrootcert_path = os.path.join(cls.certsdata_path, 'chains_subissuer-issuer-root_issuer2-root2.pem')
        cls.sslcert = os.path.join(cls.certsdata_path, 'gerardus_cert.pem')
        cls.sslkey = os.path.join(cls.certsdata_path, 'gerardus_key.pem')
        assert os.path.isfile(cls.sslcert)
        assert os.path.isfile(cls.sslkey)
        assert os.path.isfile(cls.sslrootcert_path)
        # Restrict the key material to owner-read only
        os.chmod(cls.sslcert, stat.S_IRUSR)
        os.chmod(cls.sslkey, stat.S_IRUSR)
        os.chmod(cls.sslrootcert_path, stat.S_IRUSR)
        # Client-side PKI auth configuration stored in the auth database
        cls.auth_config = QgsAuthMethodConfig("PKI-Paths")
        cls.auth_config.setConfig('certpath', cls.sslcert)
        cls.auth_config.setConfig('keypath', cls.sslkey)
        cls.auth_config.setName('test_pki_auth_config')
        cls.username = 'Gerardus'
        cls.sslrootcert = QSslCertificate.fromPath(cls.sslrootcert_path)
        assert cls.sslrootcert is not None
        authm.storeCertAuthorities(cls.sslrootcert)
        authm.rebuildCaCertsCache()
        authm.rebuildTrustedCaCertsCache()
        assert (authm.storeAuthenticationConfig(cls.auth_config)[0])
        assert cls.auth_config.isValid()
        # Server-side TLS material, handed to the wrapped server via env vars
        # cls.server_cert = os.path.join(cls.certsdata_path, 'localhost_ssl_cert.pem')
        cls.server_cert = os.path.join(cls.certsdata_path, '127_0_0_1_ssl_cert.pem')
        # cls.server_key = os.path.join(cls.certsdata_path, 'localhost_ssl_key.pem')
        cls.server_key = os.path.join(cls.certsdata_path, '127_0_0_1_ssl_key.pem')
        cls.server_rootcert = cls.sslrootcert_path
        os.chmod(cls.server_cert, stat.S_IRUSR)
        os.chmod(cls.server_key, stat.S_IRUSR)
        os.chmod(cls.server_rootcert, stat.S_IRUSR)
        os.environ['QGIS_SERVER_HOST'] = cls.hostname
        os.environ['QGIS_SERVER_PORT'] = str(cls.port)
        os.environ['QGIS_SERVER_PKI_KEY'] = cls.server_key
        os.environ['QGIS_SERVER_PKI_CERTIFICATE'] = cls.server_cert
        os.environ['QGIS_SERVER_PKI_USERNAME'] = cls.username
        os.environ['QGIS_SERVER_PKI_AUTHORITY'] = cls.server_rootcert

    @classmethod
    def setUpClass(cls):
        """Run before all tests:
        Creates an auth configuration"""
        cls.port = QGIS_SERVER_ENDPOINT_PORT
        # Clean env just to be sure
        env_vars = ['QUERY_STRING', 'QGIS_PROJECT_FILE']
        for ev in env_vars:
            try:
                del os.environ[ev]
            except KeyError:
                pass
        cls.testdata_path = unitTestDataPath('qgis_server')
        cls.certsdata_path = os.path.join(unitTestDataPath('auth_system'), 'certs_keys')
        cls.project_path = os.path.join(cls.testdata_path, "test_project.qgs")
        # cls.hostname = 'localhost'
        cls.protocol = 'https'
        cls.hostname = '127.0.0.1'
        cls.setUpAuth()
        # Launch the wrapped QGIS server as a subprocess and learn which
        # port it actually bound by parsing its first stdout line
        server_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                   'qgis_wrapped_server.py')
        cls.server = subprocess.Popen([sys.executable, server_path],
                                      env=os.environ, stdout=subprocess.PIPE)
        line = cls.server.stdout.readline()
        cls.port = int(re.findall(b':(\d+)', line)[0])
        assert cls.port != 0
        # Wait for the server process to start
        assert waitServer('%s://%s:%s' % (cls.protocol, cls.hostname, cls.port)), "Server is not responding! %s://%s:%s" % (cls.protocol, cls.hostname, cls.port)

    @classmethod
    def tearDownClass(cls):
        """Run after all tests"""
        cls.server.terminate()
        rmtree(QGIS_AUTH_DB_DIR_PATH)
        del cls.server

    def setUp(self):
        """Run before each test."""
        pass

    def tearDown(self):
        """Run after each test."""
        pass

    @classmethod
    def _getWFSLayer(cls, type_name, layer_name=None, authcfg=None):
        """
        WFS layer factory
        """
        if layer_name is None:
            layer_name = 'wfs_' + type_name
        parms = {
            'srsname': 'EPSG:4326',
            'typename': type_name,
            'url': '%s://%s:%s/?map=%s' % (cls.protocol, cls.hostname, cls.port, cls.project_path),
            'version': 'auto',
            'table': '',
        }
        if authcfg is not None:
            parms.update({'authcfg': authcfg})
        # WFS URIs are space-separated key='value' pairs
        uri = ' '.join([("%s='%s'" % (k, v)) for k, v in list(parms.items())])
        wfs_layer = QgsVectorLayer(uri, layer_name, 'WFS')
        return wfs_layer

    @classmethod
    def _getWMSLayer(cls, layers, layer_name=None, authcfg=None):
        """
        WMS layer factory
        """
        if layer_name is None:
            layer_name = 'wms_' + layers.replace(',', '')
        parms = {
            'crs': 'EPSG:4326',
            'url': '%s://%s:%s/?map=%s' % (cls.protocol, cls.hostname, cls.port, cls.project_path),
            'format': 'image/png',
            # This is needed because of a really weird implementation in QGIS Server, that
            # replaces _ in the the real layer name with spaces
            'layers': urllib.parse.quote(layers.replace('_', ' ')),
            'styles': '',
            'version': 'auto',
            # 'sql': '',
        }
        if authcfg is not None:
            parms.update({'authcfg': authcfg})
        # WMS URIs are &-separated key=value pairs with '=' escaped in values
        uri = '&'.join([("%s=%s" % (k, v.replace('=', '%3D'))) for k, v in list(parms.items())])
        wms_layer = QgsRasterLayer(uri, layer_name, 'wms')
        return wms_layer

    def testValidAuthAccess(self):
        """
        Access the protected layer with valid credentials
        Note: cannot test invalid access in a separate test because
        it would fail the subsequent (valid) calls due to cached connections
        """
        wfs_layer = self._getWFSLayer('testlayer_èé', authcfg=self.auth_config.id())
        self.assertTrue(wfs_layer.isValid())
        wms_layer = self._getWMSLayer('testlayer_èé', authcfg=self.auth_config.id())
        self.assertTrue(wms_layer.isValid())
if __name__ == '__main__':
unittest.main()
|
gpl-2.0
|
abhishekgahlot/flask
|
scripts/flaskext_compat.py
|
6
|
5023
|
# -*- coding: utf-8 -*-
"""
flaskext_compat
~~~~~~~~~~~~~~~
Implements the ``flask.ext`` virtual package for versions of Flask
older than 0.7. This module is a noop if Flask 0.8 was detected.
Usage::
import flaskext_compat
flaskext_compat.activate()
from flask.ext import foo
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import imp
class ExtensionImporter(object):
    """This importer redirects imports from this submodule to other locations.
    This makes it possible to transition from the old flaskext.name to the
    newer flask_name without people having a hard time.
    """

    def __init__(self, module_choices, wrapper_module):
        # module_choices: '%s'-style templates tried in order (e.g.
        # 'flask_%s', 'flaskext.%s'); wrapper_module: the virtual package
        # name whose submodules get redirected (e.g. 'flask.ext').
        self.module_choices = module_choices
        self.wrapper_module = wrapper_module
        self.prefix = wrapper_module + '.'
        self.prefix_cutoff = wrapper_module.count('.') + 1

    def __eq__(self, other):
        # Compare by class identity (module + name) and configuration so a
        # re-installed importer replaces an equivalent earlier one.
        return self.__class__.__module__ == other.__class__.__module__ and \
               self.__class__.__name__ == other.__class__.__name__ and \
               self.wrapper_module == other.wrapper_module and \
               self.module_choices == other.module_choices

    def __ne__(self, other):
        return not self.__eq__(other)

    def install(self):
        """Add self to sys.meta_path, dropping any equivalent older copy."""
        sys.meta_path[:] = [x for x in sys.meta_path if self != x] + [self]

    def find_module(self, fullname):
        """PEP 302 finder: claim any import under the wrapper prefix."""
        if fullname.startswith(self.prefix):
            return self

    def load_module(self, fullname):
        """PEP 302 loader: try each module_choices template in order and
        alias the first one that imports under the wrapper name."""
        if fullname in sys.modules:
            return sys.modules[fullname]
        modname = fullname.split('.', self.prefix_cutoff)[self.prefix_cutoff]
        for path in self.module_choices:
            realname = path % modname
            try:
                __import__(realname)
            except ImportError:
                exc_type, exc_value, tb = sys.exc_info()
                # since we only establish the entry in sys.modules at the
                # end this seems to be redundant, but if recursive imports
                # happen we will call into the move import a second time.
                # On the second invocation we still don't have an entry for
                # fullname in sys.modules, but we will end up with the same
                # fake module name and that import will succeed since this
                # one already has a temporary entry in the modules dict.
                # Since this one "succeeded" temporarily that second
                # invocation now will have created a fullname entry in
                # sys.modules which we have to kill.
                sys.modules.pop(fullname, None)
                # If it's an important traceback we reraise it, otherwise
                # we swallow it and try the next choice. The skipped frame
                # is the one from __import__ above which we don't care about.
                if self.is_important_traceback(realname, tb):
                    raise exc_type, exc_value, tb.tb_next
                continue
            module = sys.modules[fullname] = sys.modules[realname]
            if '.' not in modname:
                setattr(sys.modules[self.wrapper_module], modname, module)
            return module
        raise ImportError('No module named %s' % fullname)

    def is_important_traceback(self, important_module, tb):
        """Walks a traceback's frames and checks if any of the frames
        originated in the given important module. If that is the case then we
        were able to import the module itself but apparently something went
        wrong when the module was imported. (Eg: import of an import failed).
        """
        while tb is not None:
            if self.is_important_frame(important_module, tb):
                return True
            tb = tb.tb_next
        return False

    def is_important_frame(self, important_module, tb):
        """Checks a single frame if it's important."""
        g = tb.tb_frame.f_globals
        if '__name__' not in g:
            return False
        module_name = g['__name__']
        # Python 2.7 Behavior. Modules are cleaned up late so the
        # name shows up properly here. Success!
        if module_name == important_module:
            return True
        # Some python versions will clean up modules so early that the
        # module name at that point is no longer set. Try guessing from
        # the filename then.
        filename = os.path.abspath(tb.tb_frame.f_code.co_filename)
        test_string = os.path.sep + important_module.replace('.', os.path.sep)
        return test_string + '.py' in filename or \
               test_string + os.path.sep + '__init__.py' in filename
def activate():
    """Create the 'flask.ext' virtual package and hook up the importer."""
    import flask
    # Build an empty package object to act as 'flask.ext'
    ext_pkg = imp.new_module('flask.ext')
    ext_pkg.__path__ = []
    sys.modules['flask.ext'] = ext_pkg
    flask.ext = ext_pkg
    # Redirect 'flask.ext.X' imports to 'flask_X' then 'flaskext.X'
    ExtensionImporter(['flask_%s', 'flaskext.%s'], 'flask.ext').install()
|
bsd-3-clause
|
blueboxgroup/nova
|
doc/source/conf.py
|
13
|
9460
|
# -*- coding: utf-8 -*-
#
# nova documentation build configuration file, created by
# sphinx-quickstart on Sat May 1 15:17:47 2010.
#
# This file is execfile()d with the current directory set to
# its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# 'ext.*' entries are project-local extensions made importable by the
# sys.path inserts above.
extensions = ['sphinx.ext.autodoc',
              'ext.nova_todo',
              'sphinx.ext.coverage',
              'sphinx.ext.pngmath',
              'sphinx.ext.ifconfig',
              'sphinx.ext.graphviz',
              'oslosphinx',
              "ext.support_matrix",
              ]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
# Changing the path so that the Hudson build output contains GA code
# and the source docs do not contain the code so local, offline sphinx builds
# are "clean."
templates_path = []
if os.getenv('HUDSON_PUBLISH_DOCS'):
    templates_path = ['_ga', '_templates']
else:
    templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'nova'
copyright = u'2010-present, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE: imported mid-file (after the sys.path inserts above) so the in-tree
# nova package is found.
from nova.version import version_info
# The full version, including alpha/beta/rc tags.
release = version_info.release_string()
# The short X.Y version.
version = version_info.version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = [
    'api_ext/rst_extension_template',
    'vmwareapi_readme',
    'installer',
]
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use
# for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['nova.']
# -- Options for man page output ----------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
    ('man/nova-all', 'nova-all', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-api-ec2', 'nova-api-ec2', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-api-metadata', 'nova-api-metadata', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-api-os-compute', 'nova-api-os-compute',
     u'Cloud controller fabric', [u'OpenStack'], 1),
    ('man/nova-api', 'nova-api', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-cert', 'nova-cert', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-compute', 'nova-compute', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-console', 'nova-console', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-consoleauth', 'nova-consoleauth', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-dhcpbridge', 'nova-dhcpbridge', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-manage', 'nova-manage', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-network', 'nova-network', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-novncproxy', 'nova-novncproxy', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-spicehtml5proxy', 'nova-spicehtml5proxy', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-serialproxy', 'nova-serialproxy', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-objectstore', 'nova-objectstore', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-rootwrap', 'nova-rootwrap', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-scheduler', 'nova-scheduler', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-xvpvncproxy', 'nova-xvpvncproxy', u'Cloud controller fabric',
     [u'OpenStack'], 1),
    ('man/nova-conductor', 'nova-conductor', u'Cloud controller fabric',
     [u'OpenStack'], 1),
]
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Stamp pages with the date of the last git commit. NOTE(review): os.popen
# runs at conf import time and yields '' if git is unavailable -- presumably
# acceptable for doc builds; confirm before hardening.
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'novadoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'Nova.tex', u'Nova Documentation',
u'OpenStack Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
apache-2.0
|
funkyfuture/inxs
|
docs/conf.py
|
1
|
8439
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Sphinx build configuration for the inxs documentation.

import sys
import os

# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

import inxs

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.5'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
              'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'inxs'
copyright = u"2017, Frank Sachsenheim"

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = inxs.__version__
# The full version, including alpha/beta/rc tags.
release = inxs.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False

# -- Options for intersphinx extension -----------------------------------------

# Targets for cross-references into external projects' documentation.
intersphinx_mapping = {'python': ('https://docs.python.org/', None),
                       'delb': ('https://delb.readthedocs.io/en/latest/', None)}

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Only force the Read the Docs theme for local builds; when the READTHEDOCS
# environment variable is set the hosted builder handles the theme itself.
if not os.getenv('READTHEDOCS', False):
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
    html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'inxsdoc'

# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'inxs.tex',
     u'inxs Documentation',
     u'Frank Sachsenheim', 'manual'),
]

# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'inxs',
     u'inxs Documentation',
     [u'Frank Sachsenheim'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'inxs',
     u'inxs Documentation',
     u'Frank Sachsenheim',
     'inxs',
     'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# -- Options for doctest -----------------------------------------------

# Code executed before every doctest block in the documentation.
doctest_global_setup = """
import operator
from inxs import *
"""

# -- Options for linkcheck ---------------------------------------------

linkcheck_anchors_ignore = ['^fork-destination-box$']
|
agpl-3.0
|
VeritasOS/cloud-custodian
|
c7n/executor.py
|
2
|
2908
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from concurrent.futures import (
ProcessPoolExecutor, ThreadPoolExecutor)
from c7n.registry import PluginRegistry
import threading
class ExecutorRegistry(PluginRegistry):
    """Plugin registry pre-seeded with the built-in executor strategies."""

    def __init__(self, plugin_type):
        super(ExecutorRegistry, self).__init__(plugin_type)
        # Register each built-in strategy under its lookup name.
        builtins = (
            ('process', ProcessPoolExecutor),
            ('thread', ThreadPoolExecutor),
            ('main', MainThreadExecutor),
        )
        for exec_name, exec_class in builtins:
            self.register(exec_name, exec_class)
def executor(name, **kw):
    """Instantiate the executor registered under ``name``.

    :param name: registry key of the executor ('process', 'thread', 'main')
    :param kw: keyword arguments forwarded to the executor constructor
    :raises ValueError: if no executor is registered under ``name``
    """
    factory = executors.get(name)
    if factory is None:
        raise ValueError("No Such Executor %s" % name)
    # post element refactoring
    # factory.validate(kw)
    return factory(**kw)
class MainThreadExecutor(object):
    """ For running tests.

    async == True -> catch exceptions and store them in the future.
    async == False -> let exceptions bubble up.
    """
    # NOTE(review): 'async' became a reserved keyword in Python 3.7, so this
    # attribute (and the self.async read in submit) only parses on earlier
    # interpreters; renaming it would change the class's public interface.
    async = True

    # For Dev/Unit Testing with concurrent.futures
    def __init__(self, *args, **kw):
        # Accepts (and ignores) pool-style constructor arguments so it can
        # be swapped in for the real executors.
        self.args = args
        self.kw = kw

    def map(self, func, iterable):
        # Serial, lazy equivalent of Executor.map -- runs in the caller's
        # thread and yields each result as it is computed.
        for args in iterable:
            yield func(args)

    def submit(self, func, *args, **kw):
        # Run the callable immediately and wrap the outcome in a
        # pre-resolved future.
        try:
            return MainThreadFuture(func(*args, **kw))
        except Exception as e:
            if self.async:
                # Store the exception on the future, mirroring the real
                # concurrent.futures behavior.
                return MainThreadFuture(None, exception=e)
            raise

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Returning False propagates any exception from the with-block.
        return False
class MainThreadFuture(object):
    """Pre-resolved stand-in for concurrent.futures.Future (dev/unit testing).

    The wrapped work has already run by the time this object exists, so
    every query method reports a finished, uncancellable future.
    """

    def __init__(self, value, exception=None):
        self.value = value
        self._exception = exception
        # Sigh -- concurrent.futures pokes at these private attributes, so
        # mimic the internals of a completed Future.
        self._state = 'FINISHED'
        self._waiters = []
        self._condition = threading.Condition()

    def cancel(self):
        # Already finished; cancellation can never succeed.
        return False

    def cancelled(self):
        return False

    def done(self):
        # The computation happened synchronously at construction time.
        return True

    def exception(self):
        return self._exception

    def result(self, timeout=None):
        # timeout is accepted for interface compatibility but never waits.
        if self._exception:
            raise self._exception
        return self.value

    def add_done_callback(self, fn):
        # Invoke immediately since the future is already resolved.
        return fn(self)
# Module-level registry instance; external executor plugins are pulled in
# via load_plugins() at import time.
executors = ExecutorRegistry('executor')
executors.load_plugins()
|
apache-2.0
|
SatoshiNXSimudrone/sl4a-damon-clone
|
python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/cron.py
|
129
|
18386
|
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
import cgi
import re
import datetime
import pickle
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import memcache
# Ports for the main application and the cron handler.  Task urls that point
# at APPLICATION_PORT are rewritten to CRON_PORT before being fetched
# (see Cron.__init__).
APPLICATION_PORT = '8080'
CRON_PORT = '8081'
class _AppEngineUtilities_Cron(db.Model):
    """
    Model for the tasks in the datastore. This contains the scheduling and
    url information, as well as a field that sets the next time the instance
    should run.
    """
    # Raw crontab-style entry string ("min hour day mon dow url").
    cron_entry = db.StringProperty()
    # Next datetime at which the task is due to run.
    next_run = db.DateTimeProperty()
    # Pickled, pre-parsed form of cron_entry (built by Cron._validate_cron).
    cron_compiled = db.BlobProperty()
    # Url fetched when the task fires.
    url = db.LinkProperty()
class Cron(object):
    """
    Cron is a scheduling utility built for appengine, modeled after
    crontab for unix systems. While true scheduled tasks are not
    possible within the Appengine environment currently, this
    is an attempt to provide a request-based alternative. You
    configure the tasks in an included interface, and then import
    the class on any request you want capable of running tasks.

    On each request where Cron is imported, the list of tasks
    that need to be run will be pulled and run. A task is a url
    within your application. It's important to make sure that these
    requests run quickly, or you could risk timing out the actual
    request.

    See the documentation for more information on configuring
    your application to support Cron and setting up tasks.

    NOTE(review): this code is Python-2-only (``raise X, "msg"`` syntax) and
    contains several latent defects flagged inline below; fixing them would
    change runtime behavior, so they are only annotated here.
    """
    def __init__(self):
        # Check if any tasks need to be run, and run every due task inline
        # as part of this request.
        query = _AppEngineUtilities_Cron.all()
        query.filter('next_run <= ', datetime.datetime.now())
        results = query.fetch(1000)
        if len(results) > 0:
            # NOTE(review): one_second is computed but never used.
            one_second = datetime.timedelta(seconds = 1)
            before = datetime.datetime.now()
            for r in results:
                # Redirect the task url from the application port to the
                # dedicated cron port before fetching it.
                if re.search(':' + APPLICATION_PORT, r.url):
                    r.url = re.sub(':' + APPLICATION_PORT, ':' + CRON_PORT, r.url)
                #result = urlfetch.fetch(r.url)
                diff = datetime.datetime.now() - before
                if int(diff.seconds) < 1:
                    # memcache.add only succeeds when the key is absent, so
                    # it acts as a best-effort lock preventing concurrent
                    # requests from running the same task twice.
                    if memcache.add(str(r.key), "running"):
                        result = urlfetch.fetch(r.url)
                        r.next_run = self._get_next_run(pickle.loads(r.cron_compiled))
                        r.put()
                        memcache.delete(str(r.key))
                else:
                    # Roughly a second has been spent on tasks already; stop
                    # so this request doesn't run over its deadline.
                    break

    def add_cron(self, cron_string):
        """
        Parse, validate, and persist a crontab-style entry of the form
        "min hour day mon dow url".  Raises ValueError on malformed input.
        """
        cron = cron_string.split(" ")
        # NOTE(review): identity comparison with an int literal; `!= 6` is
        # what is meant (this only works via CPython's small-int caching).
        if len(cron) is not 6:
            raise ValueError, 'Invalid cron string. Format: * * * * * url'
        cron = {
            'min': cron[0],
            'hour': cron[1],
            'day': cron[2],
            'mon': cron[3],
            'dow': cron[4],
            'url': cron[5],
        }
        cron_compiled = self._validate_cron(cron)
        next_run = self._get_next_run(cron_compiled)
        cron_entry = _AppEngineUtilities_Cron()
        cron_entry.cron_entry = cron_string
        cron_entry.next_run = next_run
        cron_entry.cron_compiled = pickle.dumps(cron_compiled)
        cron_entry.url = cron["url"]
        cron_entry.put()

    def _validate_cron(self, cron):
        """
        Parse the field to determine whether it is an integer or lists,
        also converting strings to integers where necessary. If passed bad
        values, raises a ValueError.
        """
        # Dispatch each field of the cron dict to its dedicated validator.
        parsers = {
            'dow': self._validate_dow,
            'mon': self._validate_mon,
            'day': self._validate_day,
            'hour': self._validate_hour,
            'min': self._validate_min,
            'url': self. _validate_url,
        }
        for el in cron:
            parse = parsers[el]
            cron[el] = parse(cron[el])
        return cron

    def _validate_type(self, v, t):
        """
        Validates that the number (v) passed is in the correct range for the
        type (t). Raise ValueError, if validation fails.

        Valid ranges:
        day of week = 0-7
        month = 1-12
        day = 1-31
        hour = 0-23
        minute = 0-59

        All can be * which will then return the range for that entire type.
        """
        if t == "dow":
            if v >= 0 and v <= 7:
                return [v]
            elif v == "*":
                # NOTE(review): unlike the other types, "*" for dow is
                # returned verbatim instead of being expanded to a range;
                # callers that extend() a list with this get the characters
                # of the string "*".
                return "*"
            else:
                raise ValueError, "Invalid day of week."
        elif t == "mon":
            if v >= 1 and v <= 12:
                return [v]
            elif v == "*":
                # NOTE(review): range() excludes its stop value, so this is
                # months 1-11 -- December is silently dropped.  The same
                # off-by-one affects the day/hour/min branches below
                # (day misses 31, hour misses 23, minute misses 59).
                return range(1, 12)
            else:
                raise ValueError, "Invalid month."
        elif t == "day":
            if v >= 1 and v <= 31:
                return [v]
            elif v == "*":
                # NOTE(review): off-by-one -- days 1-30 only (see above).
                return range(1, 31)
            else:
                raise ValueError, "Invalid day."
        elif t == "hour":
            if v >= 0 and v <= 23:
                return [v]
            elif v == "*":
                # NOTE(review): off-by-one -- hours 0-22 only (see above).
                return range(0, 23)
            else:
                raise ValueError, "Invalid hour."
        elif t == "min":
            if v >= 0 and v <= 59:
                return [v]
            elif v == "*":
                # NOTE(review): off-by-one -- minutes 0-58 only (see above).
                return range(0, 59)
            else:
                raise ValueError, "Invalid minute."

    def _validate_list(self, l, t):
        """
        Validates a crontab list. Lists are numerical values seperated
        by a comma with no spaces. Ex: 0,5,10,15

        Arguments:
        l: comma seperated list of numbers
        t: type used for validation, valid values are
           dow, mon, day, hour, min
        """
        elements = l.split(",")
        return_list = []
        # we have a list, validate all of them
        for e in elements:
            if "-" in e:
                # Ranges inside a list expand to their full run of values.
                return_list.extend(self._validate_range(e, t))
            else:
                try:
                    v = int(e)
                    self._validate_type(v, t)
                    return_list.append(v)
                except:
                    raise ValueError, "Names are not allowed in lists."
        # return a list of integers
        return return_list

    def _validate_range(self, r, t):
        """
        Validates a crontab range. Ranges are 2 numerical values seperated
        by a dash with no spaces. Ex: 0-10

        Arguments:
        r: dash seperated list of 2 numbers
        t: type used for validation, valid values are
           dow, mon, day, hour, min
        """
        elements = r.split('-')
        # a range should be 2 elements
        # NOTE(review): identity comparison with an int literal; `!= 2` is
        # what is meant (works only via CPython small-int caching).
        if len(elements) is not 2:
            raise ValueError, "Invalid range passed: " + str(r)
        # validate the minimum and maximum are valid for the type
        for e in elements:
            self._validate_type(int(e), t)
        # return a list of the numbers in the range.
        # +1 makes sure the end point is included in the return value
        return range(int(elements[0]), int(elements[1]) + 1)

    def _validate_step(self, s, t):
        """
        Validates a crontab step. Steps are complicated. They can
        be based on a range 1-10/2 or just step through all valid
        */2. When parsing times you should always check for step first
        and see if it has a range or not, before checking for ranges because
        this will handle steps of ranges returning the final list. Steps
        of lists is not supported.

        Arguments:
        s: slash seperated string
        t: type used for validation, valid values are
           dow, mon, day, hour, min
        """
        elements = s.split('/')
        # a range should be 2 elements
        # NOTE(review): identity comparison with an int literal, as above.
        if len(elements) is not 2:
            raise ValueError, "Invalid step passed: " + str(s)
        try:
            step = int(elements[1])
        except:
            raise ValueError, "Invalid step provided " + str(s)
        r_list = []
        # if the first element is *, use all valid numbers
        # NOTE(review): `is` performs identity comparison on strings, which
        # is not guaranteed to match equal strings; `==` is what's meant
        # here and in the other string `is` tests in this class.
        if elements[0] is "*" or elements[0] is "":
            r_list.extend(self._validate_type('*', t))
        # check and see if there is a list of ranges
        elif "," in elements[0]:
            ranges = elements[0].split(",")
            for r in ranges:
                # if it's a range, we need to manage that
                if "-" in r:
                    r_list.extend(self._validate_range(r, t))
                else:
                    try:
                        # NOTE(review): extend() with an int raises
                        # TypeError; append() appears to be intended.
                        r_list.extend(int(r))
                    except:
                        raise ValueError, "Invalid step provided " + str(s)
        elif "-" in elements[0]:
            r_list.extend(self._validate_range(elements[0], t))
        # NOTE(review): a bare number before the slash (e.g. "5/2") leaves
        # r_list empty, so r_list[0] below raises IndexError.
        return range(r_list[0], r_list[-1] + 1, step)

    def _validate_dow(self, dow):
        """
        Expand a day-of-week field into the list of matching weekday
        numbers (0-7, with Sunday as both 0 and 7 per man crontab).
        """
        # if dow is * return it. This is for date parsing where * does not mean
        # every day for crontab entries.
        if dow is "*":
            return dow
        days = {
            'mon': 1,
            'tue': 2,
            'wed': 3,
            'thu': 4,
            'fri': 5,
            'sat': 6,
            # per man crontab sunday can be 0 or 7.
            'sun': [0, 7],
        }
        if dow in days:
            dow = days[dow]
            # NOTE(review): for 'sun' this wraps the [0, 7] list again,
            # returning the nested list [[0, 7]] rather than [0, 7].
            return [dow]
        # if dow is * return it. This is for date parsing where * does not mean
        # every day for crontab entries.
        elif dow is "*":
            # NOTE(review): unreachable -- the same test already returned
            # at the top of the method.
            return dow
        elif "/" in dow:
            return(self._validate_step(dow, "dow"))
        elif "," in dow:
            return(self._validate_list(dow, "dow"))
        elif "-" in dow:
            return(self._validate_range(dow, "dow"))
        else:
            valid_numbers = range(0, 8)
            if not int(dow) in valid_numbers:
                raise ValueError, "Invalid day of week " + str(dow)
            else:
                return [int(dow)]

    def _validate_mon(self, mon):
        # Expand a month field into the list of matching month numbers 1-12.
        months = {
            'jan': 1,
            'feb': 2,
            'mar': 3,
            'apr': 4,
            'may': 5,
            'jun': 6,
            'jul': 7,
            'aug': 8,
            'sep': 9,
            'oct': 10,
            'nov': 11,
            'dec': 12,
        }
        if mon in months:
            mon = months[mon]
            return [mon]
        elif mon is "*":
            return range(1, 13)
        elif "/" in mon:
            return(self._validate_step(mon, "mon"))
        elif "," in mon:
            return(self._validate_list(mon, "mon"))
        elif "-" in mon:
            return(self._validate_range(mon, "mon"))
        else:
            valid_numbers = range(1, 13)
            if not int(mon) in valid_numbers:
                raise ValueError, "Invalid month " + str(mon)
            else:
                return [int(mon)]

    def _validate_day(self, day):
        # Expand a day-of-month field into the list of matching days 1-31.
        if day is "*":
            return range(1, 32)
        elif "/" in day:
            return(self._validate_step(day, "day"))
        elif "," in day:
            return(self._validate_list(day, "day"))
        elif "-" in day:
            return(self._validate_range(day, "day"))
        else:
            # NOTE(review): range(1, 31) excludes 31, so a literal day 31
            # is wrongly rejected.
            valid_numbers = range(1, 31)
            if not int(day) in valid_numbers:
                raise ValueError, "Invalid day " + str(day)
            else:
                return [int(day)]

    def _validate_hour(self, hour):
        # Expand an hour field into the list of matching hours 0-23.
        if hour is "*":
            return range(0, 24)
        elif "/" in hour:
            return(self._validate_step(hour, "hour"))
        elif "," in hour:
            return(self._validate_list(hour, "hour"))
        elif "-" in hour:
            return(self._validate_range(hour, "hour"))
        else:
            # NOTE(review): range(0, 23) excludes 23, so a literal hour 23
            # is wrongly rejected.
            valid_numbers = range(0, 23)
            if not int(hour) in valid_numbers:
                raise ValueError, "Invalid hour " + str(hour)
            else:
                return [int(hour)]

    def _validate_min(self, min):
        # Expand a minute field into the list of matching minutes 0-59.
        if min is "*":
            return range(0, 60)
        elif "/" in min:
            return(self._validate_step(min, "min"))
        elif "," in min:
            return(self._validate_list(min, "min"))
        elif "-" in min:
            return(self._validate_range(min, "min"))
        else:
            # NOTE(review): range(0, 59) excludes 59, so a literal minute
            # 59 is wrongly rejected.
            valid_numbers = range(0, 59)
            if not int(min) in valid_numbers:
                raise ValueError, "Invalid min " + str(min)
            else:
                return [int(min)]

    def _validate_url(self, url):
        # kludge for issue 842, right now we use request headers
        # to set the host.
        if url[0] is not "/":
            url = "/" + url
        url = 'http://' + str(os.environ['HTTP_HOST']) + url
        return url
        # content below is for when that issue gets fixed
        #regex = re.compile("^(http|https):\/\/([a-z0-9-]\.+)*", re.IGNORECASE)
        #if regex.match(url) is not None:
        #    return url
        #else:
        #    raise ValueError, "Invalid url " + url

    def _calc_month(self, next_run, cron):
        # Advance next_run until its month is one allowed by the cron entry,
        # rolling the year forward when all allowed months are behind us.
        while True:
            if cron["mon"][-1] < next_run.month:
                next_run = next_run.replace(year=next_run.year+1, \
                    month=cron["mon"][0], \
                    day=1,hour=0,minute=0)
            else:
                if next_run.month in cron["mon"]:
                    return next_run
                else:
                    # NOTE(review): datetime.timedelta has no 'months'
                    # argument -- this raises TypeError if ever reached.
                    one_month = datetime.timedelta(months=1)
                    next_run = next_run + one_month

    def _calc_day(self, next_run, cron):
        # start with dow as per cron if dow and day are set
        # then dow is used if it comes before day. If dow
        # is *, then ignore it.
        if str(cron["dow"]) != str("*"):
            # convert any integers to lists in order to easily compare values
            m = next_run.month
            while True:
                if next_run.month is not m:
                    # Rolled into a new month: reset the clock and re-check
                    # the month constraint.
                    next_run = next_run.replace(hour=0, minute=0)
                    next_run = self._calc_month(next_run, cron)
                if next_run.weekday() in cron["dow"] or next_run.day in cron["day"]:
                    return next_run
                else:
                    one_day = datetime.timedelta(days=1)
                    next_run = next_run + one_day
        else:
            m = next_run.month
            while True:
                if next_run.month is not m:
                    next_run = next_run.replace(hour=0, minute=0)
                    next_run = self._calc_month(next_run, cron)
                # if cron["dow"] is next_run.weekday() or cron["day"] is next_run.day:
                if next_run.day in cron["day"]:
                    return next_run
                else:
                    one_day = datetime.timedelta(days=1)
                    next_run = next_run + one_day

    def _calc_hour(self, next_run, cron):
        # Advance next_run hour-by-hour until the hour matches, re-running
        # the month/day calculations whenever a boundary is crossed.
        m = next_run.month
        d = next_run.day
        while True:
            if next_run.month is not m:
                next_run = next_run.replace(hour=0, minute=0)
                next_run = self._calc_month(next_run, cron)
            if next_run.day is not d:
                next_run = next_run.replace(hour=0)
                next_run = self._calc_day(next_run, cron)
            if next_run.hour in cron["hour"]:
                return next_run
            else:
                m = next_run.month
                d = next_run.day
                one_hour = datetime.timedelta(hours=1)
                next_run = next_run + one_hour

    def _calc_minute(self, next_run, cron):
        # Advance next_run minute-by-minute until the minute matches,
        # re-running the coarser calculations on boundary crossings.
        one_minute = datetime.timedelta(minutes=1)
        m = next_run.month
        d = next_run.day
        h = next_run.hour
        while True:
            if next_run.month is not m:
                next_run = next_run.replace(minute=0)
                next_run = self._calc_month(next_run, cron)
            if next_run.day is not d:
                next_run = next_run.replace(minute=0)
                next_run = self._calc_day(next_run, cron)
            if next_run.hour is not h:
                next_run = next_run.replace(minute=0)
                # NOTE(review): looks like a copy-paste slip -- an hour
                # change presumably should re-run _calc_hour, not _calc_day.
                next_run = self._calc_day(next_run, cron)
            if next_run.minute in cron["min"]:
                return next_run
            else:
                m = next_run.month
                d = next_run.day
                h = next_run.hour
                next_run = next_run + one_minute

    def _get_next_run(self, cron):
        """Compute the next datetime at which a compiled cron entry fires."""
        one_minute = datetime.timedelta(minutes=1)
        # go up 1 minute because it shouldn't happen right when added
        now = datetime.datetime.now() + one_minute
        next_run = now.replace(second=0, microsecond=0)
        # start with month, which will also help calculate year
        next_run = self._calc_month(next_run, cron)
        next_run = self._calc_day(next_run, cron)
        next_run = self._calc_hour(next_run, cron)
        next_run = self._calc_minute(next_run, cron)
        return next_run
|
apache-2.0
|
akshaynathr/mailman
|
src/mailman/commands/eml_help.py
|
3
|
3002
|
# Copyright (C) 2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""The email command 'help'."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'Help',
]
from zope.interface import implementer
from mailman.config import config
from mailman.core.i18n import _
from mailman.interfaces.command import ContinueProcessing, IEmailCommand
from mailman.utilities.string import wrap
# Separator used when echoing the argument list back in error messages.
SPACE = ' '
@implementer(IEmailCommand)
class Help:
    """The email 'help' command."""

    name = 'help'
    argument_description = '[command]'
    description = _('Get help about available email commands.')
    short_description = description

    def process(self, mlist, msg, msgdata, arguments, results):
        """See `IEmailCommand`."""
        # Dispatch on argument count: no argument -> overview of all
        # commands; one argument -> details for that command; more -> error.
        if len(arguments) == 0:
            return self._print_overview(results)
        if len(arguments) == 1:
            return self._print_details(arguments[0], results)
        printable_arguments = SPACE.join(arguments)
        print(_('$self.name: too many arguments: $printable_arguments'),
              file=results)
        return ContinueProcessing.no

    def _print_overview(self, results):
        # One line per registered command: the name padded to a common
        # width, then its short description.
        length = max(len(command) for command in config.commands)
        format = '{{0: <{0}s}} - {{1}}'.format(length)
        for command_name in sorted(config.commands):
            command = config.commands[command_name]
            short_description = getattr(
                command, 'short_description', _('n/a'))
            print(format.format(command.name, short_description),
                  file=results)
        return ContinueProcessing.yes

    def _print_details(self, command_name, results):
        # Usage line, short description, and the long description when it
        # differs from the short one.
        command = config.commands.get(command_name)
        if command is None:
            print(_('$self.name: no such command: $command_name'),
                  file=results)
            return ContinueProcessing.no
        print('{0} {1}'.format(command.name, command.argument_description),
              file=results)
        print(command.short_description, file=results)
        if command.short_description != command.description:
            print(wrap(command.description), file=results)
        return ContinueProcessing.yes
|
gpl-3.0
|
credativ/pulp
|
client_lib/pulp/client/extensions/loader.py
|
15
|
8665
|
# -*- coding: utf-8 -*-
"""
Functionality related to loading extensions from a set location. The client
context is constructed ahead of time and provided to this module, which
then uses it to instantiate the extension components.
"""
import copy
from gettext import gettext as _
import logging
import os
import sys
import pkg_resources
_logger = logging.getLogger(__name__)

# Names of the modules in each extension pack for initializing the pack
_MODULE_CLI = 'pulp_cli'
_MODULE_SHELL = 'pulp_shell'

# Module/entry-point attribute consulted when ordering extension loading;
# lower priorities load first (see load_extensions).
PRIORITY_VAR = 'PRIORITY'
DEFAULT_PRIORITY = 5

# Keys into the per-priority dict built by load_extensions.
_MODULES = 'modules'
_ENTRY_POINTS = 'entry points'

# name of the entry point group; the %s is filled in with the client role
# ("admin" or "consumer").
ENTRY_POINT_EXTENSIONS = 'pulp.extensions.%s'
class ExtensionLoaderException(Exception):
    """Root of the extension-loading exception hierarchy."""
class InvalidExtensionsDirectory(ExtensionLoaderException):
    """Raised when the extensions directory is missing or unreadable."""

    def __init__(self, dir):
        super(InvalidExtensionsDirectory, self).__init__()
        # Path that failed the access check; surfaced in the message.
        self.dir = dir

    def __str__(self):
        return _('Inaccessible or missing extensions directory [%(d)s]' % {'d': self.dir})
class LoadFailed(ExtensionLoaderException):
    """
    Raised if one or more of the extensions failed to load. All failed
    extensions will be listed in the exception, however the causes are logged
    rather than carried in this exception.
    """

    def __init__(self, failed_packs):
        super(LoadFailed, self).__init__()
        # Names of the packs that could not be loaded.
        self.failed_packs = failed_packs

    def __str__(self):
        joined = ', '.join(self.failed_packs)
        return _(
            'The following extension packs failed to load: [%s]' % joined)
# Unit test marker exceptions
class ImportFailed(ExtensionLoaderException):
    """Marker exception: an extension pack could not be imported as a module."""
    def __init__(self, pack_name):
        """
        :param pack_name: name of the pack whose import failed
        :type  pack_name: str
        """
        super(ImportFailed, self).__init__()
        self.pack_name = pack_name
class NoInitFunction(ExtensionLoaderException):
    """Raised when a pack's init module does not define initialize()."""
    pass
class InitError(ExtensionLoaderException):
    """Raised when a pack's initialize() call itself raises."""
    pass
class InvalidExtensionConfig(ExtensionLoaderException):
    # NOTE(review): not raised anywhere in this module; presumably raised
    # by extensions themselves — confirm against callers.
    pass
def load_extensions(extensions_dir, context, role):
    """
    Load and initialize all client extensions for the given role.

    Extensions come from two places: directory-based packs found in
    extensions_dir, and setuptools entry points registered under
    'pulp.extensions.<role>'. Both are merged into a single structure
    ("sorted_extensions") keyed by priority:

        {priority: {_MODULES: [<modules>], _ENTRY_POINTS: [<entry points>]}}

    so that the modules and entry points for a given priority can be
    loaded at the same time.

    @param extensions_dir: directory in which to find extension packs
    @type extensions_dir: str
    @param context: pre-populated context the extensions should be given to
           interact with the client
    @type context: pulp.client.extensions.core.ClientContext
    @param role: name of a role, either "admin" or "consumer", so we know
           which extensions to load
    @type role: str

    @raise InvalidExtensionsDirectory: if extensions_dir is missing or unreadable
    @raise LoadFailed: if any pack fails to import or initialize
    """
    # Validation
    if not os.access(extensions_dir, os.F_OK | os.R_OK):
        raise InvalidExtensionsDirectory(extensions_dir)
    # identify modules and sort them
    try:
        unsorted_modules = _load_pack_modules(extensions_dir)
        sorted_extensions = _resolve_order(unsorted_modules)
    except ImportFailed, e:
        # Python 2 three-expression raise: keep the original traceback
        raise LoadFailed([e.pack_name]), None, sys.exc_info()[2]
    # find extensions from entry points and add them to the sorted structure
    for extension in pkg_resources.iter_entry_points(ENTRY_POINT_EXTENSIONS % role):
        priority = getattr(extension, PRIORITY_VAR, DEFAULT_PRIORITY)
        sorted_extensions.setdefault(priority, {}).setdefault(_ENTRY_POINTS, []).append(extension)
    error_packs = []
    # Lower priorities load first; within a priority, modules load before
    # entry points.
    for priority in sorted(sorted_extensions.keys()):
        for module in sorted_extensions[priority].get(_MODULES, []):
            try:
                _load_pack(extensions_dir, module, context)
            except ExtensionLoaderException, e:
                # Do a best-effort attempt to load all extensions. If any fail,
                # the cause will be logged by _load_pack. This method should
                # continue to load extensions so all of the errors are logged.
                error_packs.append(module.__name__)
        for entry_point in sorted_extensions[priority].get(_ENTRY_POINTS, []):
            # NOTE(review): entry point failures are not collected into
            # error_packs; an exception here aborts loading — confirm intended.
            entry_point.load()(context)
    if len(error_packs) > 0:
        raise LoadFailed(error_packs)
def _load_pack_modules(extensions_dir):
    """
    Loads the modules for each pack in the extensions directory, taking care
    to update the system path as appropriate.

    Side effect: extensions_dir is appended to sys.path (once) so each pack
    can be imported by name.

    @return: list of module instances loaded from the call
    @rtype: list
    @raises ImportFailed: if any of the entries in extensions_dir cannot be
            loaded as a python module
    """
    # Add the extensions directory to the path so each extension can be
    # loaded as a python module
    if extensions_dir not in sys.path:
        sys.path.append(extensions_dir)
    modules = []
    # Sort for a deterministic import order
    pack_names = sorted(os.listdir(extensions_dir))
    for pack in pack_names:
        # Skip hidden files/directories (e.g. .svn, .git)
        if pack.startswith('.'):
            continue
        try:
            mod = __import__(pack)
            modules.append(mod)
        except Exception:
            # Python 2 three-expression raise: keep the original traceback
            raise ImportFailed(pack), None, sys.exc_info()[2]
    return modules
def _resolve_order(modules):
    """
    Determine the initialization order for the given extension modules.

    A module may declare an integer priority through its PRIORITY attribute;
    modules without one receive DEFAULT_PRIORITY. Lower priorities are
    initialized first, and modules sharing a priority are ordered
    alphabetically by module name. This method makes no assumptions on the
    valid range of priorities.

    @param modules: list of extension module instances
    @type modules: list

    @return: dict where keys are priority levels, and values are dicts with
             key = _MODULES and value = list of modules
    @rtype: dict
    """
    by_priority = {}  # key: priority, value: {_MODULES: [module, ...]}
    for module in modules:
        try:
            priority = int(getattr(module, PRIORITY_VAR))
        except AttributeError:
            # Priority is optional; fall back to the default
            priority = DEFAULT_PRIORITY
        bucket = by_priority.setdefault(priority, {})
        bucket.setdefault(_MODULES, []).append(module)
    # Alphabetize the modules within each priority level
    for level in by_priority.values():
        level.get(_MODULES, []).sort(key=lambda mod: mod.__name__)
    return by_priority
def _load_pack(extensions_dir, pack_module, context):
    """
    Initialize a single extension pack: locate its UI-specific init module
    (pulp_cli or pulp_shell depending on the context), import it, and invoke
    its initialize() function with a copy of the context.

    @raise ImportFailed: if the init module exists but cannot be imported
    @raise NoInitFunction: if the init module lacks an initialize function
    @raise InitError: if initialize() raises
    """
    # Figure out which initialization module we're loading
    init_mod_name = None
    if context.cli is not None:
        init_mod_name = _MODULE_CLI
    elif context.shell is not None:
        init_mod_name = _MODULE_SHELL
    # NOTE(review): if the context has neither cli nor shell, init_mod_name
    # stays None and the join below raises TypeError — confirm unreachable.
    # Check for the file's existence first. This will make it easier to
    # differentiate the difference between a pack not supporting a particular
    # UI style and a failure to load the init module.
    init_mod_filename = os.path.join(extensions_dir, pack_module.__name__, init_mod_name + '.py')
    if not os.path.exists(init_mod_filename):
        _logger.debug(_('No plugin initialization module [%(m)s] found, skipping '
                        'initialization' % {'m': init_mod_filename}))
        return
    # Figure out the full package name for the module and import it.
    try:
        init_mod = __import__('%s.%s' % (pack_module.__name__, init_mod_name))
    except Exception:
        _logger.exception(_('Could not load initialization module [%(m)s]' % {'m': init_mod_name}))
        # Python 2 three-expression raise: keep the original traceback
        raise ImportFailed(pack_module.__name__), None, sys.exc_info()[2]
    # Get a handle on the initialize function
    try:
        # __import__ returns the top-level package; descend to the submodule
        ui_init_module = getattr(init_mod, init_mod_name)
        init_func = getattr(ui_init_module, 'initialize')
    except AttributeError:
        _logger.exception(_('Module [%(m)s] does not define the required '
                            'initialize function' % {'m': init_mod_name}))
        raise NoInitFunction(), None, sys.exc_info()[2]
    # Invoke the module's initialization, passing a copy of the context so
    # one extension doesn't accidentally muck with it and affect another.
    context_copy = copy.copy(context)
    context_copy.config = copy.copy(context.config)
    try:
        init_func(context_copy)
    except Exception:
        _logger.exception(_('Module [%(m)s] could not be initialized' % {'m': init_mod_name}))
        raise InitError(), None, sys.exc_info()[2]
|
gpl-2.0
|
JohnDenker/brython
|
www/src/Lib/unittest/signals.py
|
1016
|
2403
|
import signal
import weakref
from functools import wraps
__unittest = True
class _InterruptHandler(object):
def __init__(self, default_handler):
self.called = False
self.original_handler = default_handler
if isinstance(default_handler, int):
if default_handler == signal.SIG_DFL:
# Pretend it's signal.default_int_handler instead.
default_handler = signal.default_int_handler
elif default_handler == signal.SIG_IGN:
# Not quite the same thing as SIG_IGN, but the closest we
# can make it: do nothing.
def default_handler(unused_signum, unused_frame):
pass
else:
raise TypeError("expected SIGINT signal handler to be "
"signal.SIG_IGN, signal.SIG_DFL, or a "
"callable object")
self.default_handler = default_handler
def __call__(self, signum, frame):
installed_handler = signal.getsignal(signal.SIGINT)
if installed_handler is not self:
# if we aren't the installed handler, then delegate immediately
# to the default handler
self.default_handler(signum, frame)
if self.called:
self.default_handler(signum, frame)
self.called = True
for result in _results.keys():
result.stop()
# Registered result objects (value is a dummy 1); weak keys keep
# registration from pinning result objects in memory.
_results = weakref.WeakKeyDictionary()


def registerResult(result):
    """Track *result* so an interrupt can stop it."""
    _results[result] = 1


def removeResult(result):
    """Stop tracking *result*; return True if it was registered."""
    was_present = _results.pop(result, None) is not None
    return was_present
# Singleton handler installed by installHandler(); None until installed.
_interrupt_handler = None
def installHandler():
    """Install the SIGINT handler that stops registered results (idempotent)."""
    global _interrupt_handler
    if _interrupt_handler is None:
        # Remember the previously installed handler so it can be restored.
        default_handler = signal.getsignal(signal.SIGINT)
        _interrupt_handler = _InterruptHandler(default_handler)
        signal.signal(signal.SIGINT, _interrupt_handler)
def removeHandler(method=None):
    """Restore the original SIGINT handler.

    Called with no argument, uninstalls the interrupt handler immediately.
    Called with a callable, acts as a decorator: the returned wrapper runs
    *method* with the handler removed and reinstates whatever handler was
    installed once the call finishes.
    """
    if method is None:
        global _interrupt_handler
        if _interrupt_handler is not None:
            signal.signal(signal.SIGINT, _interrupt_handler.original_handler)
        return

    @wraps(method)
    def wrapper(*args, **kwargs):
        previous = signal.getsignal(signal.SIGINT)
        removeHandler()
        try:
            return method(*args, **kwargs)
        finally:
            # Reinstate whatever was installed before the call.
            signal.signal(signal.SIGINT, previous)
    return wrapper
|
bsd-3-clause
|
ealgis/ealgis
|
django/ealgis/ealauth/migrations/0009_auto_20170112_0911.py
|
2
|
1405
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-12 09:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """
    Drops the legacy metadata models (ColumnInfo, GeometryLinkage,
    GeometrySource, GeometrySourceProjected, TableInfo): first removes the
    inter-model foreign keys and the columninfo unique constraint so the
    models can be deleted without dangling references, then deletes the
    models themselves.
    """
    dependencies = [
        ('ealauth', '0008_auto_20161217_1337'),
    ]
    operations = [
        # Constraint/FK removals must precede the DeleteModel operations.
        migrations.AlterUniqueTogether(
            name='columninfo',
            unique_together=set([]),
        ),
        migrations.RemoveField(
            model_name='columninfo',
            name='tableinfo_id',
        ),
        migrations.RemoveField(
            model_name='geometrylinkage',
            name='attr_table_info_id',
        ),
        migrations.RemoveField(
            model_name='geometrylinkage',
            name='geo_source_id',
        ),
        migrations.RemoveField(
            model_name='geometrysource',
            name='tableinfo_id',
        ),
        migrations.RemoveField(
            model_name='geometrysourceprojected',
            name='geometry_source_id',
        ),
        migrations.DeleteModel(
            name='ColumnInfo',
        ),
        migrations.DeleteModel(
            name='GeometryLinkage',
        ),
        migrations.DeleteModel(
            name='GeometrySource',
        ),
        migrations.DeleteModel(
            name='GeometrySourceProjected',
        ),
        migrations.DeleteModel(
            name='TableInfo',
        ),
    ]
|
gpl-3.0
|
dreamhost/ceilometer
|
ceilometer/objectstore/swift_middleware.py
|
1
|
5594
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 eNovance <licensing@enovance.com>
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Ceilometer Middleware for Swift Proxy
Configuration:
In /etc/swift/proxy-server.conf on the main pipeline add "ceilometer" just
before "proxy-server" and add the following filter in the file:
[filter:ceilometer]
use = egg:ceilometer#swift
# Some optional configuration
# this allow to publish additional metadata
metadata_headers = X-TEST
"""
from __future__ import absolute_import
from oslo.config import cfg
from stevedore import dispatch
from swift.common.utils import split_path
import webob
# Compatibility shims: the request class and InputProxy moved between
# Swift releases, so fall back progressively.
REQUEST = webob
try:
    # Swift >= 1.7.5
    import swift.common.swob
    REQUEST = swift.common.swob
except ImportError:
    pass
try:
    # Swift > 1.7.5 ... module exists but doesn't contain class.
    from swift.common.utils import InputProxy
except ImportError:
    # Swift <= 1.7.5 ... module exists and has class.
    from swift.common.middleware.proxy_logging import InputProxy
from ceilometer import counter
from ceilometer.openstack.common import context
from ceilometer.openstack.common import timeutils
from ceilometer import pipeline
from ceilometer import service
class CeilometerMiddleware(object):
    """
    WSGI middleware that counts the bytes received and sent for each
    request passing through the Swift proxy and publishes them as
    storage.objects.incoming.bytes / storage.objects.outgoing.bytes
    delta counters via the ceilometer pipeline.
    """
    def __init__(self, app, conf):
        """
        :param app: the next WSGI application in the pipeline
        :param conf: filter configuration dict; 'metadata_headers' is a
                     comma-separated list of request headers to attach to
                     the counter's resource metadata
        """
        self.app = app
        # Normalize header names: "X-Test" -> "x_test"
        self.metadata_headers = [h.strip().replace('-', '_').lower()
                                 for h in conf.get(
                                     "metadata_headers",
                                     "").split(",") if h.strip()]
        service.prepare_service()
        publisher_manager = dispatch.NameDispatchExtensionManager(
            namespace=pipeline.PUBLISHER_NAMESPACE,
            # accept every publisher extension
            check_func=lambda x: True,
            invoke_on_load=True,
        )
        self.pipeline_manager = pipeline.setup_pipeline(publisher_manager)
    def __call__(self, env, start_response):
        # Capture the downstream start_response args so the response can be
        # started lazily from the wrapping generator below.
        start_response_args = [None]
        # InputProxy counts the bytes the request body actually delivers.
        input_proxy = InputProxy(env['wsgi.input'])
        env['wsgi.input'] = input_proxy
        def my_start_response(status, headers, exc_info=None):
            start_response_args[0] = (status, list(headers), exc_info)
        def iter_response(iterable):
            if start_response_args[0]:
                start_response(*start_response_args[0])
            bytes_sent = 0
            try:
                for chunk in iterable:
                    if chunk:
                        bytes_sent += len(chunk)
                    yield chunk
            finally:
                # Publish once the response is fully consumed (or aborted).
                self.publish_counter(env,
                                     input_proxy.bytes_received,
                                     bytes_sent)
        try:
            iterable = self.app(env, my_start_response)
        except Exception:
            # The app failed before producing a response; still account for
            # the request bytes received.
            self.publish_counter(env, input_proxy.bytes_received, 0)
            raise
        else:
            return iter_response(iterable)
    def publish_counter(self, env, bytes_received, bytes_sent):
        """
        Publish delta counters for the given byte totals; zero totals are
        skipped. Resource id is the account with its 'AUTH_' prefix removed.
        """
        req = REQUEST.Request(env)
        version, account, container, obj = split_path(req.path, 1, 4, True)
        now = timeutils.utcnow().isoformat()
        resource_metadata = {
            "path": req.path,
            "version": version,
            "container": container,
            "object": obj,
        }
        # Attach any configured request headers to the metadata.
        for header in self.metadata_headers:
            if header.upper() in req.headers:
                resource_metadata['http_header_%s' % header] = req.headers.get(
                    header.upper())
        with pipeline.PublishContext(
                context.get_admin_context(),
                cfg.CONF.counter_source,
                self.pipeline_manager.pipelines,
        ) as publisher:
            if bytes_received:
                publisher([counter.Counter(
                    name='storage.objects.incoming.bytes',
                    type='delta',
                    unit='B',
                    volume=bytes_received,
                    user_id=env.get('HTTP_X_USER_ID'),
                    project_id=env.get('HTTP_X_TENANT_ID'),
                    resource_id=account.partition('AUTH_')[2],
                    timestamp=now,
                    resource_metadata=resource_metadata)])
            if bytes_sent:
                publisher([counter.Counter(
                    name='storage.objects.outgoing.bytes',
                    type='delta',
                    unit='B',
                    volume=bytes_sent,
                    user_id=env.get('HTTP_X_USER_ID'),
                    project_id=env.get('HTTP_X_TENANT_ID'),
                    resource_id=account.partition('AUTH_')[2],
                    timestamp=now,
                    resource_metadata=resource_metadata)])
def filter_factory(global_conf, **local_conf):
    """
    Paste filter factory: merge the global and filter-local configuration
    and return a callable that wraps a WSGI app in CeilometerMiddleware.
    """
    conf = dict(global_conf)
    conf.update(local_conf)

    def ceilometer_filter(app):
        # Every wrapped app shares the merged configuration.
        return CeilometerMiddleware(app, conf)

    return ceilometer_filter
|
apache-2.0
|
williamfeng323/py-web
|
flask/lib/python3.6/site-packages/alembic/ddl/mysql.py
|
13
|
12426
|
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import types as sqltypes
from sqlalchemy import schema
from ..util.compat import string_types
from .. import util
from .impl import DefaultImpl
from .base import ColumnNullable, ColumnName, ColumnDefault, \
ColumnType, AlterColumn, format_column_name, \
format_server_default
from .base import alter_table
from ..autogenerate import compare
from ..util.sqla_compat import _is_type_bound, sqla_100
class MySQLImpl(DefaultImpl):
    """Alembic DDL implementation for the MySQL dialect."""
    __dialect__ = 'mysql'
    # MySQL cannot roll back DDL, so migrations are not wrapped in a
    # transaction.
    transactional_ddl = False
    def alter_column(self, table_name, column_name,
                     nullable=None,
                     server_default=False,
                     name=None,
                     type_=None,
                     schema=None,
                     existing_type=None,
                     existing_server_default=None,
                     existing_nullable=None,
                     autoincrement=None,
                     existing_autoincrement=None,
                     **kw
                     ):
        """
        Emit the MySQL form of ALTER COLUMN.

        MySQL has no per-attribute ALTER: a rename needs CHANGE and any
        other attribute change needs MODIFY, both of which restate the full
        column definition (filled in from the existing_* values where no new
        value is given). A default-only change uses the narrower
        ALTER COLUMN ... SET/DROP DEFAULT form.
        """
        if name is not None:
            # Rename (possibly plus other changes): CHANGE COLUMN
            self._exec(
                MySQLChangeColumn(
                    table_name, column_name,
                    schema=schema,
                    newname=name,
                    nullable=nullable if nullable is not None else
                    existing_nullable
                    if existing_nullable is not None
                    else True,
                    type_=type_ if type_ is not None else existing_type,
                    default=server_default if server_default is not False
                    else existing_server_default,
                    autoincrement=autoincrement if autoincrement is not None
                    else existing_autoincrement
                )
            )
        elif nullable is not None or \
                type_ is not None or \
                autoincrement is not None:
            # Attribute change without rename: MODIFY COLUMN
            self._exec(
                MySQLModifyColumn(
                    table_name, column_name,
                    schema=schema,
                    newname=name if name is not None else column_name,
                    nullable=nullable if nullable is not None else
                    existing_nullable
                    if existing_nullable is not None
                    else True,
                    type_=type_ if type_ is not None else existing_type,
                    default=server_default if server_default is not False
                    else existing_server_default,
                    autoincrement=autoincrement if autoincrement is not None
                    else existing_autoincrement
                )
            )
        elif server_default is not False:
            # Only the default changed: SET/DROP DEFAULT suffices
            self._exec(
                MySQLAlterDefault(
                    table_name, column_name, server_default,
                    schema=schema,
                )
            )
    def drop_constraint(self, const):
        """Drop a constraint, skipping type-bound CHECK constraints."""
        # Type-bound CHECKs (e.g. generated by a type) vanish with the type;
        # there is nothing to drop separately.
        if isinstance(const, schema.CheckConstraint) and _is_type_bound(const):
            return
        super(MySQLImpl, self).drop_constraint(const)
    def compare_server_default(self, inspector_column,
                               metadata_column,
                               rendered_metadata_default,
                               rendered_inspector_default):
        """Return whether the reflected and metadata defaults differ."""
        # partially a workaround for SQLAlchemy issue #3023; if the
        # column were created without "NOT NULL", MySQL may have added
        # an implicit default of '0' which we need to skip
        if metadata_column.type._type_affinity is sqltypes.Integer and \
                inspector_column.primary_key and \
                not inspector_column.autoincrement and \
                not rendered_metadata_default and \
                rendered_inspector_default == "'0'":
            return False
        else:
            return rendered_inspector_default != rendered_metadata_default
    def correct_for_autogen_constraints(self, conn_unique_constraints,
                                        conn_indexes,
                                        metadata_unique_constraints,
                                        metadata_indexes):
        """
        Filter out MySQL's implicitly created FK indexes so autogenerate
        does not report them as spurious adds/drops.
        """
        # TODO: if SQLA 1.0, make use of "duplicates_index"
        # metadata
        removed = set()
        for idx in list(conn_indexes):
            if idx.unique:
                continue
            # MySQL puts implicit indexes on FK columns, even if
            # composite and even if MyISAM, so can't check this too easily.
            # the name of the index may be the column name or it may
            # be the name of the FK constraint.
            for col in idx.columns:
                if idx.name == col.name:
                    conn_indexes.remove(idx)
                    removed.add(idx.name)
                    break
                for fk in col.foreign_keys:
                    if fk.name == idx.name:
                        conn_indexes.remove(idx)
                        removed.add(idx.name)
                        break
                if idx.name in removed:
                    break
        # then remove indexes from the "metadata_indexes"
        # that we've removed from reflected, otherwise they come out
        # as adds (see #202)
        for idx in list(metadata_indexes):
            if idx.name in removed:
                metadata_indexes.remove(idx)
        if not sqla_100:
            self._legacy_correct_for_dupe_uq_uix(
                conn_unique_constraints,
                conn_indexes,
                metadata_unique_constraints,
                metadata_indexes
            )
    def _legacy_correct_for_dupe_uq_uix(self, conn_unique_constraints,
                                        conn_indexes,
                                        metadata_unique_constraints,
                                        metadata_indexes):
        """Pre-SQLA-1.0 dedup of unique constraints vs. unique indexes."""
        # then dedupe unique indexes vs. constraints, since MySQL
        # doesn't really have unique constraints as a separate construct.
        # but look in the metadata and try to maintain constructs
        # that already seem to be defined one way or the other
        # on that side. See #276
        metadata_uq_names = set([
            cons.name for cons in metadata_unique_constraints
            if cons.name is not None])
        unnamed_metadata_uqs = set([
            compare._uq_constraint_sig(cons).sig
            for cons in metadata_unique_constraints
            if cons.name is None
        ])
        metadata_ix_names = set([
            cons.name for cons in metadata_indexes if cons.unique])
        conn_uq_names = dict(
            (cons.name, cons) for cons in conn_unique_constraints
        )
        conn_ix_names = dict(
            (cons.name, cons) for cons in conn_indexes if cons.unique
        )
        # A name seen as both a reflected UQ and a reflected unique index is
        # one MySQL construct reported twice; keep whichever form the
        # metadata declares.
        for overlap in set(conn_uq_names).intersection(conn_ix_names):
            if overlap not in metadata_uq_names:
                if compare._uq_constraint_sig(conn_uq_names[overlap]).sig \
                        not in unnamed_metadata_uqs:
                    conn_unique_constraints.discard(conn_uq_names[overlap])
            elif overlap not in metadata_ix_names:
                conn_indexes.discard(conn_ix_names[overlap])
    def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks):
        """Normalize reflected FK ondelete/onupdate against the metadata."""
        conn_fk_by_sig = dict(
            (compare._fk_constraint_sig(fk).sig, fk) for fk in conn_fks
        )
        metadata_fk_by_sig = dict(
            (compare._fk_constraint_sig(fk).sig, fk) for fk in metadata_fks
        )
        for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig):
            mdfk = metadata_fk_by_sig[sig]
            cnfk = conn_fk_by_sig[sig]
            # MySQL considers RESTRICT to be the default and doesn't
            # report on it. if the model has explicit RESTRICT and
            # the conn FK has None, set it to RESTRICT
            if mdfk.ondelete is not None and \
                    mdfk.ondelete.lower() == 'restrict' and \
                    cnfk.ondelete is None:
                cnfk.ondelete = 'RESTRICT'
            if mdfk.onupdate is not None and \
                    mdfk.onupdate.lower() == 'restrict' and \
                    cnfk.onupdate is None:
                cnfk.onupdate = 'RESTRICT'
class MySQLAlterDefault(AlterColumn):
    """DDL element for MySQL's ALTER COLUMN ... SET/DROP DEFAULT."""
    def __init__(self, name, column_name, default, schema=None):
        # NOTE(review): super(AlterColumn, self) deliberately skips
        # AlterColumn.__init__ and calls its parent's — confirm intended.
        super(AlterColumn, self).__init__(name, schema=schema)
        self.column_name = column_name
        self.default = default
class MySQLChangeColumn(AlterColumn):
    """DDL element carrying the full replacement column spec for CHANGE."""
    def __init__(self, name, column_name, schema=None,
                 newname=None,
                 type_=None,
                 nullable=None,
                 default=False,
                 autoincrement=None):
        super(AlterColumn, self).__init__(name, schema=schema)
        self.column_name = column_name
        self.nullable = nullable
        self.newname = newname
        self.default = default
        self.autoincrement = autoincrement
        # The full column definition must be restated, so a type is required
        if type_ is None:
            raise util.CommandError(
                "All MySQL CHANGE/MODIFY COLUMN operations "
                "require the existing type."
            )
        self.type_ = sqltypes.to_instance(type_)
class MySQLModifyColumn(MySQLChangeColumn):
    # Same payload as CHANGE; its compiler renders MODIFY (no rename).
    pass
# MySQL cannot alter a single column attribute in isolation; reject the
# generic per-attribute constructs so callers use MySQLChangeColumn /
# MySQLModifyColumn (which restate the whole column) instead.
@compiles(ColumnNullable, 'mysql')
@compiles(ColumnName, 'mysql')
@compiles(ColumnDefault, 'mysql')
@compiles(ColumnType, 'mysql')
def _mysql_doesnt_support_individual(element, compiler, **kw):
    raise NotImplementedError(
        "Individual alter column constructs not supported by MySQL"
    )
@compiles(MySQLAlterDefault, "mysql")
def _mysql_alter_default(element, compiler, **kw):
    """Render ALTER TABLE ... ALTER COLUMN ... SET DEFAULT / DROP DEFAULT."""
    return "%s ALTER COLUMN %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_column_name(compiler, element.column_name),
        # Single conditional expression: SET DEFAULT when a default is
        # given, DROP DEFAULT when element.default is None.
        "SET DEFAULT %s" % format_server_default(compiler, element.default)
        if element.default is not None
        else "DROP DEFAULT"
    )
@compiles(MySQLModifyColumn, "mysql")
def _mysql_modify_column(element, compiler, **kw):
    """Render ALTER TABLE ... MODIFY <column> <full column spec>."""
    return "%s MODIFY %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_column_name(compiler, element.column_name),
        _mysql_colspec(
            compiler,
            nullable=element.nullable,
            server_default=element.default,
            type_=element.type_,
            autoincrement=element.autoincrement
        ),
    )
@compiles(MySQLChangeColumn, "mysql")
def _mysql_change_column(element, compiler, **kw):
    """Render ALTER TABLE ... CHANGE <old name> <new name> <full column spec>."""
    return "%s CHANGE %s %s %s" % (
        alter_table(compiler, element.table_name, element.schema),
        format_column_name(compiler, element.column_name),
        format_column_name(compiler, element.newname),
        _mysql_colspec(
            compiler,
            nullable=element.nullable,
            server_default=element.default,
            type_=element.type_,
            autoincrement=element.autoincrement
        ),
    )
def _render_value(compiler, expr):
    """Render a server default: quote plain strings, compile SQL constructs."""
    # Anything that is not a plain string is assumed to be a SQLAlchemy
    # expression and is run through the SQL compiler.
    if not isinstance(expr, string_types):
        return compiler.sql_compiler.process(expr)
    return "'%s'" % expr
def _mysql_colspec(compiler, nullable, server_default, type_,
autoincrement):
spec = "%s %s" % (
compiler.dialect.type_compiler.process(type_),
"NULL" if nullable else "NOT NULL"
)
if autoincrement:
spec += " AUTO_INCREMENT"
if server_default is not False and server_default is not None:
spec += " DEFAULT %s" % _render_value(compiler, server_default)
return spec
@compiles(schema.DropConstraint, "mysql")
def _mysql_drop_constraint(element, compiler, **kw):
    """Redefine SQLAlchemy's drop constraint to
    raise errors for invalid constraint type."""
    constraint = element.element
    if isinstance(constraint, (schema.ForeignKeyConstraint,
                               schema.PrimaryKeyConstraint,
                               schema.UniqueConstraint)
                  ):
        # These have a dedicated DROP syntax; defer to the default rendering
        return compiler.visit_drop_constraint(element, **kw)
    elif isinstance(constraint, schema.CheckConstraint):
        raise NotImplementedError(
            "MySQL does not support CHECK constraints.")
    else:
        # MySQL has no generic DROP CONSTRAINT clause
        raise NotImplementedError(
            "No generic 'DROP CONSTRAINT' in MySQL - "
            "please specify constraint type")
|
mit
|
cnsoft/kbengine-cocos2dx
|
kbe/src/lib/python/Lib/test/test_cmd_line_script.py
|
6
|
14270
|
# tests command line execution of scripts
import unittest
import sys
import os
import os.path
import py_compile
from test import support
from test.script_helper import (
make_pkg, make_script, make_zip_pkg, make_zip_script,
assert_python_ok, assert_python_failure, temp_dir)
verbose = support.verbose
example_args = ['test1', 'test2', 'test3']
test_source = """\
# Script may be run with optimisation enabled, so don't rely on assert
# statements being executed
def assertEqual(lhs, rhs):
if lhs != rhs:
raise AssertionError('%r != %r' % (lhs, rhs))
def assertIdentical(lhs, rhs):
if lhs is not rhs:
raise AssertionError('%r is not %r' % (lhs, rhs))
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
assertEqual(result, ['Top level assignment', 'Lower level reference'])
# Check population of magic variables
assertEqual(__name__, '__main__')
print('__file__==%a' % __file__)
assertEqual(__cached__, None)
print('__package__==%r' % __package__)
# Check the sys module
import sys
assertIdentical(globals(), sys.modules[__name__].__dict__)
from test import test_cmd_line_script
example_args_list = test_cmd_line_script.example_args
assertEqual(sys.argv[1:], example_args_list)
print('sys.argv[0]==%a' % sys.argv[0])
print('sys.path[0]==%a' % sys.path[0])
# Check the working directory
import os
print('cwd==%a' % os.getcwd())
"""
def _make_test_script(script_dir, script_basename, source=test_source):
    """Create a script in script_dir from *source* (default: test_source)."""
    return make_script(script_dir, script_basename, source)
def _make_test_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
                       source=test_source, depth=1):
    """Create a zipfile containing a package (*depth* levels) with the script."""
    return make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
                        source, depth)
# There's no easy way to pass the script directory in to get
# -m to work (avoiding that is the whole point of making
# directories and zipfiles executable!)
# So we fake it for testing purposes with a custom launch script
# Template: first %s is a Python expression (or repr'd path) inserted at the
# front of sys.path; %r is the module to run as __main__.
launch_source = """\
import sys, os.path, runpy
sys.path.insert(0, %s)
runpy._run_module_as_main(%r)
"""
def _make_launch_script(script_dir, script_basename, module_name, path=None):
    """
    Create a launcher script that inserts *path* on sys.path and runs
    *module_name* as __main__ via runpy.
    """
    # The default path is a Python *expression* evaluated inside the
    # launcher (its own directory); an explicit path is embedded as a
    # repr'd literal.
    sys_path_entry = "os.path.dirname(__file__)" if path is None else repr(path)
    return make_script(script_dir, script_basename,
                       launch_source % (sys_path_entry, module_name))
class CmdLineTest(unittest.TestCase):
    def _check_output(self, script_name, exit_code, data,
                      expected_file, expected_argv0,
                      expected_path0, expected_package):
        """
        Assert that a script run exited successfully and that its captured
        output (bytes) reports the expected __file__, __package__,
        sys.argv[0], sys.path[0] and working directory.
        """
        if verbose > 1:
            print("Output from test script %r:" % script_name)
            print(data)
        self.assertEqual(exit_code, 0)
        printed_file = '__file__==%a' % expected_file
        printed_package = '__package__==%r' % expected_package
        printed_argv0 = 'sys.argv[0]==%a' % expected_argv0
        printed_path0 = 'sys.path[0]==%a' % expected_path0
        printed_cwd = 'cwd==%a' % os.getcwd()
        if verbose > 1:
            print('Expected output:')
            print(printed_file)
            print(printed_package)
            print(printed_argv0)
            print(printed_cwd)
        # data is captured subprocess output (bytes), hence the encoding
        self.assertIn(printed_file.encode('utf-8'), data)
        self.assertIn(printed_package.encode('utf-8'), data)
        self.assertIn(printed_argv0.encode('utf-8'), data)
        self.assertIn(printed_path0.encode('utf-8'), data)
        self.assertIn(printed_cwd.encode('utf-8'), data)
    def _check_script(self, script_name, expected_file,
                      expected_argv0, expected_path0,
                      expected_package,
                      *cmd_line_switches):
        """Run script_name with example_args and validate its output."""
        if not __debug__:
            # Propagate this process's optimization level (-O/-OO) to the child
            cmd_line_switches += ('-' + 'O' * sys.flags.optimize,)
        run_args = cmd_line_switches + (script_name,) + tuple(example_args)
        rc, out, err = assert_python_ok(*run_args)
        self._check_output(script_name, rc, out + err, expected_file,
                           expected_argv0, expected_path0, expected_package)
    def _check_import_error(self, script_name, expected_msg,
                            *cmd_line_switches):
        """Run script_name expecting failure and check stderr for expected_msg."""
        run_args = cmd_line_switches + (script_name,)
        rc, out, err = assert_python_failure(*run_args)
        if verbose > 1:
            print('Output from test script %r:' % script_name)
            print(err)
            print('Expected output: %r' % expected_msg)
        self.assertIn(expected_msg.encode('utf-8'), err)
    # Direct execution of a script file
    def test_basic_script(self):
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, 'script')
            self._check_script(script_name, script_name, script_name,
                               script_dir, None)
    # Direct execution of a legacy .pyc file (source removed)
    def test_script_compiled(self):
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, 'script')
            py_compile.compile(script_name, doraise=True)
            os.remove(script_name)
            pyc_file = support.make_legacy_pyc(script_name)
            self._check_script(pyc_file, pyc_file,
                               pyc_file, script_dir, None)
    # Executing a directory runs its __main__.py
    def test_directory(self):
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, '__main__')
            self._check_script(script_dir, script_name, script_dir,
                               script_dir, '')
    def test_directory_compiled(self):
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, '__main__')
            py_compile.compile(script_name, doraise=True)
            os.remove(script_name)
            pyc_file = support.make_legacy_pyc(script_name)
            self._check_script(script_dir, pyc_file, script_dir,
                               script_dir, '')
    # A directory without __main__.py must fail with a clear message
    def test_directory_error(self):
        with temp_dir() as script_dir:
            msg = "can't find '__main__' module in %r" % script_dir
            self._check_import_error(script_dir, msg)
    # Executing a zipfile runs its __main__.py
    def test_zipfile(self):
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, '__main__')
            zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)
            self._check_script(zip_name, run_name, zip_name, zip_name, '')
    def test_zipfile_compiled(self):
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, '__main__')
            compiled_name = py_compile.compile(script_name, doraise=True)
            zip_name, run_name = make_zip_script(script_dir, 'test_zip', compiled_name)
            self._check_script(zip_name, run_name, zip_name, zip_name, '')
    # A zipfile without __main__.py must fail with a clear message
    def test_zipfile_error(self):
        with temp_dir() as script_dir:
            script_name = _make_test_script(script_dir, 'not_main')
            zip_name, run_name = make_zip_script(script_dir, 'test_zip', script_name)
            msg = "can't find '__main__' module in %r" % zip_name
            self._check_import_error(zip_name, msg)
    # Running a module inside a package via the runpy launch script
    def test_module_in_package(self):
        with temp_dir() as script_dir:
            pkg_dir = os.path.join(script_dir, 'test_pkg')
            make_pkg(pkg_dir)
            script_name = _make_test_script(pkg_dir, 'script')
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script')
            self._check_script(launch_name, script_name, script_name, script_dir, 'test_pkg')
    # Same, with the package living inside a zipfile on sys.path
    def test_module_in_package_in_zipfile(self):
        with temp_dir() as script_dir:
            zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script')
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.script', zip_name)
            self._check_script(launch_name, run_name, run_name, zip_name, 'test_pkg')
    # Nested (depth=2) package inside a zipfile
    def test_module_in_subpackage_in_zipfile(self):
        with temp_dir() as script_dir:
            zip_name, run_name = _make_test_zip_pkg(script_dir, 'test_zip', 'test_pkg', 'script', depth=2)
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg.test_pkg.script', zip_name)
            self._check_script(launch_name, run_name, run_name, zip_name, 'test_pkg.test_pkg')
    # Running a package executes its __main__ submodule
    def test_package(self):
        with temp_dir() as script_dir:
            pkg_dir = os.path.join(script_dir, 'test_pkg')
            make_pkg(pkg_dir)
            script_name = _make_test_script(pkg_dir, '__main__')
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
            self._check_script(launch_name, script_name,
                               script_name, script_dir, 'test_pkg')
    def test_package_compiled(self):
        with temp_dir() as script_dir:
            pkg_dir = os.path.join(script_dir, 'test_pkg')
            make_pkg(pkg_dir)
            script_name = _make_test_script(pkg_dir, '__main__')
            compiled_name = py_compile.compile(script_name, doraise=True)
            os.remove(script_name)
            pyc_file = support.make_legacy_pyc(script_name)
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
            self._check_script(launch_name, pyc_file,
                               pyc_file, script_dir, 'test_pkg')
    # A package without __main__ must fail with a clear message
    def test_package_error(self):
        with temp_dir() as script_dir:
            pkg_dir = os.path.join(script_dir, 'test_pkg')
            make_pkg(pkg_dir)
            msg = ("'test_pkg' is a package and cannot "
                   "be directly executed")
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
            self._check_import_error(launch_name, msg)
    # A package whose __main__ is itself a package must also fail
    def test_package_recursion(self):
        with temp_dir() as script_dir:
            pkg_dir = os.path.join(script_dir, 'test_pkg')
            make_pkg(pkg_dir)
            main_dir = os.path.join(pkg_dir, '__main__')
            make_pkg(main_dir)
            msg = ("Cannot use package as __main__ module; "
                   "'test_pkg' is a package and cannot "
                   "be directly executed")
            launch_name = _make_launch_script(script_dir, 'launch', 'test_pkg')
            self._check_import_error(launch_name, msg)
def test_issue8202(self):
# Make sure package __init__ modules see "-m" in sys.argv0 while
# searching for the module to execute
with temp_dir() as script_dir:
with support.temp_cwd(path=script_dir):
pkg_dir = os.path.join(script_dir, 'test_pkg')
make_pkg(pkg_dir, "import sys; print('init_argv0==%r' % sys.argv[0])")
script_name = _make_test_script(pkg_dir, 'script')
rc, out, err = assert_python_ok('-m', 'test_pkg.script', *example_args)
if verbose > 1:
print(out)
expected = "init_argv0==%r" % '-m'
self.assertIn(expected.encode('utf-8'), out)
self._check_output(script_name, rc, out,
script_name, script_name, '', 'test_pkg')
def test_issue8202_dash_c_file_ignored(self):
    # Make sure a "-c" file in the current directory
    # does not alter the value of sys.path[0]
    with temp_dir() as work_dir:
        with support.temp_cwd(path=work_dir):
            # Plant a decoy file literally named "-c" in the cwd.
            with open("-c", "w") as decoy:
                decoy.write("data")
            exit_code, stdout, stderr = assert_python_ok(
                '-c',
                'import sys; print("sys.path[0]==%r" % sys.path[0])')
            if verbose > 1:
                print(stdout)
            expected = "sys.path[0]==%r" % ''
            self.assertIn(expected.encode('utf-8'), stdout)
def test_issue8202_dash_m_file_ignored(self):
    # Make sure a "-m" file in the current directory
    # does not alter the value of sys.path[0]
    with temp_dir() as work_dir:
        module_script = _make_test_script(work_dir, 'other')
        with support.temp_cwd(path=work_dir):
            # Plant a decoy file literally named "-m" in the cwd.
            with open("-m", "w") as decoy:
                decoy.write("data")
            exit_code, stdout, stderr = assert_python_ok(
                '-m', 'other', *example_args)
            self._check_output(module_script, exit_code, stdout,
                               module_script, module_script, '', '')
def test_dash_m_error_code_is_one(self):
    # If a module is invoked with the -m command line flag
    # and results in an error that the return code to the
    # shell is '1'
    with temp_dir() as work_dir:
        with support.temp_cwd(path=work_dir):
            package_dir = os.path.join(work_dir, 'test_pkg')
            make_pkg(package_dir)
            _make_test_script(
                package_dir, 'other',
                "if __name__ == '__main__': raise ValueError")
            exit_code, stdout, stderr = assert_python_failure(
                '-m', 'test_pkg.other', *example_args)
            if verbose > 1:
                print(stdout)
            self.assertEqual(exit_code, 1)
def test_non_ascii(self):
    """Run a script whose file name contains non-ASCII characters."""
    # Mac OS X denies the creation of a file with an invalid UTF-8 name.
    # Windows allows to create a name with an arbitrary bytes name, but
    # Python cannot a undecodable bytes argument to a subprocess.
    if (support.TESTFN_UNDECODABLE
        and sys.platform not in ('win32', 'darwin')):
        name = os.fsdecode(support.TESTFN_UNDECODABLE)
    elif support.TESTFN_NONASCII:
        name = support.TESTFN_NONASCII
    else:
        self.skipTest("need support.TESTFN_NONASCII")
    # Issue #16218
    source = 'print(ascii(__file__))\n'
    script_name = _make_test_script(os.curdir, name, source)
    self.addCleanup(support.unlink, script_name)
    rc, stdout, stderr = assert_python_ok(script_name)
    # The script prints ascii(__file__); it must round-trip to the path we
    # created, proving the interpreter handled the non-ASCII name.
    self.assertEqual(
        ascii(script_name),
        stdout.rstrip().decode('ascii'),
        'stdout=%r stderr=%r' % (stdout, stderr))
    self.assertEqual(0, rc)
def test_main():
    """Run the CmdLineTest suite and reap any leftover child processes."""
    support.run_unittest(CmdLineTest)
    support.reap_children()

if __name__ == '__main__':
    test_main()
|
lgpl-3.0
|
x111ong/django
|
django/core/handlers/wsgi.py
|
339
|
9181
|
from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
    '''
    LimitedStream wraps another stream in order to not allow reading from it
    past specified amount of bytes.
    '''
    def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
        self.stream = stream
        self.remaining = limit
        self.buffer = b''
        self.buf_size = buf_size

    def _read_limited(self, size=None):
        # Clamp the request to however many bytes the limit still allows.
        if size is None or size > self.remaining:
            size = self.remaining
        if size == 0:
            return b''
        chunk = self.stream.read(size)
        self.remaining -= len(chunk)
        return chunk

    def read(self, size=None):
        """Read up to `size` bytes (everything left when size is None)."""
        if size is None:
            data = self.buffer + self._read_limited()
            self.buffer = b''
            return data
        if size < len(self.buffer):
            # Serve entirely from the buffer.
            data, self.buffer = self.buffer[:size], self.buffer[size:]
            return data
        # size >= len(self.buffer): drain the buffer, then the stream.
        data = self.buffer + self._read_limited(size - len(self.buffer))
        self.buffer = b''
        return data

    def readline(self, size=None):
        """Read one line (optionally capped at `size` bytes)."""
        # Keep pulling limited chunks until a newline shows up in the
        # buffer, we already hold `size` bytes, or the stream dries up.
        while b'\n' not in self.buffer and \
                (size is None or len(self.buffer) < size):
            if size:
                # since size is not None here, len(self.buffer) < size
                chunk = self._read_limited(size - len(self.buffer))
            else:
                chunk = self._read_limited()
            if not chunk:
                break
            self.buffer += chunk
        sio = BytesIO(self.buffer)
        line = sio.readline(size) if size else sio.readline()
        self.buffer = sio.read()
        return line
class WSGIRequest(http.HttpRequest):
    """HttpRequest subclass populated from a WSGI environ dictionary."""

    def __init__(self, environ):
        script_name = get_script_name(environ)
        path_info = get_path_info(environ)
        if not path_info:
            # Sometimes PATH_INFO exists, but is empty (e.g. accessing
            # the SCRIPT_NAME URL without a trailing slash). We really need to
            # operate as if they'd requested '/'. Not amazingly nice to force
            # the path like this, but should be harmless.
            path_info = '/'
        self.environ = environ
        self.path_info = path_info
        # be careful to only replace the first slash in the path because of
        # http://test/something and http://test//something being different as
        # stated in http://www.ietf.org/rfc/rfc2396.txt
        self.path = '%s/%s' % (script_name.rstrip('/'),
                               path_info.replace('/', '', 1))
        self.META = environ
        self.META['PATH_INFO'] = path_info
        self.META['SCRIPT_NAME'] = script_name
        self.method = environ['REQUEST_METHOD'].upper()
        # Honour a charset given in the Content-Type header, but only if the
        # codecs module actually knows it.
        _, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
        if 'charset' in content_params:
            try:
                codecs.lookup(content_params['charset'])
            except LookupError:
                pass
            else:
                self.encoding = content_params['charset']
        self._post_parse_error = False
        # A missing or malformed CONTENT_LENGTH is treated as zero.
        try:
            content_length = int(environ.get('CONTENT_LENGTH'))
        except (ValueError, TypeError):
            content_length = 0
        # Cap body reads at the declared content length.
        self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
        self._read_started = False
        self.resolver_match = None

    def _get_scheme(self):
        # Scheme ('http'/'https') as reported by the WSGI server.
        return self.environ.get('wsgi.url_scheme')

    @cached_property
    def GET(self):
        # The WSGI spec says 'QUERY_STRING' may be absent.
        raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
        return http.QueryDict(raw_query_string, encoding=self._encoding)

    def _get_post(self):
        # Lazily parse the request body the first time POST is accessed.
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post

    def _set_post(self, post):
        self._post = post

    @cached_property
    def COOKIES(self):
        raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
        return http.parse_cookie(raw_cookie)

    def _get_files(self):
        # Lazily parse uploaded files the first time FILES is accessed.
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files

    POST = property(_get_post, _set_post)
    FILES = property(_get_files)
class WSGIHandler(base.BaseHandler):
    """WSGI application callable that drives Django's request handling."""

    # Guards the one-time middleware initialization across threads.
    initLock = Lock()
    request_class = WSGIRequest

    def __call__(self, environ, start_response):
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            with self.initLock:
                try:
                    # Check that middleware is still uninitialized.
                    if self._request_middleware is None:
                        self.load_middleware()
                except:
                    # Unload whatever middleware we got
                    self._request_middleware = None
                    raise
        set_script_prefix(get_script_name(environ))
        signals.request_started.send(sender=self.__class__, environ=environ)
        try:
            request = self.request_class(environ)
        except UnicodeDecodeError:
            # A request path/query that can't be decoded is a client error.
            logger.warning('Bad Request (UnicodeDecodeError)',
                exc_info=sys.exc_info(),
                extra={
                    'status_code': 400,
                }
            )
            response = http.HttpResponseBadRequest()
        else:
            response = self.get_response(request)
        response._handler_class = self.__class__
        status = '%s %s' % (response.status_code, response.reason_phrase)
        # Header names and values must be native strings per the WSGI spec.
        response_headers = [(str(k), str(v)) for k, v in response.items()]
        for c in response.cookies.values():
            response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
        start_response(force_str(status), response_headers)
        # Hand file responses to the server's optimized file wrapper, if any.
        if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'):
            response = environ['wsgi.file_wrapper'](response.file_to_stream)
        return response
def get_path_info(environ):
    """
    Returns the HTTP request's PATH_INFO as a unicode string.
    """
    return get_bytes_from_wsgi(environ, 'PATH_INFO', '/').decode(UTF_8)
def get_script_name(environ):
    """
    Returns the equivalent of the HTTP request's SCRIPT_NAME environment
    variable. If Apache mod_rewrite has been used, returns what would have been
    the script name prior to any rewriting (so it's the script name as seen
    from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
    set (to anything).
    """
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_text(settings.FORCE_SCRIPT_NAME)
    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every Web server (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    script_url = (get_bytes_from_wsgi(environ, 'SCRIPT_URL', '') or
                  get_bytes_from_wsgi(environ, 'REDIRECT_URL', ''))
    if script_url:
        # Strip PATH_INFO off the end to recover the script-name prefix.
        path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
        script_name = script_url[:-len(path_info)] if path_info else script_url
    else:
        script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
    return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as bytes.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    value = environ.get(str(key), str(default))
    if six.PY3:
        # Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
        # decoded with ISO-8859-1. This is wrong for Django websites where
        # UTF-8 is the default. Re-encode to recover the original bytestring.
        return value.encode(ISO_8859_1)
    return value
def get_str_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as str.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    raw = get_bytes_from_wsgi(environ, key, default)
    if six.PY3:
        # Decode as UTF-8, substituting U+FFFD for undecodable bytes.
        return raw.decode(UTF_8, errors='replace')
    return raw
|
bsd-3-clause
|
nordri/check_domains
|
lib/python2.7/site-packages/django/contrib/gis/db/backends/oracle/introspection.py
|
79
|
1925
|
import cx_Oracle
import sys
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six
class OracleIntrospection(DatabaseIntrospection):
    """Oracle introspection with GIS geometry-column support."""

    # Associating any OBJECTVAR instances with GeometryField. Of course,
    # this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
    # but it is the only object type supported within Django anyways.
    data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
    data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'

    def get_geometry_type(self, table_name, geo_col):
        """
        Return a (field_type, field_params) pair for the geometry column
        `geo_col` of `table_name`, read from Oracle's USER_SDO_GEOM_METADATA.
        """
        cursor = self.connection.cursor()
        try:
            # Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
            try:
                cursor.execute('SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
                               (table_name.upper(), geo_col.upper()))
                row = cursor.fetchone()
            except Exception as msg:
                # Re-raise with a friendlier message, preserving the original
                # traceback via six.reraise.
                new_msg = (
                    'Could not find entry in USER_SDO_GEOM_METADATA '
                    'corresponding to "%s"."%s"\n'
                    'Error message: %s.') % (table_name, geo_col, msg)
                six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])
            # TODO: Research way to find a more specific geometry field type for
            # the column's contents.
            field_type = 'GeometryField'
            # Getting the field parameters.
            field_params = {}
            # NOTE(review): if the query succeeds but matches no row, `row` is
            # None and the unpacking below raises TypeError -- confirm callers
            # only reach here for registered geometry columns.
            dim, srid = row
            if srid != 4326:
                field_params['srid'] = srid
            # Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
            dim = len(dim)
            if dim != 2:
                field_params['dim'] = dim
        finally:
            cursor.close()
        return field_type, field_params
|
gpl-3.0
|
skython/eXe
|
exe/webui/appletblock.py
|
2
|
7890
|
# ===========================================================================
# eXe
# Copyright 2004-2005, University of Auckland
# Copyright 2004-2007 eXe Project http://eXeLearning.org/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
AppletBlock can render and process AppletIdevices as XHTML
"""
import os.path
from exe.webui.block import Block
from exe.webui import common
#from string import Template
import logging
log = logging.getLogger(__name__)
# ===========================================================================
class AppletBlock(Block):
    """
    AppletBlock renders and processes AppletIdevices as XHTML.
    """
    # NOTE: this module is Python 2 code (it uses the `unicode` builtin).
    # The deprecated `<>` operator has been replaced with the equivalent `!=`.

    def __init__(self, parent, idevice):
        """
        Initialize
        """
        Block.__init__(self, parent, idevice)
        # Older saved idevices may lack the undo flag; default it to True.
        if not hasattr(self.idevice, 'undo'):
            self.idevice.undo = True

    def process(self, request):
        """
        Process the request arguments from the web server to see if any
        apply to this block
        """
        log.debug("process " + repr(request.args))
        Block.process(self, request)
        is_cancel = common.requestHasCancel(request)
        # Store the edited applet code unless the user cancelled.
        if "code" + self.id in request.args \
        and not is_cancel:
            self.idevice.appletCode = request.args["code" + self.id][0]
        # File-deletion request targeting this block.
        if "action" in request.args and request.args["action"][0] == self.id:
            self.idevice.deleteFile(request.args["object"][0])
            self.idevice.edit = True
            self.idevice.undo = False
        # Applet-type change (e.g. switching between Geogebra and Other).
        if "action" in request.args and request.args["action"][0] == "changeType" + self.id:
            self.idevice.type = request.args["object"][0]
            self.idevice.copyFiles()
            self.idevice.edit = True
            self.idevice.undo = False
        if "action" in request.args and request.args["action"][0] == "done":
            # remove the undo flag in order to reenable it next time:
            if hasattr(self.idevice, 'undo'):
                del self.idevice.undo
        # File-upload request.
        if "upload" + self.id in request.args:
            if "path" + self.id in request.args:
                filePath = request.args["path" + self.id][0]
                if filePath:
                    # Geogebra applets only accept .ggb files.
                    if self.idevice.type == "geogebra" and not filePath.endswith(".ggb"):
                        self.idevice.message = _("Please upload a .ggb file.")
                    else:
                        self.idevice.uploadFile(filePath)
                        self.idevice.message = ""
            self.idevice.edit = True
            self.idevice.undo = False

    def renderEdit(self, style):
        """
        Returns an XHTML string with the form elements for editing this block
        """
        log.debug("renderEdit")
        html = "<div class=\"iDevice\"><br/>\n"
        html += common.textInput("title" + self.id, self.idevice.title)
        html += u"<br/><br/>\n"
        # Applet-type selector; changing it submits the form immediately.
        types = [(_(u"Geogebra"), "geogebra"),
                 (_(u"Other"), "other")]
        html += u"<b>%s</b>" % _("Applet Type")
        html += '<select onchange="submitChange(\'changeType%s\', \'type%s\')";' % (self.id, self.id)
        html += 'name="type%s" id="type%s">\n' % (self.id, self.id)
        for type, value in types:
            html += "<option value=\"" + value + "\" "
            if self.idevice.type == value:
                html += "selected "
            html += ">" + type + "</option>\n"
        html += "</select> \n"
        html += common.elementInstruc(self.idevice.typeInstruc) + "<br/><br/>"
        # Show any pending user message (e.g. wrong file extension).
        # (Was `<>`: deprecated Python 2 inequality operator.)
        if self.idevice.message != "":
            html += '<p style="color:red"><b>' + self.idevice.message + '</b></p>'
        html += common.textInput("path" + self.id, "", 50)
        html += u'<input type="button" onclick="addFile(\'%s\')"' % self.id
        html += u'value="%s" />\n' % _(u"Add files")
        # NOTE(review): this submit input tag is never closed with '/>' --
        # looks like a long-standing markup glitch; left as-is to preserve
        # the rendered output byte-for-byte.
        html += u'<input type="submit" name="%s" value="%s"' % ("upload" + self.id,
                                                                _(u"Upload"))
        html += common.elementInstruc(self.idevice.fileInstruc)
        html += u'<br/>\n'
        html += u'<b>%s</b>\n' % _(u'Applet Code:')
        html += common.elementInstruc(self.idevice.codeInstruc)
        html += u'<br/>\n'
        html += common.textArea('code' + self.id,
                                self.idevice.appletCode, rows="12")
        # List the uploaded resources, each with a delete button.
        if self.idevice.userResources:
            html += '<table>'
            for resource in self.idevice.userResources:
                html += '<tr><td>%s</td><td>' % resource.storageName
                html += common.submitImage(self.id, resource.storageName,
                                           "/images/stock-cancel.png",
                                           _("Delete File"))
                html += '</td></tr>\n'
            html += '</table>'
        html += u'<br/>\n'
        html += self.renderEditButtons(undo=self.idevice.undo)
        html += u'\n</div>\n'
        return html

    def renderPreview(self, style):
        """
        Returns an XHTML string for previewing this block
        """
        log.debug("renderPreview")
        # Un-escape the stored applet markup and point its CODEBASE at the
        # package's resources directory.
        appletcode = self.idevice.appletCode
        appletcode = appletcode.replace('&gt;', '>')
        appletcode = appletcode.replace('&lt;', '<')
        appletcode = appletcode.replace('&quot;', '"')
        appletcode = appletcode.replace('&nbsp;', '')
        appletcode = appletcode.replace('<applet', '<applet CODEBASE="resources"')
        appletcode = appletcode.replace('<APPLET', '<applet CODEBASE="resources"')
        html = u"<div class=\"iDevice "
        html += u"emphasis" + unicode(self.idevice.emphasis) + "\" "
        html += u"ondblclick=\"submitLink('edit'," + self.id + ", 0);\">\n"
        html += appletcode
        html += u"<br/>"
        html += self.renderViewButtons()
        html += u"</div>\n"
        return html

    def renderView(self, style):
        """
        Returns an XHTML string for viewing this block
        """
        log.debug("renderView")
        html = u"<!-- applet iDevice -->\n"
        html += u"<div class=\"iDevice "
        html += u"emphasis" + unicode(self.idevice.emphasis) + "\">\n"
        # Un-escape the stored applet markup (no CODEBASE rewrite here: the
        # exported page lives next to its resources).
        appletcode = self.idevice.appletCode
        appletcode = appletcode.replace('&gt;', '>')
        appletcode = appletcode.replace('&lt;', '<')
        appletcode = appletcode.replace('&quot;', '"')
        appletcode = appletcode.replace('&nbsp;', '')
        html += appletcode
        html += u"<br/>"
        html += u"</div>\n"
        return html
# ===========================================================================
"""Register this block with the BlockFactory"""
from exe.engine.appletidevice import AppletIdevice
from exe.webui.blockfactory import g_blockFactory
g_blockFactory.registerBlockType(AppletBlock, AppletIdevice)
# ===========================================================================
|
gpl-2.0
|
sauloal/pycluster
|
pypy-1.9_64/lib-python/2.7/unittest/test/test_discovery.py
|
45
|
13032
|
import os
import re
import sys
import unittest
class TestDiscovery(unittest.TestCase):
    """Tests for unittest's test discovery (TestLoader.discover and friends).

    Heavily mocked tests so I can avoid hitting the filesystem: os.listdir,
    os.path.isfile and os.path.isdir are monkeypatched per test and restored
    via addCleanup.
    """

    def test_get_name_from_path(self):
        # A path under the top-level dir maps to a dotted module name.
        loader = unittest.TestLoader()
        loader._top_level_dir = '/foo'
        name = loader._get_name_from_path('/foo/bar/baz.py')
        self.assertEqual(name, 'bar.baz')
        if not __debug__:
            # asserts are off
            return
        # Paths outside the top-level dir are rejected by an assertion.
        with self.assertRaises(AssertionError):
            loader._get_name_from_path('/bar/baz.py')

    def test_find_tests(self):
        loader = unittest.TestLoader()

        original_listdir = os.listdir
        def restore_listdir():
            os.listdir = original_listdir
        original_isfile = os.path.isfile
        def restore_isfile():
            os.path.isfile = original_isfile
        original_isdir = os.path.isdir
        def restore_isdir():
            os.path.isdir = original_isdir

        # First listdir call serves the top level, the second the test_dir
        # package's contents.
        path_lists = [['test1.py', 'test2.py', 'not_a_test.py', 'test_dir',
                       'test.foo', 'test-not-a-module.py', 'another_dir'],
                      ['test3.py', 'test4.py', ]]
        os.listdir = lambda path: path_lists.pop(0)
        self.addCleanup(restore_listdir)

        def isdir(path):
            return path.endswith('dir')
        os.path.isdir = isdir
        self.addCleanup(restore_isdir)

        def isfile(path):
            # another_dir is not a package and so shouldn't be recursed into
            return not path.endswith('dir') and not 'another_dir' in path
        os.path.isfile = isfile
        self.addCleanup(restore_isfile)

        # Stub out module import and test loading with string markers.
        loader._get_module_from_name = lambda path: path + ' module'
        loader.loadTestsFromModule = lambda module: module + ' tests'

        top_level = os.path.abspath('/foo')
        loader._top_level_dir = top_level
        suite = list(loader._find_tests(top_level, 'test*.py'))

        expected = [name + ' module tests' for name in
                    ('test1', 'test2')]
        expected.extend([('test_dir.%s' % name) + ' module tests' for name in
                    ('test3', 'test4')])
        self.assertEqual(suite, expected)

    def test_find_tests_with_package(self):
        loader = unittest.TestLoader()

        original_listdir = os.listdir
        def restore_listdir():
            os.listdir = original_listdir
        original_isfile = os.path.isfile
        def restore_isfile():
            os.path.isfile = original_isfile
        original_isdir = os.path.isdir
        def restore_isdir():
            os.path.isdir = original_isdir

        directories = ['a_directory', 'test_directory', 'test_directory2']
        path_lists = [directories, [], [], []]
        os.listdir = lambda path: path_lists.pop(0)
        self.addCleanup(restore_listdir)

        os.path.isdir = lambda path: True
        self.addCleanup(restore_isdir)

        os.path.isfile = lambda path: os.path.basename(path) not in directories
        self.addCleanup(restore_isfile)

        class Module(object):
            # Records every constructed path and each load_tests invocation.
            paths = []
            load_tests_args = []

            def __init__(self, path):
                self.path = path
                self.paths.append(path)
                # Only the test_directory package defines load_tests.
                if os.path.basename(path) == 'test_directory':
                    def load_tests(loader, tests, pattern):
                        self.load_tests_args.append((loader, tests, pattern))
                        return 'load_tests'
                    self.load_tests = load_tests

            def __eq__(self, other):
                return self.path == other.path

            # Silence py3k warning
            __hash__ = None

        loader._get_module_from_name = lambda name: Module(name)
        def loadTestsFromModule(module, use_load_tests):
            if use_load_tests:
                raise self.failureException('use_load_tests should be False for packages')
            return module.path + ' module tests'
        loader.loadTestsFromModule = loadTestsFromModule

        loader._top_level_dir = '/foo'
        # this time no '.py' on the pattern so that it can match
        # a test package
        suite = list(loader._find_tests('/foo', 'test*'))

        # We should have loaded tests from the test_directory package by calling load_tests
        # and directly from the test_directory2 package
        self.assertEqual(suite,
                         ['load_tests', 'test_directory2' + ' module tests'])
        self.assertEqual(Module.paths, ['test_directory', 'test_directory2'])

        # load_tests should have been called once with loader, tests and pattern
        self.assertEqual(Module.load_tests_args,
                         [(loader, 'test_directory' + ' module tests', 'test*')])

    def test_discover(self):
        loader = unittest.TestLoader()

        original_isfile = os.path.isfile
        original_isdir = os.path.isdir
        def restore_isfile():
            os.path.isfile = original_isfile

        # With isfile always False the start dir is not importable.
        os.path.isfile = lambda path: False
        self.addCleanup(restore_isfile)

        orig_sys_path = sys.path[:]
        def restore_path():
            sys.path[:] = orig_sys_path
        self.addCleanup(restore_path)

        full_path = os.path.abspath(os.path.normpath('/foo'))
        with self.assertRaises(ImportError):
            loader.discover('/foo/bar', top_level_dir='/foo')

        # Even on failure the top-level dir is recorded and put on sys.path.
        self.assertEqual(loader._top_level_dir, full_path)
        self.assertIn(full_path, sys.path)

        os.path.isfile = lambda path: True
        os.path.isdir = lambda path: True
        def restore_isdir():
            os.path.isdir = original_isdir
        self.addCleanup(restore_isdir)

        # Capture the arguments discover() forwards to _find_tests.
        _find_tests_args = []
        def _find_tests(start_dir, pattern):
            _find_tests_args.append((start_dir, pattern))
            return ['tests']
        loader._find_tests = _find_tests
        loader.suiteClass = str

        suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')

        top_level_dir = os.path.abspath('/foo/bar')
        start_dir = os.path.abspath('/foo/bar/baz')
        self.assertEqual(suite, "['tests']")
        self.assertEqual(loader._top_level_dir, top_level_dir)
        self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
        self.assertIn(top_level_dir, sys.path)

    def test_discover_with_modules_that_fail_to_import(self):
        # A module that fails to import becomes a synthetic test case that
        # re-raises the ImportError when run.
        loader = unittest.TestLoader()

        listdir = os.listdir
        os.listdir = lambda _: ['test_this_does_not_exist.py']
        isfile = os.path.isfile
        os.path.isfile = lambda _: True
        orig_sys_path = sys.path[:]
        def restore():
            os.path.isfile = isfile
            os.listdir = listdir
            sys.path[:] = orig_sys_path
        self.addCleanup(restore)

        suite = loader.discover('.')
        self.assertIn(os.getcwd(), sys.path)
        self.assertEqual(suite.countTestCases(), 1)
        test = list(list(suite)[0])[0] # extract test from suite

        with self.assertRaises(ImportError):
            test.test_this_does_not_exist()

    def test_command_line_handling_parseArgs(self):
        # Haha - take that uninstantiable class
        program = object.__new__(unittest.TestProgram)

        args = []
        def do_discovery(argv):
            args.extend(argv)
        program._do_discovery = do_discovery
        # 'discover' as first argument routes the rest to _do_discovery.
        program.parseArgs(['something', 'discover'])
        self.assertEqual(args, [])

        program.parseArgs(['something', 'discover', 'foo', 'bar'])
        self.assertEqual(args, ['foo', 'bar'])

    def test_command_line_handling_do_discovery_too_many_arguments(self):
        class Stop(Exception):
            pass
        def usageExit():
            raise Stop

        program = object.__new__(unittest.TestProgram)
        program.usageExit = usageExit

        with self.assertRaises(Stop):
            # too many args
            program._do_discovery(['one', 'two', 'three', 'four'])

    def test_command_line_handling_do_discovery_calls_loader(self):
        program = object.__new__(unittest.TestProgram)

        class Loader(object):
            # Records every discover() call's arguments.
            args = []
            def discover(self, start_dir, pattern, top_level_dir):
                self.args.append((start_dir, pattern, top_level_dir))
                return 'tests'

        # -v sets verbosity; defaults are start='.', pattern='test*.py'.
        program._do_discovery(['-v'], Loader=Loader)
        self.assertEqual(program.verbosity, 2)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])

        Loader.args = []
        program = object.__new__(unittest.TestProgram)
        program._do_discovery(['--verbose'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])

        Loader.args = []
        program = object.__new__(unittest.TestProgram)
        program._do_discovery([], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])

        # Positional args fill start_dir, pattern, top_level_dir in order.
        Loader.args = []
        program = object.__new__(unittest.TestProgram)
        program._do_discovery(['fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'test*.py', None)])

        Loader.args = []
        program = object.__new__(unittest.TestProgram)
        program._do_discovery(['fish', 'eggs'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', None)])

        Loader.args = []
        program = object.__new__(unittest.TestProgram)
        program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])

        # The same three values can be given via -s / -t / -p options.
        Loader.args = []
        program = object.__new__(unittest.TestProgram)
        program._do_discovery(['-s', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'test*.py', None)])

        Loader.args = []
        program = object.__new__(unittest.TestProgram)
        program._do_discovery(['-t', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])

        Loader.args = []
        program = object.__new__(unittest.TestProgram)
        program._do_discovery(['-p', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'fish', None)])
        self.assertFalse(program.failfast)
        self.assertFalse(program.catchbreak)

        # Options can be combined; -f/-c toggle failfast/catchbreak.
        Loader.args = []
        program = object.__new__(unittest.TestProgram)
        program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v', '-f', '-c'],
                              Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', None)])
        self.assertEqual(program.verbosity, 2)
        self.assertTrue(program.failfast)
        self.assertTrue(program.catchbreak)

    def test_detect_module_clash(self):
        # A module already in sys.modules but imported from the wrong
        # location must make discovery fail loudly.
        class Module(object):
            __file__ = 'bar/foo.py'
        sys.modules['foo'] = Module
        full_path = os.path.abspath('foo')
        original_listdir = os.listdir
        original_isfile = os.path.isfile
        original_isdir = os.path.isdir

        def cleanup():
            os.listdir = original_listdir
            os.path.isfile = original_isfile
            os.path.isdir = original_isdir
            del sys.modules['foo']
            if full_path in sys.path:
                sys.path.remove(full_path)
        self.addCleanup(cleanup)

        def listdir(_):
            return ['foo.py']
        def isfile(_):
            return True
        def isdir(_):
            return True
        os.listdir = listdir
        os.path.isfile = isfile
        os.path.isdir = isdir

        loader = unittest.TestLoader()

        mod_dir = os.path.abspath('bar')
        expected_dir = os.path.abspath('foo')
        msg = re.escape(r"'foo' module incorrectly imported from %r. Expected %r. "
                "Is this module globally installed?" % (mod_dir, expected_dir))
        self.assertRaisesRegexp(
            ImportError, '^%s$' % msg, loader.discover,
            start_dir='foo', pattern='foo.py'
        )
        self.assertEqual(sys.path[0], full_path)

    def test_discovery_from_dotted_path(self):
        # A dotted package name as start_dir resolves to the package's
        # directory on disk.
        loader = unittest.TestLoader()

        tests = [self]
        expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))

        self.wasRun = False
        def _find_tests(start_dir, pattern):
            self.wasRun = True
            self.assertEqual(start_dir, expectedPath)
            return tests
        loader._find_tests = _find_tests
        suite = loader.discover('unittest.test')
        self.assertTrue(self.wasRun)
        self.assertEqual(suite._tests, tests)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
mit
|
chiviak/CouchPotatoServer
|
libs/pyutil/test/current/test_iputil.py
|
106
|
1287
|
#!/usr/bin/env python
# Python 2 module: it uses the old "except ImportError, le" syntax and the
# print statement, so it cannot run under Python 3.

try:
    from twisted.trial import unittest
    unittest # http://divmod.org/trac/ticket/1499
except ImportError, le:
    print "Skipping test_iputil since it requires Twisted and Twisted could not be imported: %s" % (le,)
else:
    from pyutil import iputil, testutil

    import re

    # Matches a dotted-quad IPv4 address such as "127.0.0.1".
    DOTTED_QUAD_RE=re.compile("^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$")

    class ListAddresses(testutil.SignalMixin):
        def test_get_local_ip_for(self):
            # The local address used to reach loopback should itself be a
            # dotted-quad string.
            addr = iputil.get_local_ip_for('127.0.0.1')
            self.failUnless(DOTTED_QUAD_RE.match(addr))

        def test_list_async(self):
            try:
                from twisted.trial import unittest
                unittest # http://divmod.org/trac/ticket/1499
                from pyutil import iputil
            except ImportError, le:
                raise unittest.SkipTest("iputil could not be imported (probably because its dependency, Twisted, is not installed). %s" % (le,))
            d = iputil.get_local_addresses_async()
            def _check(addresses):
                self.failUnless(len(addresses) >= 1) # always have localhost
                self.failUnless("127.0.0.1" in addresses, addresses)
            d.addCallbacks(_check)
            return d
        # Fail the deferred-based test if it takes longer than two seconds.
        test_list_async.timeout=2
|
gpl-3.0
|
laperry1/android_external_chromium_org
|
tools/telemetry/telemetry/core/backends/chrome/android_browser_backend.py
|
25
|
17290
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import pipes
import re
import subprocess
import sys
import time
from telemetry.core import exceptions
from telemetry.core import forwarders
from telemetry.core import util
from telemetry.core.backends import adb_commands
from telemetry.core.backends import browser_backend
from telemetry.core.backends.chrome import chrome_browser_backend
from telemetry.core.forwarders import android_forwarder
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
from pylib.device import device_errors # pylint: disable=F0401
from pylib.device import intent # pylint: disable=F0401
class AndroidBrowserBackendSettings(object):
  """Base settings bundle describing how to drive one Android browser."""

  def __init__(self, activity, cmdline_file, package, pseudo_exec_name,
               supports_tab_control, relax_ssl_check=False):
    self.activity = activity
    self._cmdline_file = cmdline_file
    self.package = package
    self.pseudo_exec_name = pseudo_exec_name
    self.supports_tab_control = supports_tab_control
    self.relax_ssl_check = relax_ssl_check

  def GetCommandLineFile(self, is_user_debug_build):  # pylint: disable=W0613
    """Path of the on-device file the browser reads its command line from."""
    return self._cmdline_file

  def GetDevtoolsRemotePort(self, adb):
    """Subclasses must name the devtools socket for their browser."""
    raise NotImplementedError()

  def RemoveProfile(self, adb):
    """Delete the on-device profile contents, sparing the installer's files."""
    entries = adb.device().RunShellCommand(
        'ls "%s"' % self.profile_dir, as_root=True)
    # Don't delete lib, since it is created by the installer.
    doomed = ['"%s/%s"' % (self.profile_dir, name)
              for name in entries if name != 'lib']
    adb.device().RunShellCommand('rm -r %s' % ' '.join(doomed), as_root=True)

  def PushProfile(self, _new_profile_dir, _adb):
    """Profile replacement is unsupported in the base configuration."""
    logging.critical('Profiles cannot be overriden with current configuration')
    sys.exit(1)

  @property
  def profile_dir(self):
    # Standard Android per-app data directory for this package.
    return '/data/data/%s/' % self.package
class ChromeBackendSettings(AndroidBrowserBackendSettings):
  """Backend settings for full Chrome on Android."""

  # Stores a default Preferences file, re-used to speed up "--page-repeat".
  _default_preferences_file = None

  def GetCommandLineFile(self, is_user_debug_build):
    # User-debug builds read the command line from /data/local/tmp;
    # other builds read it from /data/local.
    if is_user_debug_build:
      return '/data/local/tmp/chrome-command-line'
    else:
      return '/data/local/chrome-command-line'

  def __init__(self, package):
    super(ChromeBackendSettings, self).__init__(
        activity='com.google.android.apps.chrome.Main',
        cmdline_file=None,
        package=package,
        pseudo_exec_name='chrome',
        supports_tab_control=True)

  def GetDevtoolsRemotePort(self, adb):
    return 'localabstract:chrome_devtools_remote'

  def PushProfile(self, new_profile_dir, adb):
    # Pushing the profile is slow, so we don't want to do it every time.
    # Avoid this by pushing to a safe location using PushChangedFiles, and
    # then copying into the correct location on each test run.

    (profile_parent, profile_base) = os.path.split(new_profile_dir)
    # If the path ends with a '/' python split will return an empty string for
    # the base name; so we now need to get the base name from the directory.
    if not profile_base:
      profile_base = os.path.basename(profile_parent)

    saved_profile_location = '/sdcard/profile/%s' % profile_base
    adb.device().PushChangedFiles(new_profile_dir, saved_profile_location)

    adb.device().old_interface.EfficientDeviceDirectoryCopy(
        saved_profile_location, self.profile_dir)
    # Determine the app's uid so the pushed files can be chown'd to it.
    dumpsys = adb.device().RunShellCommand(
        'dumpsys package %s' % self.package)
    id_line = next(line for line in dumpsys if 'userId=' in line)
    # NOTE(review): '\d+' is a non-raw pattern; it works because \d is not a
    # recognized string escape, but r'\d+' would be safer.
    uid = re.search('\d+', id_line).group()
    files = adb.device().RunShellCommand(
        'ls "%s"' % self.profile_dir, as_root=True)
    # The installer-created lib dir must keep its original owner.
    files.remove('lib')
    paths = ['%s/%s' % (self.profile_dir, f) for f in files]
    for path in paths:
      # chown the entry itself plus up to three nested levels.
      extended_path = '%s %s/* %s/*/* %s/*/*/*' % (path, path, path, path)
      adb.device().RunShellCommand(
          'chown %s.%s %s' % (uid, uid, extended_path))
class ContentShellBackendSettings(AndroidBrowserBackendSettings):
  """Backend settings for the content_shell test browser."""

  def __init__(self, package):
    super(ContentShellBackendSettings, self).__init__(
        cmdline_file='/data/local/tmp/content-shell-command-line',
        pseudo_exec_name='content_shell',
        activity='org.chromium.content_shell_apk.ContentShellActivity',
        supports_tab_control=False,
        package=package)

  def GetDevtoolsRemotePort(self, adb):
    # Content shell always uses this fixed abstract socket name.
    return 'localabstract:content_shell_devtools_remote'
class ChromeShellBackendSettings(AndroidBrowserBackendSettings):
  """Backend settings for the chrome_shell test browser."""

  def __init__(self, package):
    super(ChromeShellBackendSettings, self).__init__(
        cmdline_file='/data/local/tmp/chrome-shell-command-line',
        pseudo_exec_name='chrome_shell',
        activity='org.chromium.chrome.shell.ChromeShellActivity',
        supports_tab_control=False,
        package=package)

  def GetDevtoolsRemotePort(self, adb):
    # Chrome shell always uses this fixed abstract socket name.
    return 'localabstract:chrome_shell_devtools_remote'
class WebviewBackendSettings(AndroidBrowserBackendSettings):
  """Settings for a browser hosted in a WebView shell activity."""

  def __init__(self, package,
               activity='org.chromium.telemetry_shell.TelemetryActivity'):
    super(WebviewBackendSettings, self).__init__(
        activity=activity,
        cmdline_file='/data/local/tmp/webview-command-line',
        package=package,
        pseudo_exec_name='webview',
        supports_tab_control=False)

  def GetDevtoolsRemotePort(self, adb):
    # The DevTools socket name for WebView depends on the activity PID's.
    # Poll with exponential backoff (1 + 2 + 4 + 8 s) until the package
    # reports at least one PID; the most recently listed process wins.
    retries = 0
    timeout = 1
    pid = None
    while True:
      pids = adb.ExtractPid(self.package)
      if (len(pids) > 0):
        pid = pids[-1]
        break
      time.sleep(timeout)
      retries += 1
      timeout *= 2
      if retries == 4:
        logging.critical('android_browser_backend: Timeout while waiting for '
                         'activity %s:%s to come up',
                         self.package,
                         self.activity)
        # NOTE(review): settings objects define no `browser` attribute, so
        # this raise most likely fails with AttributeError before the
        # BrowserGoneException is constructed -- confirm and fix upstream.
        raise exceptions.BrowserGoneException(self.browser,
                                              'Timeout waiting for PID.')
    return 'localabstract:webview_devtools_remote_%s' % str(pid)
class WebviewShellBackendSettings(WebviewBackendSettings):
  """WebView settings pinned to the AwShellActivity test shell."""

  def __init__(self, package):
    super(WebviewShellBackendSettings, self).__init__(
        package=package,
        activity='org.chromium.android_webview.shell.AwShellActivity')
class AndroidBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
  """The backend for controlling a browser instance running on Android."""

  def __init__(self, browser_options, backend_settings, use_rndis_forwarder,
               output_profile_path, extensions_to_load, target_arch,
               android_platform_backend):
    """Set up port forwarding, profile state and the command-line file.

    Raises:
      browser_backend.ExtensionsNotSupportedException: extensions were
          requested; the Android browser cannot load them.
    """
    super(AndroidBrowserBackend, self).__init__(
        supports_tab_control=backend_settings.supports_tab_control,
        supports_extensions=False, browser_options=browser_options,
        output_profile_path=output_profile_path,
        extensions_to_load=extensions_to_load)
    if len(extensions_to_load) > 0:
      raise browser_backend.ExtensionsNotSupportedException(
          'Android browser does not support extensions.')
    # Initialize fields so that an explosion during init doesn't break
    # in Close.
    self._android_platform_backend = android_platform_backend
    self._backend_settings = backend_settings
    self._saved_cmdline = ''
    self._target_arch = target_arch
    self._saved_sslflag = ''
    # TODO(tonyg): This is flaky because it doesn't reserve the port that it
    # allocates. Need to fix this.
    self._port = adb_commands.AllocateTestServerPort()
    # Disables android.net SSL certificate check. This is necessary for
    # applications using the android.net stack to work with proxy HTTPS
    # server created by telemetry. The original value is restored in Close().
    if self._backend_settings.relax_ssl_check:
      self._saved_sslflag = self._adb.device().GetProp('socket.relaxsslcheck')
      self._adb.device().SetProp('socket.relaxsslcheck', 'yes')
    # Kill old browser.
    self._KillBrowser()
    if self._adb.device().old_interface.CanAccessProtectedFileContents():
      if self.browser_options.profile_dir:
        self._backend_settings.PushProfile(self.browser_options.profile_dir,
                                           self._adb)
      elif not self.browser_options.dont_override_profile:
        self._backend_settings.RemoveProfile(self._adb)
    self._forwarder_factory = android_forwarder.AndroidForwarderFactory(
        self._adb, use_rndis_forwarder)
    if self.browser_options.netsim or use_rndis_forwarder:
      assert use_rndis_forwarder, 'Netsim requires RNDIS forwarding.'
      self.wpr_port_pairs = forwarders.PortPairs(
          http=forwarders.PortPair(0, 80),
          https=forwarders.PortPair(0, 443),
          dns=forwarders.PortPair(0, 53))
    # Set the debug app if needed.
    if self._adb.IsUserBuild():
      logging.debug('User build device, setting debug app')
      self._adb.device().RunShellCommand(
          'am set-debug-app --persistent %s' % self._backend_settings.package)

  @property
  def _adb(self):
    return self._android_platform_backend.adb

  def _KillBrowser(self):
    # We use KillAll rather than ForceStop for efficiency reasons.
    try:
      self._adb.device().KillAll(self._backend_settings.package, retries=0)
    except device_errors.CommandFailedError:
      pass

  def _SetUpCommandLine(self):
    """Write the browser's startup flags to its on-device flags file."""
    def QuoteIfNeeded(arg):
      # Properly escape "key=valueA valueB" to "key='valueA valueB'"
      # Values without spaces, or that seem to be quoted are left untouched.
      # This is required so CommandLine.java can parse valueB correctly
      # rather than as a separate switch.
      params = arg.split('=', 1)
      if len(params) != 2:
        return arg
      key, values = params
      if ' ' not in values:
        return arg
      if values[0] in '"\'' and values[-1] == values[0]:
        return arg
      return '%s=%s' % (key, pipes.quote(values))
    args = [self._backend_settings.pseudo_exec_name]
    args.extend(self.GetBrowserStartupArgs())
    content = ' '.join(QuoteIfNeeded(arg) for arg in args)
    cmdline_file = self._backend_settings.GetCommandLineFile(
        self._adb.IsUserBuild())
    as_root = self._adb.device().old_interface.CanAccessProtectedFileContents()
    try:
      # Save the current command line to restore later, except if it appears
      # to be a Telemetry created one. This is to prevent a common bug where
      # --host-resolver-rules borks people's browsers if something goes wrong
      # with Telemetry.
      self._saved_cmdline = ''.join(self._adb.device().ReadFile(cmdline_file))
      if '--host-resolver-rules' in self._saved_cmdline:
        self._saved_cmdline = ''
      self._adb.device().WriteTextFile(cmdline_file, content, as_root=as_root)
    except device_errors.CommandFailedError:
      logging.critical('Cannot set Chrome command line. '
                       'Fix this by flashing to a userdebug build.')
      sys.exit(1)

  def _RestoreCommandLine(self):
    """Put back whatever command line was saved by _SetUpCommandLine."""
    cmdline_file = self._backend_settings.GetCommandLineFile(
        self._adb.IsUserBuild())
    as_root = self._adb.device().old_interface.CanAccessProtectedFileContents()
    self._adb.device().WriteTextFile(cmdline_file, self._saved_cmdline,
                                     as_root=as_root)

  def Start(self):
    """Launch the browser activity and wait for DevTools to come up."""
    self._SetUpCommandLine()
    self._adb.device().RunShellCommand('logcat -c')
    if self.browser_options.startup_url:
      url = self.browser_options.startup_url
    elif self.browser_options.profile_dir:
      # A pushed profile may carry its own tabs; don't override them.
      url = None
    else:
      # If we have no existing tabs start with a blank page since default
      # startup with the NTP can lead to race conditions with Telemetry
      url = 'about:blank'
    # Dismiss any error dialogs. Limit the number in case we have an error
    # loop or we are failing to dismiss.
    for _ in xrange(10):
      if not self._adb.device().old_interface.DismissCrashDialogIfNeeded():
        break
    self._adb.device().StartActivity(
        intent.Intent(package=self._backend_settings.package,
                      activity=self._backend_settings.activity,
                      action=None, data=url, category=None),
        blocking=True)
    self._adb.Forward('tcp:%d' % self._port,
                      self._backend_settings.GetDevtoolsRemotePort(self._adb))
    try:
      self._WaitForBrowserToComeUp()
    except exceptions.BrowserGoneException:
      logging.critical('Failed to connect to browser.')
      if not self._adb.device().old_interface.CanAccessProtectedFileContents():
        logging.critical(
            'Resolve this by either: '
            '(1) Flashing to a userdebug build OR '
            '(2) Manually enabling web debugging in Chrome at '
            'Settings > Developer tools > Enable USB Web debugging.')
      sys.exit(1)
    except:
      import traceback
      traceback.print_exc()
      self.Close()
      raise
    finally:
      # Always restore the device's original flags file.
      self._RestoreCommandLine()

  def GetBrowserStartupArgs(self):
    args = super(AndroidBrowserBackend, self).GetBrowserStartupArgs()
    if self.forwarder_factory.does_forwarder_override_dns:
      # The forwarder handles DNS itself; host-resolver rules would conflict.
      args = [arg for arg in args
              if not arg.startswith('--host-resolver-rules')]
    args.append('--enable-remote-debugging')
    args.append('--disable-fre')
    args.append('--disable-external-intent-requests')
    return args

  @property
  def forwarder_factory(self):
    return self._forwarder_factory

  @property
  def adb(self):
    return self._adb

  @property
  def pid(self):
    # PID of the browser process; raises if the browser is gone.
    pids = self._adb.ExtractPid(self._backend_settings.package)
    if not pids:
      raise exceptions.BrowserGoneException(self.browser)
    return int(pids[0])

  @property
  def browser_directory(self):
    return None

  @property
  def profile_directory(self):
    return self._backend_settings.profile_dir

  @property
  def package(self):
    return self._backend_settings.package

  @property
  def activity(self):
    return self._backend_settings.activity

  def __del__(self):
    self.Close()

  def Close(self):
    """Kill the browser, restore device state and pull the profile if asked."""
    super(AndroidBrowserBackend, self).Close()
    self._KillBrowser()
    # Restore android.net SSL check.
    if self._backend_settings.relax_ssl_check:
      self._adb.device().SetProp('socket.relaxsslcheck', self._saved_sslflag)
    if self._output_profile_path:
      logging.info("Pulling profile directory from device: '%s'->'%s'.",
                   self._backend_settings.profile_dir,
                   self._output_profile_path)
      # To minimize bandwidth it might be good to look at whether all the
      # data pulled down is really needed e.g. .pak files.
      if not os.path.exists(self._output_profile_path):
        # BUGFIX: this previously referenced the misspelled attribute
        # '_output_profile_pathame', raising AttributeError here.
        os.makedirs(self._output_profile_path)
      files = self.adb.device().RunShellCommand(
          'ls "%s"' % self._backend_settings.profile_dir)
      for f in files:
        # Don't pull lib, since it is created by the installer.
        if f != 'lib':
          source = '%s%s' % (self._backend_settings.profile_dir, f)
          dest = os.path.join(self._output_profile_path, f)
          # self._adb.Pull(source, dest) doesn't work because its timeout
          # is fixed in android's adb_interface at 60 seconds, which may
          # be too short to pull the cache.
          cmd = 'pull %s %s' % (source, dest)
          self._adb.device().old_interface.Adb().SendCommand(
              cmd, timeout_time=240)

  def IsBrowserRunning(self):
    pids = self._adb.ExtractPid(self._backend_settings.package)
    return len(pids) != 0

  def GetRemotePort(self, local_port):
    return local_port

  def GetStandardOutput(self):
    return '\n'.join(self._adb.device().RunShellCommand('logcat -d -t 500'))

  def GetStackTrace(self):
    """Return logcat plus, when the tools exist, symbolized stack/tombstones."""
    def Decorate(title, content):
      return title + '\n' + content + '\n' + '*' * 80 + '\n'
    # Get the last lines of logcat (large enough to contain stacktrace)
    logcat = self.GetStandardOutput()
    ret = Decorate('Logcat', logcat)
    stack = os.path.join(util.GetChromiumSrcDir(), 'third_party',
                         'android_platform', 'development', 'scripts', 'stack')
    # Try to symbolize logcat.
    if os.path.exists(stack):
      cmd = [stack]
      if self._target_arch:
        cmd.append('--arch=%s' % self._target_arch)
      p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
      ret += Decorate('Stack from Logcat', p.communicate(input=logcat)[0])
    # Try to get tombstones.
    tombstones = os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
                              'tombstones.py')
    if os.path.exists(tombstones):
      ret += Decorate('Tombstones',
                      subprocess.Popen([tombstones, '-w', '--device',
                                        self._adb.device_serial()],
                                       stdout=subprocess.PIPE).communicate()[0])
    return ret

  def AddReplayServerOptions(self, extra_wpr_args):
    if not self.forwarder_factory.does_forwarder_override_dns:
      extra_wpr_args.append('--no-dns_forwarding')
    if self.browser_options.netsim:
      extra_wpr_args.append('--net=%s' % self.browser_options.netsim)
|
bsd-3-clause
|
MilaPetrova/python-training-group3
|
contact_Dima.py
|
1
|
3567
|
__author__ = 'Dzmitry'
from model.contact import Group
class ContactHelper:
    """Page-object style helper for the addressbook contact pages.

    NOTE: several public method names keep their historic typos
    ("delet_*", "coutn") so that existing callers do not break.
    """

    def __init__(self, app):
        self.app = app

    def create(self, contact):
        """Create a new contact via the 'add new' form."""
        wd = self.app.wd
        wd.get("http://localhost/addressbook/")
        # init new contact creation
        wd.find_element_by_link_text("add new").click()
        self.fill_contact_form(contact)
        # submit new contact creation
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        self.list_contact_cache = None

    def modify_first_contact(self, new_contact_date):
        # BUGFIX: the contact data was previously dropped, which made
        # modify_contact_by_index fail with a TypeError (missing argument).
        self.modify_contact_by_index(0, new_contact_date)

    def modify_contact_by_index(self, index, new_contact_date):
        """Select the contact at `index` and overwrite it with new data."""
        wd = self.app.wd
        self.select_contact_by_index(index)
        # click edit contact
        # TODO(review): this xpath always opens row 2's edit icon regardless
        # of `index`; verify whether per-index editing is intended.
        wd.find_element_by_xpath(".//*[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
        # fill out form
        self.fill_contact_form(new_contact_date)
        # submit update
        wd.find_element_by_xpath(".//*[@id='content']/form[1]/input[1]").click()
        wd.get("http://localhost/addressbook/")
        self.list_contact_cache = None

    def fill_contact_form(self, contact):
        wd = self.app.wd
        self.change_field_value("firstname", contact.firstname)
        self.change_field_value("lastname", contact.lastname)
        self.change_field_value("nickname", contact.nickname)
        self.change_field_value("title", contact.title)
        self.change_field_value("company", contact.company)

    def change_field_value(self, fild_name, text):
        # Only touch the field when a value was supplied (None = keep as-is).
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(fild_name).click()
            wd.find_element_by_name(fild_name).clear()
            wd.find_element_by_name(fild_name).send_keys(text)

    def delet_first_contact(self):
        self.delet_contact_by_index(0)

    def delet_contact_by_index(self, index):
        wd = self.app.wd
        wd.get("http://localhost/addressbook/")
        self.select_contact_by_index(index)
        # delete element
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        # confirm deletion
        wd.switch_to_alert().accept()
        self.list_contact_cache = None

    def select_contact_by_index(self, index):
        wd = self.app.wd
        wd.get("http://localhost/addressbook/")
        wd.find_elements_by_name("selected[]")[index].click()

    def select_first_contact(self):
        # BUGFIX: select_contact_by_index requires an index; calling it with
        # no argument previously raised a TypeError.
        self.select_contact_by_index(0)

    def coutn(self):
        """Return the number of contacts (name typo kept for compatibility)."""
        wd = self.app.wd
        wd.get("http://localhost/addressbook/")
        return len(wd.find_elements_by_name("selected[]"))

    # Cached result of get_contact_list(); invalidated on every mutation.
    list_contact_cache = None

    def get_contact_list(self):
        """Return the contact rows, reading the page only on cache miss."""
        if self.list_contact_cache is None:
            wd = self.app.wd
            wd.get("http://localhost/addressbook/")
            self.list_contact_cache = []
            for element in wd.find_elements_by_name("entry"):
                id = element.find_element_by_name("selected[]").get_attribute("id")
                text_firstname = element.find_element_by_css_selector('tr[name="entry"] td:nth-of-type(3)').text
                text_lastname = element.find_element_by_css_selector('tr[name="entry"] td:nth-of-type(2)').text
                # NOTE(review): contacts are stored in Group objects; a
                # dedicated Contact model would be clearer -- confirm intent.
                self.list_contact_cache.append(Group(id=id, lastname=text_lastname, firstname=text_firstname))
        return list(self.list_contact_cache)
|
apache-2.0
|
erickt/hue
|
desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Monitor.py
|
37
|
12734
|
#._cv_part guppy.heapy.Monitor
import os, pprint, signal, socket, SocketServer, sys, threading, time, traceback
import cPickle as pickle
try:
import readline # Imported to _enable_ command line editing
except ImportError:
pass
import select, Queue
from guppy.heapy.RemoteConstants import *
from guppy.heapy.Console import Console
from guppy.sets import mutnodeset
from guppy.etc.etc import ptable
from guppy.etc import cmd
class Server(SocketServer.ThreadingTCPServer):
    # Each incoming remote-process connection is handled in its own thread.
    pass
def ioready(fd, wait):
    """Return non-zero when `fd` becomes readable within `wait` seconds."""
    readable = select.select([fd], [], [], wait)[0]
    return len(readable)
def queue_get_interruptible(q, noblock=0):
    """Get from queue `q`, polling so KeyboardInterrupt can be delivered.

    Blocks in short 0.2s slices; with `noblock` set, gives up after one
    empty slice and returns None.
    """
    while True:
        try:
            return q.get(timeout=0.2)
        except Queue.Empty:
            if noblock:
                return None
# Special value signals that connection has been closed
CONN_CLOSED = ('CLOSED',)
class Handler(SocketServer.StreamRequestHandler):
    # Serves one remote-process connection.  The socket thread (handle())
    # pushes ('DATA', text) / ('PROMPT', text) tuples onto self.dataq,
    # which the monitor thread consumes via exec_cmd().
    allow_reuse_address = 1

    def close(self):
        # tas(0) ("test and set") on the mutnodeset guarantees the shutdown
        # sequence runs only once even if close() is entered twice.
        if not self.isclosed.tas(0):
            self.server.monitor.remove_connection(self)
            self.dataq.put(CONN_CLOSED)
            self.request.shutdown(2)
            self.request.close()

    def send_cmd(self, cmd):
        # The remote side reads line-by-line; make sure a newline terminates
        # the command.
        if not cmd.endswith('\n'):
            cmd += '\n'
        self.request.send(cmd)

    def browser_cmd(self, cmd):
        # Step out of an interactive console ('>>> ') or a paused Annex
        # ('<Annex> ') first so `cmd` is parsed by the browser itself.
        if self.prompt == '>>> ':
            self.exec_cmd('q', retdata=1)
        if self.prompt == '<Annex> ':
            self.exec_cmd('cont', retdata=1)
        return self.exec_cmd(cmd, retdata=1)

    def exec_cmd(self, cmd, retdata=0, noblock=0):
        # Send `cmd` (None = just drain pending output) and collect the
        # reply until the remote side presents a prompt again.  With
        # `retdata` the text is returned, otherwise echoed to stdout.
        if cmd is not None:
            self.send_cmd(cmd)
            self.promptstate = False
        datas = []
        while 1:
            p = queue_get_interruptible(self.dataq, noblock)
            if p is None:
                # Queue empty: done if we already saw a prompt, else keep
                # waiting for more output.
                if self.promptstate:
                    break
                else:
                    time.sleep(1)
                    continue
            if p is CONN_CLOSED:
                raise EOFError
            if p[0] == 'DATA':
                self.promptstate = False
                if retdata:
                    datas.append(p[1])
                else:
                    sys.stdout.write(p[1])
            elif p[0] == 'PROMPT':
                self.prompt = p[1]
                if self.dataq.empty():
                    self.promptstate = True
                    break
                else:
                    self.promptstate = False
            else:
                assert 0
        if retdata:
            return ''.join(datas)

    def get_ps(self, name):
        # Look up a "name = value" line in the first data burst the remote
        # process sent; returns '' when not found.
        for line in self.firstdata.split('\n'):
            if line.startswith(name):
                if '=' in line:
                    ix = line.index('=')
                    line = line[ix+1:].strip()
                    return line
        return ''

    def get_val(self, expr):
        # Ask the remote browser to pickle `expr` and unpickle it here.
        # NOTE(review): unpickling data from the remote process trusts that
        # process completely.
        data = self.browser_cmd('dump %s'%expr)
        return pickle.loads(data)

    def handle(self):
        # Socket-reader loop: classify each incoming line as prompt or data
        # until EOF / DONE, then tear the connection down.
        self.prompt = None
        self.promptstate = False
        self.isclosed = mutnodeset()
        self.dataq = Queue.Queue()
        self.server.monitor.add_connection(self)
        while 1:
            try:
                data = self.rfile.readline()
                if not data:
                    raise EOFError,'End of file'
                if data.endswith(DONE):
                    raise EOFError,'DONE'
            except (EOFError, socket.error):
                break
            if data.endswith(READLINE):
                # A READLINE marker means the remote side is waiting for
                # input; everything before the marker is its prompt.
                prompt = data[:-len(READLINE)]
                self.dataq.put(('PROMPT',prompt))
                if self.prompt is None:
                    # First prompt: capture the initial banner for get_ps().
                    self.firstdata = self.exec_cmd(cmd=None,retdata=1)
            else:
                self.dataq.put(('DATA',data))
        self.close()
class MonitorConnection(cmd.Cmd):
    # The '<Monitor> ' command interpreter itself.  Owns the TCP server
    # (run in a background thread) that remote processes connect to.
    use_raw_input = 1

    def __init__(self, monitor):
        self.aliases = {}
        cmd.Cmd.__init__(self)
        self.hno = 0
        self.isclosed = 0
        self.forceexit = 0
        self.prompt = '<Monitor> '
        self.monitor = monitor
        self.server = s = Server((LOCALHOST, HEAPYPORT), Handler)
        self.server.monitor = monitor
        # Accept connections on a daemon-ish worker thread.
        self.st = threading.Thread(target = self.run_server,
                                   args = ())
        self.st.start()

    def close(self):
        self.isclosed = 1
        self.server.socket.shutdown(2)
        self.server.server_close()
        # Refuse any request that races with the shutdown.
        self.server.verify_request = lambda x, y: 0

    def default(self, line):
        # Unknown command: report it, then show the command list.
        cmd.Cmd.default(self, line)
        cmd.Cmd.do_help(self, '')

    def run_server(self):
        s = self.server
        while not self.isclosed:
            s.handle_request()
        s.server_close()

    def exec_cmd(self, cmd):
        if not cmd:
            # I don't want the repeat of previous command when giving
            # empty command that is provided by cmd.py.
            # It turned out to be confusing sometimes.
            return
        line = cmd
        try:
            line = self.precmd(line)
            stop = self.onecmd(line)
            stop = self.postcmd(stop, line)
            return stop
        except:
            self.handle_error(line)

    def handle_error(self, cmdline):
        """Handle an error gracefully.  May be overridden.

        The default is to print a traceback and continue.
        """
        print >>sys.stderr,'-'*40
        print >>sys.stderr,'Exception happened during processing the command',
        print >>sys.stderr,repr(cmdline)
        import traceback
        traceback.print_exc()
        print >>sys.stderr, '-'*40

    # Alias handling etc copied from pdb.py in Python dist

    def precmd(self, line):
        """Handle alias expansion and ';;' separator."""
        self.curline = line
        if not line:
            return line
        args = line.split()
        while self.aliases.has_key(args[0]):
            line = self.aliases[args[0]]
            if '%' in line:
                # Substitute positional (%1, %2, ...), tail (%>=N) and
                # all-args (%*) placeholders.
                ii = 1
                for tmpArg in args[1:]:
                    line = line.replace("%" + str(ii),
                                        tmpArg)
                    line = line.replace('%>=' + str(ii),
                                        ' '.join(args[ii:]))
                    ii = ii + 1
                line = line.replace("%*", ' '.join(args[1:]))
            else:
                line = line + ' ' + ' '.join(args[1:])
            args = line.split()
        # split into ';;' separated commands
        # unless it's an alias command
        if args[0] != 'alias':
            marker = line.find(';;')
            if marker >= 0:
                # queue up everything after marker
                next = line[marker+2:].lstrip()
                self.cmdqueue.append(next)
                line = line[:marker].rstrip()
        return line

    def do_exit(self, arg):
        # Flag checked by Monitor.close(): hard-exit the whole process.
        self.forceexit = 1
        return 1

    def help_exit(self):
        print """exit
-----
Exit from the monitor and from the Python process that started it.
This makes sure to exit without waiting for the server thread to terminate.
See also the q command."""

    do_h = cmd.Cmd.do_help

    def help_h(self):
        print """h(elp)
-----
Without argument, print the list of available commands.
With a command name as argument, print help about that command."""

    def help_help(self):
        self.help_h()

    def do_int(self, arg):
        try:
            con = Console(stdin=self.stdin,stdout=self.stdout,
                          locals=self.__dict__)
            con.interact(
                "Local interactive console. To return to Monitor, type %r."%
                con.EOF_key_sequence)
        finally:
            pass

    def help_int(self):
        print """int
-----
Local interactive console.
This will bring up a Python console locally in
the same interpreter process that the Monitor itself."""

    def do_ki(self, arg):
        # Default to the most recently used connection id.
        if not arg:
            arg = self.conid
        arg = int(arg)
        c = self.monitor.connections[arg]
        if c.get_ps('noninterruptible'):
            print '''\
Error: Can not interrupt this remote connection (uses Python < 2.4)'''
        else:
            print 'Sending KeyboardInterrupt to connection %s.'%arg
            c.send_cmd(KEYBOARDINTERRUPT)

    def help_ki(self):
        print """ki <connection ID>
-----
Keyboard Interrupt
Send a command to interrupt the remote thread on the specified
connection (default is the last one connected to).
Notes:
It currently only works with Python >= 2.4. The remote thread will
not always be awakened, for example if it is waiting in
time.sleep(). Sometimes using several ki commands helps."""

    def do_lc(self, arg):
        table = [['CID', 'PID', 'ARGV']]
        for cid, con in self.monitor.connections.items():
            table.append([cid,
                          con.get_ps('target.pid'),
                          con.get_ps('target.sys.argv')])
        ptable(table, self.stdout)

    def help_lc(self):
        print """lc
-----
List Connections.
List the currently open connections.
The table printed has one line per connection in this form:
CID PID ARGV
  1 17999 ['/home/nilsson/bin/solitaire.py']
CID is the connection ID, which may be used as argument to the sc
command.
PID is the process ID of the target interpreter process. In Linux,
this is the parent of the remote control interpreter thread that runs
the Annex that the connection is talking to.
ARGV is the argument vector in the target Python interpereter."""

    def do_sc(self, arg):
        if arg:
            self.conid = int(arg)
        print 'Remote connection %d. To return to Monitor, type <Ctrl-C> or .<RETURN>'%self.conid
        self.monitor.set_connection(self.monitor.connections[self.conid])

    def help_sc(self):
        print """sc <connection ID>
-----
Set connection to communicate with a remote thread.
With an argument, set current connection to the number specified.
Without argument, use the same connection as last time. You will then
be talking to a remote process via that connection. You can return to
Monitor at any time by <Ctrl-C>. You may also use the '.' command
(followed by <Return>), if the remote process is waiting for input.
The '.' character may be followed by a monitor command, to execute it
directly instead of returning to the monitor. For example, when
talking to a connection, '.sc 1' will directly change to connection 1."""

    def do_q(self, arg):
        return 1

    def help_q(self):
        print """q
-----
Quit from the monitor.
This will not exit from Python itself if called from an interactive
interpreter. To make sure to exit from Python, use the exit command."""
class Monitor:
    # Top-level REPL: routes input either to the local MonitorConnection
    # or to a selected remote-process connection.
    use_raw_input = 1

    def __init__(self):
        self.connection = self.monitor_connection = MonitorConnection(self)
        self.connections = {}
        self.ids = 0
        self.prompt = None

    def newid(self):
        # Connection ids restart at 1 once all connections are gone.
        if not self.connections:
            self.ids = 1
            self.monitor_connection.conid = self.ids
        else:
            self.ids = max([1]+[c for c in self.connections.keys()])+1
        return self.ids

    def add_connection(self, connection):
        hid = self.newid()
        self.connections[hid] = connection
        connection.monitor_id = hid
        self.print_async( '*** Connection %s opened ***'%hid)

    def print_async(self, text):
        """ Print text only if we are waiting for input,
            and then restore the prompt. """
        if self.prompt is not None:
            print '\n'+text
            sys.stdout.write(self.prompt)
            sys.stdout.flush()

    def remove_connection(self, connection):
        del self.connections[connection.monitor_id]
        if connection is self.connection:
            # Fall back to the local monitor when the active remote dies.
            self.set_connection(self.monitor_connection)
        self.print_async( '*** Connection %s closed ***'%connection.monitor_id)

    def run(self):
        # Main input loop.  A leading '.' switches a line back to the local
        # monitor; otherwise the line goes to the current connection.
        try:
            stop = 0
            while not stop:
                try:
                    while not stop:
                        conn = self.connection
                        self.prompt = conn.prompt
                        if conn is not self.monitor_connection:
                            # Drain any output the remote produced meanwhile.
                            conn.exec_cmd(cmd=None,noblock=1)
                        cmd = raw_input(conn.prompt)
                        self.prompt = None
                        conn = None
                        if cmd.startswith('.'):
                            if cmd == '.':
                                self.connection = self.monitor_connection
                            else:
                                cmd = cmd[1:]
                                conn = self.monitor_connection
                        #elif cmd or self.connection is self.monitor_connection:
                        else:
                            conn = self.connection
                        if conn:
                            try:
                                r = conn.exec_cmd(cmd)
                            except EOFError:
                                r = 1
                            if conn is self.monitor_connection and r:
                                stop = 1
                            #print 'to stop'
                    #print 'end of loop'
                except EOFError:
                    'We better exit in case the input is from a file'
                    #print 'EOFError'
                    #print 'Use the monitor q command to quit.'
                    print '*** End Of File - Exiting Monitor ***'
                    self.connection = self.monitor_connection
                    stop = 1
                except KeyboardInterrupt:
                    # Ctrl-C returns control to the local monitor.
                    print 'KeyboardInterrupt'
                    print 'Use the ki command to interrupt a remote process.'
                    self.connection = self.monitor_connection
                    continue
        finally:
            self.prompt=None # Avoid closing messages
            #print 'to close'
            self.close()

    def close(self):
        for c in self.connections.values():
            try:
                #print 'to close:', c
                c.close()
            except socket.error:
                pass
        try:
            #print 'to close: self'
            self.monitor_connection.close()
        except socket.error:
            pass
        if self.monitor_connection.forceexit:
            # 'exit' command: skip normal interpreter shutdown entirely.
            os._exit(0)

    def set_connection(self, connection):
        self.connection = connection
        self.prompt = connection.prompt
def monitor():
    """monitor() [0]

    Start an interactive remote monitor.

    This can be used to get information about the state, in
    particular the memory usage, of separately running Python
    processes.

    References
        [0] heapy_Use.html#heapykinds.Use.monitor"""
    # Disable any remote-monitoring hook in this process first, so the
    # monitor does not connect back to itself.
    from guppy.heapy import Remote
    Remote.off()
    m = Monitor()
    m.run()
if __name__ == '__main__':
monitor()
|
apache-2.0
|
x13945/Android-ImageMagick
|
library/src/main/jni/libwebp-0.3.1/swig/libwebp.py
|
107
|
6605
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.4
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
    # Locate the compiled _libwebp extension next to this file first; fall
    # back to a plain import when it is already on sys.path.
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_libwebp', [dirname(__file__)])
        except ImportError:
            import _libwebp
            return _libwebp
        if fp is not None:
            try:
                _mod = imp.load_module('_libwebp', fp, pathname, description)
            finally:
                # Always release the file handle find_module opened.
                fp.close()
            return _mod
    _libwebp = swig_import_helper()
    del swig_import_helper
else:
    import _libwebp
del version_info
# --- SWIG runtime shims (generated code; edit the .i interface instead). ---
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Route attribute writes through the SWIG setter table; arbitrary
    # instance attributes are only allowed when static is 0.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Route attribute reads through the SWIG getter table.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
    _object = object
    _newclass = 1
except AttributeError:
    # Pre-2.2 interpreter: no new-style classes available.
    class _object : pass
    _newclass = 0
# --- Thin pass-throughs to the compiled _libwebp module (SWIG-generated). ---
def WebPGetDecoderVersion():
    """WebPGetDecoderVersion() -> int"""
    return _libwebp.WebPGetDecoderVersion()

def WebPGetInfo(*args):
    """WebPGetInfo(uint8_t data) -> (width, height)"""
    return _libwebp.WebPGetInfo(*args)

def WebPDecodeRGB(*args):
    """WebPDecodeRGB(uint8_t data) -> (rgb, width, height)"""
    return _libwebp.WebPDecodeRGB(*args)

def WebPDecodeRGBA(*args):
    """WebPDecodeRGBA(uint8_t data) -> (rgb, width, height)"""
    return _libwebp.WebPDecodeRGBA(*args)

def WebPDecodeARGB(*args):
    """WebPDecodeARGB(uint8_t data) -> (rgb, width, height)"""
    return _libwebp.WebPDecodeARGB(*args)

def WebPDecodeBGR(*args):
    """WebPDecodeBGR(uint8_t data) -> (rgb, width, height)"""
    return _libwebp.WebPDecodeBGR(*args)

def WebPDecodeBGRA(*args):
    """WebPDecodeBGRA(uint8_t data) -> (rgb, width, height)"""
    return _libwebp.WebPDecodeBGRA(*args)

def WebPGetEncoderVersion():
    """WebPGetEncoderVersion() -> int"""
    return _libwebp.WebPGetEncoderVersion()

# The wrap_* functions are raw SWIG entry points; the public WebPEncode*
# helpers below adapt their calling convention.
def wrap_WebPEncodeRGB(*args):
    """private, do not call directly."""
    return _libwebp.wrap_WebPEncodeRGB(*args)

def wrap_WebPEncodeBGR(*args):
    """private, do not call directly."""
    return _libwebp.wrap_WebPEncodeBGR(*args)

def wrap_WebPEncodeRGBA(*args):
    """private, do not call directly."""
    return _libwebp.wrap_WebPEncodeRGBA(*args)

def wrap_WebPEncodeBGRA(*args):
    """private, do not call directly."""
    return _libwebp.wrap_WebPEncodeBGRA(*args)

def wrap_WebPEncodeLosslessRGB(*args):
    """private, do not call directly."""
    return _libwebp.wrap_WebPEncodeLosslessRGB(*args)

def wrap_WebPEncodeLosslessBGR(*args):
    """private, do not call directly."""
    return _libwebp.wrap_WebPEncodeLosslessBGR(*args)

def wrap_WebPEncodeLosslessRGBA(*args):
    """private, do not call directly."""
    return _libwebp.wrap_WebPEncodeLosslessRGBA(*args)

def wrap_WebPEncodeLosslessBGRA(*args):
    """private, do not call directly."""
    return _libwebp.wrap_WebPEncodeLosslessBGRA(*args)
_UNUSED = 1
def WebPEncodeRGB(rgb, width, height, stride, quality_factor):
"""WebPEncodeRGB(uint8_t rgb, int width, int height, int stride, float quality_factor) -> lossy_webp"""
webp = wrap_WebPEncodeRGB(
rgb, _UNUSED, _UNUSED, width, height, stride, quality_factor)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeRGBA(rgb, width, height, stride, quality_factor):
"""WebPEncodeRGBA(uint8_t rgb, int width, int height, int stride, float quality_factor) -> lossy_webp"""
webp = wrap_WebPEncodeRGBA(
rgb, _UNUSED, _UNUSED, width, height, stride, quality_factor)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeBGR(rgb, width, height, stride, quality_factor):
"""WebPEncodeBGR(uint8_t rgb, int width, int height, int stride, float quality_factor) -> lossy_webp"""
webp = wrap_WebPEncodeBGR(
rgb, _UNUSED, _UNUSED, width, height, stride, quality_factor)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeBGRA(rgb, width, height, stride, quality_factor):
"""WebPEncodeBGRA(uint8_t rgb, int width, int height, int stride, float quality_factor) -> lossy_webp"""
webp = wrap_WebPEncodeBGRA(
rgb, _UNUSED, _UNUSED, width, height, stride, quality_factor)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeLosslessRGB(rgb, width, height, stride):
"""WebPEncodeLosslessRGB(uint8_t rgb, int width, int height, int stride) -> lossless_webp"""
webp = wrap_WebPEncodeLosslessRGB(rgb, _UNUSED, _UNUSED, width, height, stride)
if len(webp[0]) == 0:
return None
return webp[0]
def WebPEncodeLosslessRGBA(rgb, width, height, stride):
    """WebPEncodeLosslessRGBA(uint8_t rgb, int width, int height, int stride) -> lossless_webp"""
    result = wrap_WebPEncodeLosslessRGBA(rgb, _UNUSED, _UNUSED, width, height, stride)
    encoded = result[0]
    # An empty payload signals that the native encoder failed.
    return encoded if len(encoded) else None
def WebPEncodeLosslessBGR(rgb, width, height, stride):
    """WebPEncodeLosslessBGR(uint8_t rgb, int width, int height, int stride) -> lossless_webp"""
    result = wrap_WebPEncodeLosslessBGR(rgb, _UNUSED, _UNUSED, width, height, stride)
    encoded = result[0]
    # An empty payload signals that the native encoder failed.
    return encoded if len(encoded) else None
def WebPEncodeLosslessBGRA(rgb, width, height, stride):
    """WebPEncodeLosslessBGRA(uint8_t rgb, int width, int height, int stride) -> lossless_webp"""
    result = wrap_WebPEncodeLosslessBGRA(rgb, _UNUSED, _UNUSED, width, height, stride)
    encoded = result[0]
    # An empty payload signals that the native encoder failed.
    return encoded if len(encoded) else None
# This file is compatible with both classic and new-style classes.
|
mit
|
henkvos/xhtml2pdf
|
demo/tgpisa/setup.py
|
168
|
2452
|
# -*- coding: utf-8 -*-
# Packaging script for the tgpisa TurboGears demo application.
from setuptools import setup, find_packages
from turbogears.finddata import find_package_data
import os

# Load project metadata (at least ``version``, used below) into this
# module's namespace.  NOTE: execfile() is Python 2 only.
execfile(os.path.join("tgpisa", "release.py"))

packages=find_packages()
package_data = find_package_data(where='tgpisa',
    package='tgpisa')
# Bundle the compiled locale data while keeping the .po sources out of
# the distribution.
if os.path.isdir('locales'):
    packages.append('locales')
    package_data.update(find_package_data(where='locales',
        exclude=('*.po',), only_in_packages=False))
setup(
    name="tgpisa",
    # ``version`` comes from release.py via the execfile above.
    version=version,
    # uncomment the following lines if you fill them out in release.py
    #description=description,
    #author=author,
    #author_email=email,
    #url=url,
    #download_url=download_url,
    #license=license,
    install_requires=[
        "TurboGears >= 1.0.4.3",
        "SQLObject>=0.8,<=0.10.0"
    ],
    zip_safe=False,
    packages=packages,
    package_data=package_data,
    keywords=[
        # Use keywords if you'll be adding your package to the
        # Python Cheeseshop
        # if this has widgets, uncomment the next line
        # 'turbogears.widgets',
        # if this has a tg-admin command, uncomment the next line
        # 'turbogears.command',
        # if this has identity providers, uncomment the next line
        # 'turbogears.identity.provider',
        # If this is a template plugin, uncomment the next line
        # 'python.templating.engines',
        # If this is a full application, uncomment the next line
        # 'turbogears.app',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Framework :: TurboGears',
        # if this is an application that you'll distribute through
        # the Cheeseshop, uncomment the next line
        # 'Framework :: TurboGears :: Applications',
        # if this is a package that includes widgets that you'll distribute
        # through the Cheeseshop, uncomment the next line
        # 'Framework :: TurboGears :: Widgets',
    ],
    test_suite='nose.collector',
    entry_points = {
        'console_scripts': [
            'start-tgpisa = tgpisa.commands:start',
        ],
    },
    # Uncomment next line and create a default.cfg file in your project dir
    # if you want to package a default configuration in your egg.
    #data_files = [('config', ['default.cfg'])],
)
|
apache-2.0
|
elliotthill/django-oscar
|
tests/functional/dashboard/offer_tests.py
|
49
|
3843
|
from django.core.urlresolvers import reverse
from oscar.test import testcases, factories
from oscar.apps.offer import models
class TestAnAdmin(testcases.WebTestCase):
    # New version of offer tests buy using WebTest.
    # Each test drives the dashboard's multi-step offer wizard
    # (metadata -> benefit -> condition -> restrictions) through real
    # form submissions, so statement order mirrors the user's clicks.
    is_staff = True

    def setUp(self):
        super(TestAnAdmin, self).setUp()
        # A catch-all range that the benefit and condition forms reference.
        self.range = models.Range.objects.create(
            name="All products", includes_all_products=True)

    def test_can_create_an_offer(self):
        # Walk the full create wizard and check the persisted offer.
        list_page = self.get(reverse('dashboard:offer-list'))
        metadata_page = list_page.click('Create new offer')
        metadata_form = metadata_page.form
        metadata_form['name'] = "Test offer"
        benefit_page = metadata_form.submit().follow()
        benefit_form = benefit_page.form
        benefit_form['range'] = self.range.id
        benefit_form['type'] = "Percentage"
        benefit_form['value'] = "25"
        condition_page = benefit_form.submit().follow()
        condition_form = condition_page.form
        condition_form['range'] = self.range.id
        condition_form['type'] = "Count"
        condition_form['value'] = "3"
        restrictions_page = condition_form.submit().follow()
        restrictions_page.form.submit()
        offers = models.ConditionalOffer.objects.all()
        self.assertEqual(1, len(offers))
        offer = offers[0]
        self.assertEqual("Test offer", offer.name)
        self.assertEqual(3, offer.condition.value)
        self.assertEqual(25, offer.benefit.value)

    def test_can_update_an_existing_offer(self):
        # Edit only the name; submit the remaining steps unchanged.
        factories.create_offer(name="Offer A")
        list_page = self.get(reverse('dashboard:offer-list'))
        detail_page = list_page.click('Offer A')
        metadata_page = detail_page.click(linkid="edit_metadata")
        metadata_form = metadata_page.form
        metadata_form['name'] = "Offer A+"
        benefit_page = metadata_form.submit().follow()
        benefit_form = benefit_page.form
        condition_page = benefit_form.submit().follow()
        condition_form = condition_page.form
        restrictions_page = condition_form.submit().follow()
        restrictions_page.form.submit()
        # Raises DoesNotExist (failing the test) if the rename didn't stick.
        models.ConditionalOffer.objects.get(name="Offer A+")

    def test_can_jump_to_intermediate_step_for_existing_offer(self):
        offer = factories.create_offer()
        url = reverse('dashboard:offer-condition',
            kwargs={'pk': offer.id})
        self.assertEqual(200, self.get(url).status_code)

    def test_cannot_jump_to_intermediate_step(self):
        # Without an offer pk, wizard steps should redirect (302) away.
        for url_name in ('dashboard:offer-condition',
                         'dashboard:offer-benefit',
                         'dashboard:offer-restrictions'):
            response = self.get(reverse(url_name))
            self.assertEqual(302, response.status_code)

    def test_can_suspend_an_offer(self):
        # Create an offer
        offer = factories.create_offer()
        self.assertFalse(offer.is_suspended)
        detail_page = self.get(reverse('dashboard:offer-detail',
            kwargs={'pk': offer.pk}))
        form = detail_page.forms['status_form']
        form.submit('suspend')
        # Reload from the DB; the in-memory instance is stale after the POST.
        reloaded_offer = models.ConditionalOffer.objects.get(pk=offer.pk)
        self.assertTrue(reloaded_offer.is_suspended)

    def test_can_reinstate_a_suspended_offer(self):
        # Create a suspended offer
        offer = factories.create_offer()
        offer.suspend()
        self.assertTrue(offer.is_suspended)
        detail_page = self.get(reverse('dashboard:offer-detail',
            kwargs={'pk': offer.pk}))
        form = detail_page.forms['status_form']
        form.submit('unsuspend')
        reloaded_offer = models.ConditionalOffer.objects.get(pk=offer.pk)
        self.assertFalse(reloaded_offer.is_suspended)
|
bsd-3-clause
|
denisKaranja/django-dive-in
|
karanja_me/polls/views.py
|
2
|
1695
|
from django.core.urlresolvers import reverse
from django.db.models import F
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, render, HttpResponse
from django.utils import timezone
from django.views import generic

from .models import Choice, Question
# entry point of the polls app
class IndexView(generic.ListView):
    """Landing page listing the five most recently published questions."""
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last 5 published questions."""
        published = Question.objects.filter(pub_date__lte=timezone.now())
        return published.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    # Renders the voting form for a single Question.
    model = Question
    template_name = "polls/detail.html"

    def get_queryset(self):
        """
        Excludes any questions that aren't published yet.
        """
        return Question.objects.filter(pub_date__lte = timezone.now())
class ResultsView(generic.DetailView):
    # Read-only results page for a single Question.
    model = Question
    template_name = "polls/results.html"
def vote(request, question_id):
    """Record a POSTed vote for one of *question_id*'s choices.

    Re-renders the detail page with an error message when no choice was
    submitted; otherwise increments the chosen choice's vote count and
    redirects to the results page (post/redirect/get pattern).
    """
    p = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = p.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': p,
            'error_message': "You must select a choice.",
        })
    else:
        # Use an F() expression so the increment happens in the database,
        # avoiding the read-modify-write race between concurrent voters.
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
|
mit
|
Zanzibar82/pelisalacarta
|
python/main-classic/core/samba.py
|
5
|
7516
|
# -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Acceso a directorios con samba
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import logger
import os
import config
'''
El formato de una ruta samba es:
smb://[usuario:password]@servidor/rutacompartida/directorio/
Ej:
Con login y password: smb://jesus:mipassword@MEDIASERVER/DESCARGAS/xbmc/favoritos
Con acceso guest: smb://MEDIASERVER/DESCARGAS/xbmc/favoritos
'''
def parse_url(url):
    """Split a samba URL into (server_name, share_name, path, user, password).

    Accepts both authenticated (smb://user:pass@SERVER/SHARE/dir/) and
    guest (smb://SERVER/SHARE/dir/) forms; missing pieces come back as
    empty strings and an empty path becomes "/".
    """
    import re

    # Normalise: trim whitespace and force a trailing slash so the
    # patterns below always see a terminated path component.
    url = url.strip()
    if not url.endswith("/"):
        url = url + "/"

    with_login = re.compile(
        r'smb\:\/\/([^\:]+)\:([^\@]+)@([^\/]+)\/([^\/]+)/(.*/)?',
        re.DOTALL).findall(url)
    if with_login:
        user, password, server_name, share_name, path = with_login[0]
    else:
        anonymous = re.compile(
            r'smb\:\/\/([^\/]+)\/([^\/]+)/(.*/)?', re.DOTALL).findall(url)
        if anonymous:
            server_name, share_name, path = anonymous[0]
            user = ""
            password = ""
        else:
            # Unparseable URL: return all-empty fields (path fixed below).
            server_name = ""
            share_name = ""
            path = ""
            user = ""
            password = ""

    if path == "":
        path = "/"
    return server_name, share_name, path, user, password
def connect(server_name,user,password):
    """Open an SMB session to *server_name* and return the smb.SMB object.

    The NetBIOS name is resolved to an IP first; when the server requires a
    login and *user* is empty, the 'guest' account is assumed.
    """
    from lib.samba import smb as smb
    from lib.samba import nmb as nmb
    logger.info("[samba.py] Crea netbios...")
    netbios = nmb.NetBIOS()
    logger.info("[samba.py] Averigua IP...")
    # gethostbyname returns a list of NetBIOS entries; use the first one.
    nbhost = netbios.gethostbyname(server_name)
    server_ip = nbhost[0].get_ip()
    logger.info("[samba.py] server_ip="+server_ip)
    logger.info("[samba.py] Crea smb...")
    remote = smb.SMB(server_name, server_ip)
    logger.info("ok")
    if remote.is_login_required():
        logger.info("[samba.py] Login...")
        if user=="":
            logger.info("[samba.py] User vacio, se asume 'guest'")
            user="guest"
        remote.login(user, password)
    else:
        logger.info("[samba.py] Login no requerido")
    return remote
'''
Graba el string "filecontent" en un fichero "filename" almacenado en la ruta samba indicada
'''
def write_file(filename,filecontent,url):
    """Write string *filecontent* as *filename* inside the samba directory *url*.

    The content is staged in a local temp file (XBMC's special://temp when
    available, the plugin data path otherwise) and then uploaded.
    """
    # Separa la URL en los elementos
    server_name,share_name,path,user,password = parse_url(url)
    # Conecta con el servidor remoto
    remote = connect(server_name,user,password)
    # Crea un fichero temporal con el bookmark
    logger.info("Crea fichero temporal")
    try:
        import xbmc
        localfilename = xbmc.translatePath( "special://temp" )
    except Exception:
        # Running outside XBMC: fall back to the plugin's data directory.
        localfilename = config.get_data_path()
    logger.info("localfilename="+localfilename)
    localfilename = os.path.join(localfilename,"bookmark.tmp")
    bookmarkfile = open(localfilename,"w")
    try:
        bookmarkfile.write(filecontent)
        bookmarkfile.flush()
    finally:
        # Always release the handle, even if the write fails.
        bookmarkfile.close()
    # Copia el bookmark al directorio Samba
    logger.info("Crea el fichero remoto")
    bookmarkfile = open(localfilename,"rb")
    try:
        # parse_url() guarantees *path* ends with "/", so join without an
        # extra separator (matches retr_file/remove in this module; the
        # original produced "dir//name").
        remote.stor_file(share_name, path+filename, bookmarkfile.read)
    finally:
        bookmarkfile.close()
    # Borra el fichero temporal
    logger.info("Borra el fichero local")
    os.remove(localfilename)
def get_files(url):
    """Return the names of the regular files in the remote samba directory *url*."""
    logger.info("[samba.py] get_files")
    # Separa la URL en los elementos
    server_name,share_name,path,user,password = parse_url(url)
    # Conecta con el servidor remoto
    remote = connect(server_name,user,password)
    ficheros = []
    for entry in remote.list_path(share_name, path + '*'):
        entry_name = entry.get_longname()
        # Skip the directory self/parent markers and sub-directories.
        if entry_name in ('.', '..') or entry.is_directory():
            continue
        ficheros.append(entry_name)
    return ficheros
def get_file_handle_for_reading(filename,url):
    """Download *filename* from the samba directory *url* to a local temp
    file and return an open read handle on that local copy.
    """
    logger.info("[samba.py] get_file_handle_for_reading")
    # Separa la URL en los elementos
    server_name,share_name,path,user,password = parse_url(url)
    # Conecta con el servidor remoto
    remote = connect(server_name,user,password)
    # Crea un fichero temporal con el bookmark
    logger.info("[samba.py] Crea fichero temporal")
    try:
        import xbmc
        localfilename = xbmc.translatePath( "special://temp" )
    except:
        # Running outside XBMC: fall back to the plugin's data directory.
        localfilename = config.get_data_path()
    logger.info("[samba.py] localfilename="+localfilename)
    localfilename = os.path.join(localfilename,"bookmark.tmp")
    # Lo abre
    bookmarkfile = open(localfilename,"wb")
    # Lo copia de la URL
    try:
        # retr_file streams the remote file through the write callback.
        remote.retr_file(share_name, path + filename, bookmarkfile.write, password = password)
    finally:
        bookmarkfile.close()
    # NOTE: the handle is opened in text mode while the download was binary;
    # callers appear to expect text content -- confirm before changing.
    return open(localfilename)
def file_exists(filename,url):
    """Return True if *filename* exists as a regular file in the remote
    samba directory *url*, False otherwise.
    """
    logger.info("[samba.py] file_exists "+ filename )
    # Separa la URL en los elementos
    server_name,share_name,path,user,password = parse_url(url)
    # Conecta con el servidor remoto
    remote = connect(server_name,user,password)
    for f in remote.list_path(share_name, path + '*'):
        name = f.get_longname()
        # Skip the directory self/parent markers and sub-directories.
        if name == '.' or name == '..':
            continue
        if f.is_directory():
            continue
        # Direct comparison instead of building a list and probing it with
        # list.index() under a bare except (exceptions as control flow).
        if name == filename:
            return True
    return False
def remove_file(filename,url):
    """Delete *filename* from the remote samba directory *url*."""
    logger.info("[samba.py] remove_file "+filename)
    # Separa la URL en los elementos
    server_name,share_name,path,user,password = parse_url(url)
    # Conecta con el servidor remoto
    remote = connect(server_name,user,password)
    # parse_url() guarantees *path* ends with "/".
    remote.remove(share_name,path+filename,password=password)
def test():
    """Manual smoke test against a hard-coded LAN server.

    Python 2 only (print statement syntax); requires a reachable
    MEDIASERVER host, so it is not an automated test.
    """
    '''
    parse_url("smb://jesus:mipassword@MEDIASERVER/DESCARGAS/XBMC/favoritos")
    parse_url("smb://MEDIASERVER/DESCARGAS/XBMC/favoritos")
    parse_url("smb://MEDIASERVER/DESCARGAS")
    parse_url("smb://jesus:mipassword@MEDIASERVER/DESCARGAS")
    write_file("bookmark.txt","aqui ira el bookmark","smb://MEDIASERVER/DESCARGAS/xbmc/favoritos")
    ficheros = get_files("smb://MEDIASERVER/DESCARGAS/XBMC/favoritos")
    for fichero in ficheros:
        handle = get_file_handle_for_reading(fichero,"smb://MEDIASERVER/DESCARGAS/XBMC/favoritos")
        data = handle.read()
        handle.close()
        print data
    '''
    print file_exists("00000005.txt","smb://MEDIASERVER/DESCARGAS/XBMC/favoritos")
    print file_exists("00000001.txt","smb://MEDIASERVER/DESCARGAS/XBMC/favoritos")
# Manual smoke-test entry point (needs a reachable SMB server).
if __name__ == "__main__":
    test()
|
gpl-3.0
|
rseubert/scikit-learn
|
sklearn/datasets/mldata.py
|
309
|
7838
|
"""Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
    """Convert a raw name for a data set in a mldata.org filename."""
    # Drop parentheses and dots first, then normalise case and spaces;
    # the two steps are independent so the order does not matter.
    sanitized = re.sub(r'[().]', '', dataname)
    return sanitized.lower().replace(' ', '-')
def fetch_mldata(dataname, target_name='label', data_name='data',
                 transpose_data=True, data_home=None):
    """Fetch an mldata.org data set

    If the file does not exist yet, it is downloaded from mldata.org .

    mldata.org does not have an enforced convention for storing data or
    naming the columns in a data set. The default behavior of this function
    works well with the most common cases:

      1) data values are stored in the column 'data', and target values in the
         column 'label'
      2) alternatively, the first column stores target values, and the second
         data values
      3) the data array is stored as `n_features x n_samples` , and thus needs
         to be transposed to match the `sklearn` standard

    Keyword arguments allow to adapt these defaults to specific data sets
    (see parameters `target_name`, `data_name`, `transpose_data`, and
    the examples below).

    mldata.org data sets may have multiple columns, which are stored in the
    Bunch object with their original name.

    Parameters
    ----------
    dataname:
        Name of the data set on mldata.org,
        e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to a mldata.org URL .

    target_name: optional, default: 'label'
        Name or index of the column containing the target values.

    data_name: optional, default: 'data'
        Name or index of the column containing the data.

    transpose_data: optional, default: True
        If True, transpose the downloaded data array.

    data_home: optional, default: None
        Specify another download and cache folder for the data sets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn, 'target', the classification labels,
        'DESCR', the full description of the dataset, and
        'COL_NAMES', the original names of the dataset columns.

    Examples
    --------
    Load the 'iris' dataset from mldata.org:

    >>> from sklearn.datasets.mldata import fetch_mldata
    >>> import tempfile
    >>> test_data_home = tempfile.mkdtemp()

    >>> iris = fetch_mldata('iris', data_home=test_data_home)
    >>> iris.target.shape
    (150,)
    >>> iris.data.shape
    (150, 4)

    Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respects the sklearn axes convention:

    >>> leuk = fetch_mldata('leukemia', transpose_data=True,
    ...                     data_home=test_data_home)
    >>> leuk.data.shape
    (72, 7129)

    Load an alternative 'iris' dataset, which has different names for the
    columns:

    >>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
    ...                      data_name=0, data_home=test_data_home)
    >>> iris3 = fetch_mldata('datasets-UCI iris',
    ...                      target_name='class', data_name='double0',
    ...                      data_home=test_data_home)

    >>> import shutil
    >>> shutil.rmtree(test_data_home)
    """
    # normalize dataset name
    dataname = mldata_filename(dataname)

    # check if this data set has been already downloaded
    data_home = get_data_home(data_home=data_home)
    data_home = join(data_home, 'mldata')
    if not exists(data_home):
        os.makedirs(data_home)

    matlab_name = dataname + '.mat'
    filename = join(data_home, matlab_name)

    # if the file does not exist, download it
    if not exists(filename):
        urlname = MLDATA_BASE_URL % quote(dataname)
        try:
            mldata_url = urlopen(urlname)
        except HTTPError as e:
            if e.code == 404:
                # Give the 404 a dataset-specific message before re-raising.
                e.msg = "Dataset '%s' not found on mldata.org." % dataname
            raise
        # store Matlab file
        try:
            with open(filename, 'w+b') as matlab_file:
                copyfileobj(mldata_url, matlab_file)
        except:
            # Remove the partially written file before re-raising so a
            # corrupt download is never mistaken for a cached data set.
            os.remove(filename)
            raise
        mldata_url.close()

    # load dataset matlab file
    with open(filename, 'rb') as matlab_file:
        matlab_dict = io.loadmat(matlab_file, struct_as_record=True)

    # -- extract data from matlab_dict

    # flatten column names
    col_names = [str(descr[0])
                 for descr in matlab_dict['mldata_descr_ordering'][0]]

    # if target or data names are indices, transform then into names
    if isinstance(target_name, numbers.Integral):
        target_name = col_names[target_name]
    if isinstance(data_name, numbers.Integral):
        data_name = col_names[data_name]

    # rules for making sense of the mldata.org data format
    # (earlier ones have priority):
    # 1) there is only one array => it is "data"
    # 2) there are multiple arrays
    #    a) copy all columns in the bunch, using their column name
    #    b) if there is a column called `target_name`, set "target" to it,
    #       otherwise set "target" to first column
    #    c) if there is a column called `data_name`, set "data" to it,
    #       otherwise set "data" to second column
    dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
               'COL_NAMES': col_names}

    # 1) there is only one array => it is considered data
    if len(col_names) == 1:
        data_name = col_names[0]
        dataset['data'] = matlab_dict[data_name]
    # 2) there are multiple arrays
    else:
        for name in col_names:
            dataset[name] = matlab_dict[name]

        # The chosen target/data columns are removed under their original
        # names and re-exposed under the canonical 'target'/'data' keys.
        if target_name in col_names:
            del dataset[target_name]
            dataset['target'] = matlab_dict[target_name]
        else:
            del dataset[col_names[0]]
            dataset['target'] = matlab_dict[col_names[0]]

        if data_name in col_names:
            del dataset[data_name]
            dataset['data'] = matlab_dict[data_name]
        else:
            del dataset[col_names[1]]
            dataset['data'] = matlab_dict[col_names[1]]

    # set axes to sklearn conventions
    if transpose_data:
        dataset['data'] = dataset['data'].T
    if 'target' in dataset:
        if not sp.sparse.issparse(dataset['target']):
            # loadmat yields 2-D arrays; squeeze the target to 1-D.
            dataset['target'] = dataset['target'].squeeze()

    return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
    # setup mock urllib2 module to avoid downloading from mldata.org
    # (nose fixture: serves canned arrays for the datasets exercised by
    # the doctests in fetch_mldata's docstring)
    from sklearn.utils.testing import install_mldata_mock
    install_mldata_mock({
        'iris': {
            'data': np.empty((150, 4)),
            'label': np.empty(150),
        },
        'datasets-uci-iris': {
            'double0': np.empty((150, 4)),
            'class': np.empty((150,)),
        },
        'leukemia': {
            'data': np.empty((72, 7129)),
        },
    })
def teardown_module(module):
    # nose fixture: restore the real download path after the doctests ran
    from sklearn.utils.testing import uninstall_mldata_mock
    uninstall_mldata_mock()
|
bsd-3-clause
|
samuelshaner/openmc
|
openmc/clean_xml.py
|
3
|
2913
|
def sort_xml_elements(tree):
    """Sort the children of *tree* in place, grouped by tag and ordered by
    their string 'id' attribute.

    Comment nodes are re-inserted just before the element that originally
    followed them; elements without an 'id' attribute are left where they
    are.  NOTE: ordering by 'id' is lexicographic ('10' sorts before '2').
    """
    # Retrieve all children of the root XML node in the tree
    elements = list(tree)

    # Initialize an empty list for the sorted elements
    sorted_elements = []

    # Find the unique tags in the tree (e.g., Surface, Cell, Lattice)
    tags = set()
    for element in elements:
        tags.add(element.tag)

    # Record each comment together with the element that follows it so the
    # comment can be re-inserted at the same relative position later.
    comment_elements = []
    for index, element in enumerate(elements):
        next_element = None
        if 'Comment' in str(element.tag):
            if index < len(elements)-1:
                next_element = elements[index+1]
            comment_elements.append((element, next_element))

    # Comment nodes carry a callable tag, which neither compares against
    # strings under Python 3 sorting nor works with findall(); restrict the
    # pass to string tags (the original's bare except skipped them anyway).
    for tag in sorted(t for t in tags if isinstance(t, str)):
        # Retrieve all of the elements for this tag; findall() rejects tags
        # that are not valid path expressions.
        try:
            tagged_elements = tree.findall(tag)
        except (SyntaxError, TypeError, KeyError):
            continue

        # Pair each element with its 'id' so tuples sort by id first.
        tagged_data = []
        for element in tagged_elements:
            key = element.get('id')
            if key is not None:
                tagged_data.append((key, element))

        # Sort the elements according to the IDs for this tag
        tagged_data.sort()
        sorted_elements.extend(element for _, element in tagged_data)

    # Add the comment elements while preserving the original precedence.
    # NOTE(review): assumes every recorded successor carries an 'id' (so it
    # is present in sorted_elements); a trailing comment raises ValueError.
    for element, next_element in comment_elements:
        index = sorted_elements.index(next_element)
        sorted_elements.insert(index, element)

    # Detach the sorted elements and re-append them in the new order
    for element in sorted_elements:
        tree.remove(element)
    tree.extend(sorted_elements)
def clean_xml_indentation(element, level=0):
    """Recursively pad .text/.tail with newlines and indentation so the
    serialized tree prints in a human-readable layout.

    Adapted from http://effbot.org/zone/elementent-lib.htm#prettyprint
    """
    indent = "\n" + level*" "
    if len(element):
        # Parent node: open its body one level deeper...
        if not element.text or not element.text.strip():
            element.text = indent + " "
        if not element.tail or not element.tail.strip():
            element.tail = indent
        for child in element:
            clean_xml_indentation(child, level+1)
            # ...and realign the last child's tail with this element.
            if not child.tail or not child.tail.strip():
                child.tail = indent
    elif level and (not element.tail or not element.tail.strip()):
        # Leaf node (root excluded): terminate the line after it.
        element.tail = indent
|
mit
|
ROM-Jeremy/android_kernel_x5
|
tools/perf/python/twatch.py
|
7370
|
1334
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    """Open a sampling event on every CPU/thread and print samples forever.

    Python 2 only (print statement syntax); depends on the perf python
    binding built from tools/perf.
    """
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # NOTE(review): perf.SAMPLE_TID appears twice in sample_type below; the
    # second occurrence is a no-op OR -- possibly SAMPLE_TIME was intended.
    # TODO confirm against the perf binding.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block until any mmap buffer has data, then drain each CPU's buffer.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event
# Script entry point: runs until interrupted.
if __name__ == '__main__':
    main()
|
gpl-2.0
|
sbellem/django
|
tests/migrations/test_executor.py
|
202
|
24097
|
from django.apps.registry import apps as global_apps
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from django.db.utils import DatabaseError
from django.test import TestCase, modify_settings, override_settings
from .test_base import MigrationTestBase
@modify_settings(INSTALLED_APPS={'append': 'migrations2'})
class ExecutorTests(MigrationTestBase):
"""
Tests the migration executor (full end-to-end running).
Bear in mind that if these are failing you should fix the other
test failures first, as they may be propagating into here.
"""
available_apps = ["migrations", "migrations2", "django.contrib.auth", "django.contrib.contenttypes"]
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_run(self):
        """
        Tests running a simple set of migrations.
        """
        executor = MigrationExecutor(connection)
        # Let's look at the plan first and make sure it's up to scratch
        plan = executor.migration_plan([("migrations", "0002_second")])
        self.assertEqual(
            plan,
            [
                # (migration, backwards?) pairs: both applied forwards here.
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
                (executor.loader.graph.nodes["migrations", "0002_second"], False),
            ],
        )
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
        # Alright, let's try running it
        executor.migrate([("migrations", "0002_second")])
        # Are the tables there now?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_book")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Alright, let's undo what we did
        plan = executor.migration_plan([("migrations", None)])
        self.assertEqual(
            plan,
            [
                # Backwards plan: most recent migration is unapplied first.
                (executor.loader.graph.nodes["migrations", "0002_second"], True),
                (executor.loader.graph.nodes["migrations", "0001_initial"], True),
            ],
        )
        executor.migrate([("migrations", None)])
        # Are the tables gone?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_run_with_squashed(self):
        """
        Tests running a squashed migration from zero (should ignore what it replaces)
        """
        executor = MigrationExecutor(connection)
        # Check our leaf node is the squashed one
        leaves = [key for key in executor.loader.graph.leaf_nodes() if key[0] == "migrations"]
        self.assertEqual(leaves, [("migrations", "0001_squashed_0002")])
        # Check the plan: only the squashed migration, never its replaced ones.
        plan = executor.migration_plan([("migrations", "0001_squashed_0002")])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_squashed_0002"], False),
            ],
        )
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
        # Alright, let's try running it
        executor.migrate([("migrations", "0001_squashed_0002")])
        # Are the tables there now?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_book")
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Alright, let's undo what we did. Should also just use squashed.
        plan = executor.migration_plan([("migrations", None)])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_squashed_0002"], True),
            ],
        )
        executor.migrate([("migrations", None)])
        # Are the tables gone?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_book")
    @override_settings(MIGRATION_MODULES={
        "migrations": "migrations.test_migrations",
        "migrations2": "migrations2.test_migrations_2",
    })
    def test_empty_plan(self):
        """
        Tests that re-planning a full migration of a fully-migrated set doesn't
        perform spurious unmigrations and remigrations.

        There was previously a bug where the executor just always performed the
        backwards plan for applied migrations - which even for the most recent
        migration in an app, might include other, dependent apps, and these
        were being unmigrated.
        """
        # Make the initial plan, check it
        executor = MigrationExecutor(connection)
        plan = executor.migration_plan([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ])
        self.assertEqual(
            plan,
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
                (executor.loader.graph.nodes["migrations", "0002_second"], False),
                (executor.loader.graph.nodes["migrations2", "0001_initial"], False),
            ],
        )
        # Fake-apply all migrations (records them without touching schema)
        executor.migrate([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial")
        ], fake=True)
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Now plan a second time and make sure it's empty
        plan = executor.migration_plan([
            ("migrations", "0002_second"),
            ("migrations2", "0001_initial"),
        ])
        self.assertEqual(plan, [])
        # Erase all the fake records so later tests see a clean recorder state
        executor.recorder.record_unapplied("migrations2", "0001_initial")
        executor.recorder.record_unapplied("migrations", "0002_second")
        executor.recorder.record_unapplied("migrations", "0001_initial")
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_soft_apply(self):
        """
        Tests detection of initial migrations already having been applied.
        """
        # Mutable cell updated by the progress callback so we can observe
        # whether each migration step was faked or really executed.
        state = {"faked": None}

        def fake_storer(phase, migration=None, fake=None):
            state["faked"] = fake
        executor = MigrationExecutor(connection, progress_callback=fake_storer)
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Run it normally
        self.assertEqual(
            executor.migration_plan([("migrations", "0001_initial")]),
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
            ],
        )
        executor.migrate([("migrations", "0001_initial")])
        # Are the tables there now?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # We shouldn't have faked that one
        self.assertEqual(state["faked"], False)
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        # Fake-reverse that
        executor.migrate([("migrations", None)], fake=True)
        # Are the tables still there?
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Make sure that was faked
        self.assertEqual(state["faked"], True)
        # Finally, migrate forwards; this should fake-apply our initial migration
        executor.loader.build_graph()
        self.assertEqual(
            executor.migration_plan([("migrations", "0001_initial")]),
            [
                (executor.loader.graph.nodes["migrations", "0001_initial"], False),
            ],
        )
        # Applying the migration should raise a database level error
        # because we haven't given the --fake-initial option
        with self.assertRaises(DatabaseError):
            executor.migrate([("migrations", "0001_initial")])
        # Reset the faked state
        state = {"faked": None}
        # Allow faking of initial CreateModel operations
        executor.migrate([("migrations", "0001_initial")], fake_initial=True)
        self.assertEqual(state["faked"], True)
        # And migrate back to clean up the database
        executor.loader.build_graph()
        executor.migrate([("migrations", None)])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
    @override_settings(
        MIGRATION_MODULES={
            "migrations": "migrations.test_migrations_custom_user",
            "django.contrib.auth": "django.contrib.auth.migrations",
        },
        AUTH_USER_MODEL="migrations.Author",
    )
    def test_custom_user(self):
        """
        Regression test for #22325 - references to a custom user model defined in the
        same app are not resolved correctly.
        """
        executor = MigrationExecutor(connection)
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        # Migrate forwards
        executor.migrate([("migrations", "0001_initial")])
        self.assertTableExists("migrations_author")
        self.assertTableExists("migrations_tribble")
        # Make sure the soft-application detection works (#23093)
        # Change table_names to not return auth_user during this as
        # it wouldn't be there in a normal run, and ensure migrations.Author
        # exists in the global app registry temporarily.
        old_table_names = connection.introspection.table_names
        connection.introspection.table_names = lambda c: [x for x in old_table_names(c) if x != "auth_user"]
        migrations_apps = executor.loader.project_state(("migrations", "0001_initial")).apps
        global_apps.get_app_config("migrations").models["author"] = migrations_apps.get_model("migrations", "author")
        try:
            migration = executor.loader.get_migration("auth", "0001_initial")
            self.assertEqual(executor.detect_soft_applied(None, migration)[0], True)
        finally:
            # Always restore the patched introspection hook and registry entry,
            # even if the assertion fails.
            connection.introspection.table_names = old_table_names
            del global_apps.get_app_config("migrations").models["author"]
        # And migrate back to clean up the database
        executor.loader.build_graph()
        executor.migrate([("migrations", None)])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
    @override_settings(
        INSTALLED_APPS=[
            "migrations.migrations_test_apps.lookuperror_a",
            "migrations.migrations_test_apps.lookuperror_b",
            "migrations.migrations_test_apps.lookuperror_c"
        ]
    )
    def test_unrelated_model_lookups_forwards(self):
        """
        #24123 - Tests that all models of apps already applied which are
        unrelated to the first app being applied are part of the initial model
        state.
        """
        try:
            executor = MigrationExecutor(connection)
            self.assertTableNotExists("lookuperror_a_a1")
            self.assertTableNotExists("lookuperror_b_b1")
            self.assertTableNotExists("lookuperror_c_c1")
            # Apply app b first on its own so its models count as "already
            # applied" when a and c are migrated below.
            executor.migrate([("lookuperror_b", "0003_b3")])
            self.assertTableExists("lookuperror_b_b3")
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()
            # Migrate forwards -- This led to a lookup LookupErrors because
            # lookuperror_b.B2 is already applied
            executor.migrate([
                ("lookuperror_a", "0004_a4"),
                ("lookuperror_c", "0003_c3"),
            ])
            self.assertTableExists("lookuperror_a_a4")
            self.assertTableExists("lookuperror_c_c3")
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()
        finally:
            # Cleanup
            executor.migrate([
                ("lookuperror_a", None),
                ("lookuperror_b", None),
                ("lookuperror_c", None),
            ])
            self.assertTableNotExists("lookuperror_a_a1")
            self.assertTableNotExists("lookuperror_b_b1")
            self.assertTableNotExists("lookuperror_c_c1")
    @override_settings(
        INSTALLED_APPS=[
            "migrations.migrations_test_apps.lookuperror_a",
            "migrations.migrations_test_apps.lookuperror_b",
            "migrations.migrations_test_apps.lookuperror_c"
        ]
    )
    def test_unrelated_model_lookups_backwards(self):
        """
        #24123 - Tests that all models of apps being unapplied which are
        unrelated to the first app being unapplied are part of the initial
        model state.
        """
        try:
            executor = MigrationExecutor(connection)
            self.assertTableNotExists("lookuperror_a_a1")
            self.assertTableNotExists("lookuperror_b_b1")
            self.assertTableNotExists("lookuperror_c_c1")
            # Bring all three apps fully forward first.
            executor.migrate([
                ("lookuperror_a", "0004_a4"),
                ("lookuperror_b", "0003_b3"),
                ("lookuperror_c", "0003_c3"),
            ])
            self.assertTableExists("lookuperror_b_b3")
            self.assertTableExists("lookuperror_a_a4")
            self.assertTableExists("lookuperror_c_c3")
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()
            # Migrate backwards -- This led to a lookup LookupErrors because
            # lookuperror_b.B2 is not in the initial state (unrelated to app c)
            executor.migrate([("lookuperror_a", None)])
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()
        finally:
            # Cleanup
            executor.migrate([
                ("lookuperror_b", None),
                ("lookuperror_c", None)
            ])
            self.assertTableNotExists("lookuperror_a_a1")
            self.assertTableNotExists("lookuperror_b_b1")
            self.assertTableNotExists("lookuperror_c_c1")
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
    def test_process_callback(self):
        """
        #24129 - Tests callback process

        Verifies the exact sequence and arguments of progress_callback
        invocations across a forward and a backward migration run.
        """
        call_args_list = []
        def callback(*args):
            # Record every invocation verbatim for comparison below.
            call_args_list.append(args)
        executor = MigrationExecutor(connection, progress_callback=callback)
        # Were the tables there before?
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        executor.migrate([
            ("migrations", "0001_initial"),
            ("migrations", "0002_second"),
        ])
        # Rebuild the graph to reflect the new DB state
        executor.loader.build_graph()
        executor.migrate([
            ("migrations", None),
            ("migrations", None),
        ])
        self.assertTableNotExists("migrations_author")
        self.assertTableNotExists("migrations_tribble")
        migrations = executor.loader.graph.nodes
        # Each apply/unapply event carries the migration and a fake flag;
        # nothing is faked in this test, hence False throughout.
        expected = [
            ("render_start", ),
            ("render_success", ),
            ("apply_start", migrations['migrations', '0001_initial'], False),
            ("apply_success", migrations['migrations', '0001_initial'], False),
            ("apply_start", migrations['migrations', '0002_second'], False),
            ("apply_success", migrations['migrations', '0002_second'], False),
            ("render_start", ),
            ("render_success", ),
            ("unapply_start", migrations['migrations', '0002_second'], False),
            ("unapply_success", migrations['migrations', '0002_second'], False),
            ("unapply_start", migrations['migrations', '0001_initial'], False),
            ("unapply_success", migrations['migrations', '0001_initial'], False),
        ]
        self.assertEqual(call_args_list, expected)
    @override_settings(
        INSTALLED_APPS=[
            "migrations.migrations_test_apps.alter_fk.author_app",
            "migrations.migrations_test_apps.alter_fk.book_app",
        ]
    )
    def test_alter_id_type_with_fk(self):
        """
        Altering the type of a primary key succeeds while another app's
        migrated model references it via a foreign key (author_app/book_app).
        """
        try:
            executor = MigrationExecutor(connection)
            self.assertTableNotExists("author_app_author")
            self.assertTableNotExists("book_app_book")
            # Apply initial migrations
            executor.migrate([
                ("author_app", "0001_initial"),
                ("book_app", "0001_initial"),
            ])
            self.assertTableExists("author_app_author")
            self.assertTableExists("book_app_book")
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()
            # Apply PK type alteration
            executor.migrate([("author_app", "0002_alter_id")])
            # Rebuild the graph to reflect the new DB state
            executor.loader.build_graph()
        finally:
            # We can't simply unapply the migrations here because there is no
            # implicit cast from VARCHAR to INT on the database level.
            with connection.schema_editor() as editor:
                editor.execute(editor.sql_delete_table % {"table": "book_app_book"})
                editor.execute(editor.sql_delete_table % {"table": "author_app_author"})
            self.assertTableNotExists("author_app_author")
            self.assertTableNotExists("book_app_book")
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_apply_all_replaced_marks_replacement_as_applied(self):
        """
        Applying all replaced migrations marks replacement as applied (#24628).
        """
        recorder = MigrationRecorder(connection)
        # Place the database in a state where the replaced migrations are
        # partially applied: 0001 is applied, 0002 is not.
        recorder.record_applied("migrations", "0001_initial")
        executor = MigrationExecutor(connection)
        # Use fake because we don't actually have the first migration
        # applied, so the second will fail. And there's no need to actually
        # create/modify tables here, we're just testing the
        # MigrationRecord, which works the same with or without fake.
        executor.migrate([("migrations", "0002_second")], fake=True)
        # Because we've now applied 0001 and 0002 both, their squashed
        # replacement should be marked as applied.
        self.assertIn(
            ("migrations", "0001_squashed_0002"),
            recorder.applied_migrations(),
        )
    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
    def test_migrate_marks_replacement_applied_even_if_it_did_nothing(self):
        """
        A new squash migration will be marked as applied even if all its
        replaced migrations were previously already applied (#24628).
        """
        recorder = MigrationRecorder(connection)
        # Record all replaced migrations as applied
        recorder.record_applied("migrations", "0001_initial")
        recorder.record_applied("migrations", "0002_second")
        executor = MigrationExecutor(connection)
        # No schema work should be needed; this is purely bookkeeping.
        executor.migrate([("migrations", "0001_squashed_0002")])
        # Because 0001 and 0002 are both applied, even though this migrate run
        # didn't apply anything new, their squashed replacement should be
        # marked as applied.
        self.assertIn(
            ("migrations", "0001_squashed_0002"),
            recorder.applied_migrations(),
        )
class FakeLoader(object):
    """Minimal MigrationLoader stand-in exposing only what the executor
    reads: a migration graph and the set of applied migrations."""

    def __init__(self, graph, applied):
        self.applied_migrations = applied
        self.graph = graph
class FakeMigration(object):
    """Really all we need is any object with a debug-useful repr."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        # Compact form, e.g. M<a1>, so failing plan assertions read well.
        return 'M<{}>'.format(self.name)
class ExecutorUnitTests(TestCase):
    """(More) isolated unit tests for executor methods.

    These build small in-memory MigrationGraphs with FakeMigration /
    FakeLoader instead of touching a real database.
    """
    def test_minimize_rollbacks(self):
        """
        Minimize unnecessary rollbacks in connected apps.
        When you say "./manage.py migrate appA 0001", rather than migrating to
        just after appA-0001 in the linearized migration plan (which could roll
        back migrations in other apps that depend on appA 0001, but don't need
        to be rolled back since we're not rolling back appA 0001), we migrate
        to just before appA-0002.
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(b1, b1_impl)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, a2, a1)
        executor = MigrationExecutor(None)
        executor.loader = FakeLoader(graph, {a1, b1, a2})
        plan = executor.migration_plan({a1})
        # Only a2 is rolled back (True = backwards); b1 depends on a1 but
        # a1 itself stays applied, so b1 is left alone.
        self.assertEqual(plan, [(a2_impl, True)])
    def test_minimize_rollbacks_branchy(self):
        """
        Minimize rollbacks when target has multiple in-app children.
        a: 1 <---- 3 <--\
           \ \- 2 <--- 4
            \       \
        b:   \- 1 <--- 2
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        a3_impl = FakeMigration('a3')
        a3 = ('a', '3')
        a4_impl = FakeMigration('a4')
        a4 = ('a', '4')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        b2_impl = FakeMigration('b2')
        b2 = ('b', '2')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(a3, a3_impl)
        graph.add_node(a4, a4_impl)
        graph.add_node(b1, b1_impl)
        graph.add_node(b2, b2_impl)
        graph.add_dependency(None, a2, a1)
        graph.add_dependency(None, a3, a1)
        graph.add_dependency(None, a4, a2)
        graph.add_dependency(None, a4, a3)
        graph.add_dependency(None, b2, b1)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, b2, a2)
        executor = MigrationExecutor(None)
        executor.loader = FakeLoader(graph, {a1, b1, a2, b2, a3, a4})
        plan = executor.migration_plan({a1})
        # Everything after a1 is unwound, dependents-first; b1 survives.
        should_be_rolled_back = [b2_impl, a4_impl, a2_impl, a3_impl]
        exp = [(m, True) for m in should_be_rolled_back]
        self.assertEqual(plan, exp)
    def test_backwards_nothing_to_do(self):
        """
        If the current state satisfies the given target, do nothing.
        a: 1 <--- 2
        b:   \- 1
        c:   \- 1
        If a1 is applied already and a2 is not, and we're asked to migrate to
        a1, don't apply or unapply b1 or c1, regardless of their current state.
        """
        a1_impl = FakeMigration('a1')
        a1 = ('a', '1')
        a2_impl = FakeMigration('a2')
        a2 = ('a', '2')
        b1_impl = FakeMigration('b1')
        b1 = ('b', '1')
        c1_impl = FakeMigration('c1')
        c1 = ('c', '1')
        graph = MigrationGraph()
        graph.add_node(a1, a1_impl)
        graph.add_node(a2, a2_impl)
        graph.add_node(b1, b1_impl)
        graph.add_node(c1, c1_impl)
        graph.add_dependency(None, a2, a1)
        graph.add_dependency(None, b1, a1)
        graph.add_dependency(None, c1, a1)
        executor = MigrationExecutor(None)
        # b1 applied, c1 not -- neither should change.
        executor.loader = FakeLoader(graph, {a1, b1})
        plan = executor.migration_plan({a1})
        self.assertEqual(plan, [])
|
bsd-3-clause
|
eunchong/build
|
scripts/common/twisted_util/netrc_authorizer.py
|
2
|
1382
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""NETRCAuthorizer class"""
import base64
import netrc
import urlparse
from common.twisted_util.authorizer import IAuthorizer
from zope.interface import implements
class EmptyNetrc(object):
    """Null object standing in for ``netrc.netrc`` when no file is readable.

    Presents the same interface but never yields credentials.
    """

    def authenticators(self, _):
        # No netrc file means no credentials for any host.
        return None

    def __repr__(self):
        return ''

    @property
    def hosts(self):
        return {}

    @property
    def macros(self):
        return {}
class NETRCAuthorizer(object):
    """An Authorizer implementation that loads its authorization from a '.netrc'
    file.
    """
    implements(IAuthorizer)

    def __init__(self, netrc_path=None):
        """Initializes a new NetRC Authorizer

        Args:
          netrc_path: (str) If not None, use this as the 'netrc' file path;
              otherwise, use '~/.netrc'.
        """
        try:
            self._netrc = netrc.netrc(netrc_path)
        except IOError:
            # Missing or unreadable netrc: fall back to a credential-less
            # stub so lookups simply find nothing.
            self._netrc = EmptyNetrc()

    def addAuthHeadersForURL(self, headers, url):
        """Adds an HTTP Basic 'Authorization' header for the URL's host.

        Returns:
          True if credentials were found and a header was set, else False.
        """
        hostname = urlparse.urlparse(url).hostname
        entry = self._netrc.authenticators(hostname)
        if entry is None:
            return False
        # A netrc entry is a (login, account, password) triple.
        credentials = base64.b64encode('%s:%s' % (entry[0], entry[2]))
        headers.setRawHeaders('Authorization', ['Basic %s' % credentials])
        return True
|
bsd-3-clause
|
loic/django
|
django/core/checks/security/csrf.py
|
62
|
2003
|
from django.conf import settings
from .. import Tags, Warning, register
from ..utils import patch_middleware_message
# W003: the CSRF middleware is missing entirely.
W003 = Warning(
    "You don't appear to be using Django's built-in "
    "cross-site request forgery protection via the middleware "
    "('django.middleware.csrf.CsrfViewMiddleware' is not in your "
    "MIDDLEWARE). Enabling the middleware is the safest approach "
    "to ensure you don't leave any holes.",
    id='security.W003',
)
# W016: CSRF middleware present, but cookie not restricted to HTTPS.
W016 = Warning(
    "You have 'django.middleware.csrf.CsrfViewMiddleware' in your "
    "MIDDLEWARE, but you have not set CSRF_COOKIE_SECURE to True. "
    "Using a secure-only CSRF cookie makes it more difficult for network "
    "traffic sniffers to steal the CSRF token.",
    id='security.W016',
)
# W017: CSRF middleware present, but cookie accessible to JavaScript.
W017 = Warning(
    "You have 'django.middleware.csrf.CsrfViewMiddleware' in your "
    "MIDDLEWARE, but you have not set CSRF_COOKIE_HTTPONLY to True. "
    "Using an HttpOnly CSRF cookie makes it more difficult for cross-site "
    "scripting attacks to steal the CSRF token.",
    id='security.W017',
)
def _csrf_middleware():
    """Return a truthy value when CsrfViewMiddleware is enabled via either
    the MIDDLEWARE setting or the legacy MIDDLEWARE_CLASSES setting."""
    csrf_middleware_path = "django.middleware.csrf.CsrfViewMiddleware"
    return (csrf_middleware_path in settings.MIDDLEWARE_CLASSES or
            settings.MIDDLEWARE and csrf_middleware_path in settings.MIDDLEWARE)
@register(Tags.security, deploy=True)
def check_csrf_middleware(app_configs, **kwargs):
    """Emit W003 when the CSRF middleware is not enabled at all."""
    if _csrf_middleware():
        return []
    return [patch_middleware_message(W003)]
@register(Tags.security, deploy=True)
def check_csrf_cookie_secure(app_configs, **kwargs):
    """Emit W016 when CSRF is enabled but its cookie is not secure-only."""
    if not _csrf_middleware():
        # Without the middleware, the cookie setting is irrelevant.
        return []
    return [] if settings.CSRF_COOKIE_SECURE else [patch_middleware_message(W016)]
@register(Tags.security, deploy=True)
def check_csrf_cookie_httponly(app_configs, **kwargs):
    """Emit W017 when CSRF is enabled but its cookie is not HttpOnly."""
    if not _csrf_middleware():
        # Without the middleware, the cookie setting is irrelevant.
        return []
    return [] if settings.CSRF_COOKIE_HTTPONLY else [patch_middleware_message(W017)]
|
bsd-3-clause
|
webmasterraj/FogOrNot
|
flask/lib/python2.7/site-packages/boto/sdb/__init__.py
|
14
|
1972
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from .regioninfo import SDBRegionInfo
from boto.regioninfo import get_regions
def regions():
    """
    Get all available regions for the SDB service.

    :rtype: list
    :return: A list of :class:`boto.sdb.regioninfo.RegionInfo` instances
    """
    return get_regions('sdb', region_cls=SDBRegionInfo)
def connect_to_region(region_name, **kw_params):
    """
    Given a valid region name, return a
    :class:`boto.sdb.connection.SDBConnection`.

    :type: str
    :param region_name: The name of the region to connect to.

    :rtype: :class:`boto.sdb.connection.SDBConnection` or ``None``
    :return: A connection to the given region, or None if an invalid region
             name is given
    """
    match = next((r for r in regions() if r.name == region_name), None)
    if match is None:
        return None
    return match.connect(**kw_params)
|
gpl-2.0
|
waaaaargh/di_playlist_grabber
|
di.py
|
1
|
2209
|
#!/usr/bin/env python3
from argparse import ArgumentParser
import json
import os.path
import sys
from urllib.request import urlopen
if __name__ == '__main__':
    parser = ArgumentParser(description='Grab di.fm playlists')
    parser.add_argument('--key', help='Listening key')
    parser.add_argument('--destination', help='Target directory for playlist files')
    parser.add_argument('quality', choices=["40kaac", "96kmp3", "64kaac", "128kaac", "256kmp3"], help='Stream bitrate')
    args = parser.parse_args()
    # Premium qualities require a listening key; bail out early otherwise.
    if args.quality in ['128kaac', '256kmp3'] and args.key is None:
        print("[!] Sorry, you get high quality playlists if you supply a listening key :(")
        sys.exit(1)
    # get all channels and IDs
    # NOTE(review): assumes the public5 endpoint returns a JSON list of
    # channel dicts each carrying a 'key' field -- verify if the API changes.
    channels = json.loads(urlopen('http://listen.di.fm/public5').read().decode("utf-8"))
    print("[i] found %d channels." % len(channels))
    # Maps the CLI quality choice to the server-side stream-name suffix.
    quality_to_suffix = {
        '40kaac': '_aacplus',
        '64kaac': '_aac',
        '96kmp3': '',
        '128kaac': '',
        '256kmp3': '_hi',
    }
    # premium-servers
    if args.key is not None:
        di_servers = [
            'prem1.di.fm:80',
            'prem2.di.fm:80',
            'prem4.di.fm:80',
        ]
    # free (as in free beer) servers
    else:
        di_servers = [
            'pub1.di.fm:80',
            'pub2.di.fm:80',
            'pub3.di.fm:80',
            'pub4.di.fm:80',
            'pub5.di.fm:80',
            'pub6.di.fm:80',
            'pub7.di.fm:80',
            'pub8.di.fm:80',
        ]
    try:
        # One .m3u playlist per channel, listing every server's stream URL.
        for channel in channels:
            playlist_name = os.path.join(args.destination or "", "di_" + channel['key'] + ".m3u")
            with open(playlist_name, 'w') as f:
                for server in di_servers:
                    stream_path = channel['key'] + quality_to_suffix[args.quality]
                    # Free streams use a 'di_' name prefix; premium streams
                    # authenticate via the key as a query string instead.
                    if args.key is None:
                        stream_path = 'di_' + stream_path
                    else:
                        stream_path += "?%s" % args.key
                    f.write("http://%s/%s\n" % (server, stream_path))
    except IOError:
        print("[!] Sorry, couldn't open destination file. Are you sure the base directory exists?")
|
gpl-3.0
|
vicky2135/lucious
|
oscar/lib/python2.7/site-packages/sqlparse/filters/tokens.py
|
20
|
1612
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
from sqlparse import tokens as T
from sqlparse.compat import text_type
class _CaseFilter(object):
    """Base filter that case-converts the values of matching token types."""

    # Token type(s) to convert; subclasses must override.
    ttype = None

    def __init__(self, case=None):
        if not case:
            case = 'upper'
        # Bind the str/unicode method named by *case* ('upper', 'lower', ...).
        self.convert = getattr(text_type, case)

    def process(self, stream):
        for ttype, value in stream:
            converted = self.convert(value) if ttype in self.ttype else value
            yield ttype, converted
class KeywordCaseFilter(_CaseFilter):
    """Apply the configured case conversion to SQL keyword tokens."""
    ttype = T.Keyword
class IdentifierCaseFilter(_CaseFilter):
    """Case-convert identifiers, leaving double-quoted ones untouched."""

    ttype = T.Name, T.String.Symbol

    def process(self, stream):
        for ttype, value in stream:
            if ttype not in self.ttype:
                yield ttype, value
                continue
            # Quoted identifiers are case-sensitive in SQL; skip them.
            if value.strip()[0] == '"':
                yield ttype, value
                continue
            yield ttype, self.convert(value)
class TruncateStringFilter(object):
    """Shorten single-quoted string literals longer than *width*,
    appending *char* as a truncation marker inside the quotes."""

    def __init__(self, width, char):
        self.width = width
        self.char = char

    def process(self, stream):
        for ttype, value in stream:
            if ttype != T.Literal.String.Single:
                yield ttype, value
                continue
            # Detect the quoting style: doubled '' or a plain '.
            quote = "''" if value[:2] == "''" else "'"
            inner = value[len(quote):-len(quote)]
            if len(inner) > self.width:
                value = ''.join((quote, inner[:self.width], self.char, quote))
            yield ttype, value
|
bsd-3-clause
|
rwl/PyCIM
|
CIM15/IEC61970/Informative/InfGMLSupport/GmlTopologyStyle.py
|
1
|
3427
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class GmlTopologyStyle(IdentifiedObject):
    """The style for one topology property. Similarly to the Geometry style, a feature can have multiple topology properties, thus multiple topology style descriptors can be specified within one feature style.The style for one topology property. Similarly to the Geometry style, a feature can have multiple topology properties, thus multiple topology style descriptors can be specified within one feature style.
    """
    # NOTE(review): the 'GmlLableStyle' and 'GmlTobologyStyles' spellings
    # come from the generated CIM profile and must match the counterpart
    # classes' attribute names -- do not "fix" them in isolation.
    def __init__(self, GmlLableStyle=None, GmlFeatureStyle=None, *args, **kw_args):
        """Initialises a new 'GmlTopologyStyle' instance.
        @param GmlLableStyle:
        @param GmlFeatureStyle:
        """
        # Assign through the properties so the reverse references on the
        # related objects are kept in sync from construction onwards.
        self._GmlLableStyle = None
        self.GmlLableStyle = GmlLableStyle
        self._GmlFeatureStyle = None
        self.GmlFeatureStyle = GmlFeatureStyle
        super(GmlTopologyStyle, self).__init__(*args, **kw_args)
    # Introspection metadata used by the CIM serialization machinery.
    _attrs = []
    _attr_types = {}
    _defaults = {}
    _enums = {}
    _refs = ["GmlLableStyle", "GmlFeatureStyle"]
    _many_refs = []
    def getGmlLableStyle(self):
        return self._GmlLableStyle
    def setGmlLableStyle(self, value):
        # Detach from the previous label style's back-reference list, then
        # attach to the new one (bidirectional one-to-many maintenance).
        if self._GmlLableStyle is not None:
            filtered = [x for x in self.GmlLableStyle.GmlTopologyStyles if x != self]
            self._GmlLableStyle._GmlTopologyStyles = filtered
        self._GmlLableStyle = value
        if self._GmlLableStyle is not None:
            if self not in self._GmlLableStyle._GmlTopologyStyles:
                self._GmlLableStyle._GmlTopologyStyles.append(self)
    GmlLableStyle = property(getGmlLableStyle, setGmlLableStyle)
    def getGmlFeatureStyle(self):
        return self._GmlFeatureStyle
    def setGmlFeatureStyle(self, value):
        # Same detach/attach dance against the feature style's
        # (misspelled, generated) 'GmlTobologyStyles' collection.
        if self._GmlFeatureStyle is not None:
            filtered = [x for x in self.GmlFeatureStyle.GmlTobologyStyles if x != self]
            self._GmlFeatureStyle._GmlTobologyStyles = filtered
        self._GmlFeatureStyle = value
        if self._GmlFeatureStyle is not None:
            if self not in self._GmlFeatureStyle._GmlTobologyStyles:
                self._GmlFeatureStyle._GmlTobologyStyles.append(self)
    GmlFeatureStyle = property(getGmlFeatureStyle, setGmlFeatureStyle)
mit
|
bjmc/kodos
|
modules/regexLibrary.py
|
2
|
1540
|
from regexLibraryBA import RegexLibraryBA
from parseRegexLib import ParseRegexLib
from qt import *
from util import restoreWindowSettings, saveWindowSettings, kodos_toolbar_logo
import os
GEO = "regex-lib_geometry"
class RegexLibrary(RegexLibraryBA):
    """Dialog presenting a library of regexes parsed from an XML file.

    Selecting a description populates the detail widgets; "paste" emits the
    selected entry back to the parent window via the 'pasteRegexLib()' signal.
    """
    def __init__(self, parent, filename):
        RegexLibraryBA.__init__(self, None)
        self.parent = parent
        self.filename = filename
        self.selected = None  # dict for the currently selected entry, if any
        self.parseXML()
        self.populateListBox()
        kodos_toolbar_logo(self.toolBar)
        restoreWindowSettings(self, GEO)
    def closeEvent(self, ev):
        """Persist window geometry before the dialog closes."""
        saveWindowSettings(self, GEO)
        ev.accept()
    def parseXML(self):
        """Parse the library XML file into a list of entry dicts."""
        parser = ParseRegexLib(self.filename)
        self.xml_dicts = parser.parse()
    def populateListBox(self):
        """Fill the description list box from the parsed entries."""
        for d in self.xml_dicts:
            self.descriptionListBox.insertItem(d.get('desc', "<unknown>"))
    def descSelectedSlot(self, qlistboxitem):
        """Slot: show the details for the newly selected description."""
        # Fix: compare with 'is None' (identity), not '== None'; Qt passes
        # None when the selection is cleared.
        if qlistboxitem is None:
            return
        itemnum = self.descriptionListBox.currentItem()
        self.populateSelected(self.xml_dicts[itemnum])
    def populateSelected(self, xml_dict):
        """Copy one entry's fields into the detail widgets."""
        self.regexTextBrowser.setText(xml_dict.get('regex', ""))
        self.contribEdit.setText(xml_dict.get("contrib", ""))
        self.noteTextBrowser.setText(xml_dict.get('note', ""))
        self.selected = xml_dict
    def editPaste(self):
        """Emit the selected entry to the parent window for pasting."""
        if self.selected:
            self.parent.emit(PYSIGNAL('pasteRegexLib()'), (self.selected,) )
|
gpl-2.0
|
swalladge/ranger
|
ranger/gui/widgets/taskview.py
|
2
|
2868
|
# This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
"""The TaskView allows you to modify what the loader is doing."""
from __future__ import (absolute_import, division, print_function)
from ranger.ext.accumulator import Accumulator
from . import Widget
class TaskView(Widget, Accumulator):
    """Widget listing the loader's queued tasks with progress bars."""
    # Previous task list, kept to detect when a redraw is needed.
    old_lst = None
    def __init__(self, win):
        Widget.__init__(self, win)
        Accumulator.__init__(self)
        self.scroll_begin = 0
    def draw(self):
        """Redraw the task list if it changed since the last draw."""
        base_clr = []
        base_clr.append('in_taskview')
        lst = self.get_list()
        if self.old_lst != lst:
            self.old_lst = lst
            self.need_redraw = True
        if self.need_redraw:
            self.win.erase()
            if not self.pointer_is_synced():
                self.sync_index()
            if self.hei <= 0:
                return
            self.addstr(0, 0, "Task View")
            self.color_at(0, 0, self.wid, tuple(base_clr), 'title')
            if lst:
                # One row per visible task, offset by the scroll position.
                for i in range(self.hei - 1):
                    i += self.scroll_begin
                    try:
                        obj = lst[i]
                    except IndexError:
                        break
                    y = i + 1
                    clr = list(base_clr)
                    if self.pointer == i:
                        clr.append('selected')
                    descr = obj.get_description()
                    if obj.progressbar_supported and obj.percent >= 0 and obj.percent <= 100:
                        self.addstr(y, 0, "%3.2f%% - %s" % (obj.percent, descr), self.wid)
                        # Highlight the completed fraction of the row.
                        wid = int((self.wid / 100) * obj.percent)
                        self.color_at(y, 0, self.wid, tuple(clr))
                        self.color_at(y, 0, wid, tuple(clr), 'loaded')
                    else:
                        self.addstr(y, 0, descr, self.wid)
                        self.color_at(y, 0, self.wid, tuple(clr))
            else:
                if self.hei > 1:
                    self.addstr(1, 0, "No task in the queue.")
                    self.color_at(1, 0, self.wid, tuple(base_clr), 'error')
            self.color_reset()
    def finalize(self):
        # Park the cursor on the selected row.
        y = self.y + 1 + self.pointer - self.scroll_begin
        self.fm.ui.win.move(y, self.x)
    def task_remove(self, i=None):
        """Remove task *i* (default: the selected one) from the queue."""
        if i is None:
            i = self.pointer
        if self.fm.loader.queue:
            self.fm.loader.remove(index=i)
    def task_move(self, to, i=None):  # pylint: disable=invalid-name
        """Move task *i* (default: the selected one) to position *to*."""
        if i is None:
            i = self.pointer
        self.fm.loader.move(_from=i, to=to)
    def press(self, key):
        # Route the keypress through the taskview keymap.
        self.fm.ui.keymaps.use_keymap('taskview')
        self.fm.ui.press(key)
    def get_list(self):
        return self.fm.loader.queue
|
gpl-3.0
|
bvernoux/micropython
|
tests/bytecode/pylib-tests/runpy.py
|
28
|
10413
|
"""runpy.py - locating and running Python code using the module namespace
Provides support for locating and running Python scripts using the Python
module namespace instead of the native filesystem.
This allows Python code to play nicely with non-filesystem based PEP 302
importers when locating support scripts as well as when importing modules.
"""
# Written by Nick Coghlan <ncoghlan at gmail.com>
# to implement PEP 338 (Executing Modules as Scripts)
import os
import sys
import importlib.machinery # importlib first so we can test #15386 via -m
import imp
from pkgutil import read_code, get_loader, get_importer
__all__ = [
"run_module", "run_path",
]
class _TempModule(object):
    """Temporarily replace a module in sys.modules with an empty namespace"""

    def __init__(self, mod_name):
        self.mod_name = mod_name
        self.module = imp.new_module(mod_name)
        self._saved_module = []

    def __enter__(self):
        # Stash any pre-existing module so __exit__ can restore it.
        if self.mod_name in sys.modules:
            self._saved_module.append(sys.modules[self.mod_name])
        sys.modules[self.mod_name] = self.module
        return self

    def __exit__(self, *args):
        if self._saved_module:
            sys.modules[self.mod_name] = self._saved_module.pop()
        else:
            sys.modules.pop(self.mod_name)
        self._saved_module = []
class _ModifiedArgv0(object):
    """Context manager that temporarily swaps sys.argv[0] for *value*."""

    def __init__(self, value):
        self.value = value
        # Sentinel marks "not currently saving anything".
        self._sentinel = object()
        self._saved_value = self._sentinel

    def __enter__(self):
        if self._saved_value is not self._sentinel:
            raise RuntimeError("Already preserving saved value")
        self._saved_value = sys.argv[0]
        sys.argv[0] = self.value

    def __exit__(self, *args):
        sys.argv[0] = self._saved_value
        self.value = self._sentinel
def _run_code(code, run_globals, init_globals=None,
              mod_name=None, mod_fname=None,
              mod_loader=None, pkg_name=None):
    """Helper to run code in nominated namespace"""
    if init_globals is not None:
        run_globals.update(init_globals)
    # Install the special module-level globals before executing.
    special_globals = {
        "__name__": mod_name,
        "__file__": mod_fname,
        "__cached__": None,
        "__doc__": None,
        "__loader__": mod_loader,
        "__package__": pkg_name,
    }
    run_globals.update(special_globals)
    exec(code, run_globals)
    return run_globals
def _run_module_code(code, init_globals=None,
                     mod_name=None, mod_fname=None,
                     mod_loader=None, pkg_name=None):
    """Helper to run code in new namespace with sys modified"""
    temp_module = _TempModule(mod_name)
    with temp_module, _ModifiedArgv0(mod_fname):
        mod_globals = temp_module.module.__dict__
        _run_code(code, mod_globals, init_globals,
                  mod_name, mod_fname, mod_loader, pkg_name)
    # Snapshot the namespace: it may be cleared once the temporary
    # module object is torn down.
    return mod_globals.copy()
# This helper is needed due to a missing component in the PEP 302
# loader protocol (specifically, "get_filename" is non-standard)
# Since we can't introduce new features in maintenance releases,
# support was added to zipimporter under the name '_get_filename'
def _get_filename(loader, mod_name):
    """Return the absolute filename for *mod_name* via *loader*, or None.

    Tries the standard PEP 302 extension ``get_filename`` first, then
    zipimporter's private ``_get_filename`` spelling.
    """
    for attr in ("get_filename", "_get_filename"):
        meth = getattr(loader, attr, None)
        if meth is None:
            continue
        return os.path.abspath(meth(mod_name))
    return None
# Helper to get the loader, code and filename for a module
def _get_module_details(mod_name):
    """Return (mod_name, loader, code, filename) for the named module.

    For a package, recurses into its ``__main__`` submodule. Raises
    ImportError when the module, or runnable code for it, cannot be found.
    """
    loader = get_loader(mod_name)
    if loader is None:
        raise ImportError("No module named %s" % mod_name)
    if loader.is_package(mod_name):
        if mod_name == "__main__" or mod_name.endswith(".__main__"):
            raise ImportError("Cannot use package as __main__ module")
        try:
            pkg_main_name = mod_name + ".__main__"
            return _get_module_details(pkg_main_name)
        except ImportError as e:
            # Re-raise with context naming the package that was attempted.
            raise ImportError(("%s; %r is a package and cannot " +
                               "be directly executed") %(e, mod_name))
    code = loader.get_code(mod_name)
    if code is None:
        # e.g. namespace package, or a loader without source/bytecode.
        raise ImportError("No code object available for %s" % mod_name)
    filename = _get_filename(loader, mod_name)
    return mod_name, loader, code, filename
# XXX ncoghlan: Should this be documented and made public?
# (Current thoughts: don't repeat the mistake that lead to its
# creation when run_module() no longer met the needs of
# mainmodule.c, but couldn't be changed because it was public)
def _run_module_as_main(mod_name, alter_argv=True):
    """Runs the designated module in the __main__ namespace
    Note that the executed module will have full access to the
    __main__ namespace. If this is not desirable, the run_module()
    function should be used to run the module code in a fresh namespace.
    At the very least, these variables in __main__ will be overwritten:
        __name__
        __file__
        __cached__
        __loader__
        __package__
    """
    try:
        if alter_argv or mod_name != "__main__": # i.e. -m switch
            mod_name, loader, code, fname = _get_module_details(mod_name)
        else:          # i.e. directory or zipfile execution
            mod_name, loader, code, fname = _get_main_module_details()
    except ImportError as exc:
        # Try to provide a good error message
        # for directories, zip files and the -m switch
        if alter_argv:
            # For -m switch, just display the exception
            info = str(exc)
        else:
            # For directories/zipfiles, let the user
            # know what the code was looking for
            info = "can't find '__main__' module in %r" % sys.argv[0]
        msg = "%s: %s" % (sys.executable, info)
        sys.exit(msg)
    pkg_name = mod_name.rpartition('.')[0]
    # Run directly in the real __main__ namespace (not a copy).
    main_globals = sys.modules["__main__"].__dict__
    if alter_argv:
        sys.argv[0] = fname
    return _run_code(code, main_globals, None,
                     "__main__", fname, loader, pkg_name)
def run_module(mod_name, init_globals=None,
               run_name=None, alter_sys=False):
    """Execute a module's code without importing it
    Returns the resulting top level namespace dictionary
    """
    # Locate code and loader without adding the module to sys.modules.
    mod_name, loader, code, fname = _get_module_details(mod_name)
    run_name = mod_name if run_name is None else run_name
    pkg_name = mod_name.rpartition('.')[0]
    if not alter_sys:
        # Leave the sys module alone: run in a private namespace.
        return _run_code(code, {}, init_globals, run_name,
                         fname, loader, pkg_name)
    # Caller asked for sys.argv[0]/sys.modules to be temporarily adjusted.
    return _run_module_code(code, init_globals, run_name,
                            fname, loader, pkg_name)
def _get_main_module_details():
    # Helper that gives a nicer error message when attempting to
    # execute a zipfile or directory by invoking __main__.py
    # Also moves the standard __main__ out of the way so that the
    # preexisting __loader__ entry doesn't cause issues
    main_name = "__main__"
    # Temporarily remove the running __main__ so the import machinery
    # does a fresh lookup instead of returning the cached module.
    saved_main = sys.modules[main_name]
    del sys.modules[main_name]
    try:
        return _get_module_details(main_name)
    except ImportError as exc:
        # Only rewrite the message when the failure is about __main__
        # itself; unrelated import errors propagate unchanged.
        if main_name in str(exc):
            raise ImportError("can't find %r module in %r" %
                              (main_name, sys.path[0])) from exc
        raise
    finally:
        # Always restore the original __main__ module.
        sys.modules[main_name] = saved_main
def _get_code_from_file(run_name, fname):
# Check for a compiled file first
with open(fname, "rb") as f:
code = read_code(f)
if code is None:
# That didn't work, so try it as normal source code
with open(fname, "rb") as f:
code = compile(f.read(), fname, 'exec')
loader = importlib.machinery.SourceFileLoader(run_name, fname)
else:
loader = importlib.machinery.SourcelessFileLoader(run_name, fname)
return code, loader
def run_path(path_name, init_globals=None, run_name=None):
    """Execute code located at the specified filesystem location
    Returns the resulting top level namespace dictionary
    The file path may refer directly to a Python script (i.e.
    one that could be directly executed with execfile) or else
    it may refer to a zipfile or directory containing a top
    level __main__.py script.
    """
    if run_name is None:
        run_name = "<run_path>"
    pkg_name = run_name.rpartition(".")[0]
    # A path-hook importer (e.g. zipimporter) means the path is a
    # container (zipfile/directory), not a plain script.
    importer = get_importer(path_name)
    if isinstance(importer, (type(None), imp.NullImporter)):
        # Not a valid sys.path entry, so run the code directly
        # execfile() doesn't help as we want to allow compiled files
        code, mod_loader = _get_code_from_file(run_name, path_name)
        return _run_module_code(code, init_globals, run_name, path_name,
                                mod_loader, pkg_name)
    else:
        # Importer is defined for path, so add it to
        # the start of sys.path
        sys.path.insert(0, path_name)
        try:
            # Here's where things are a little different from the run_module
            # case. There, we only had to replace the module in sys while the
            # code was running and doing so was somewhat optional. Here, we
            # have no choice and we have to remove it even while we read the
            # code. If we don't do this, a __loader__ attribute in the
            # existing __main__ module may prevent location of the new module.
            mod_name, loader, code, fname = _get_main_module_details()
            with _TempModule(run_name) as temp_module, \
                  _ModifiedArgv0(path_name):
                mod_globals = temp_module.module.__dict__
                # Copy the namespace so the caller's snapshot survives the
                # temporary module being torn down on __exit__.
                return _run_code(code, mod_globals, init_globals,
                                    run_name, fname, loader, pkg_name).copy()
        finally:
            try:
                sys.path.remove(path_name)
            except ValueError:
                # The executed code may already have removed the entry.
                pass
if __name__ == "__main__":
    # Run the module specified as the next command line argument
    if len(sys.argv) < 2:
        print("No module specified for execution", file=sys.stderr)
    else:
        # Shift argv so the executed module sees itself as sys.argv[0].
        del sys.argv[0] # Make the requested module sys.argv[0]
        _run_module_as_main(sys.argv[0])
|
mit
|
Dancovich/libgdx_blender_g3d_exporter
|
io_scene_g3d/domain_classes.py
|
1
|
25899
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import math
from io_scene_g3d import util
from io_scene_g3d.util import Util, ROUND_STRING
from io_scene_g3d.profile import profile
class Vertex(object):
    """Define a mesh vertex as a collection of VertexAttribute objects."""
    # Class-level declarations; every instance gets its own values in __init__.
    _attributes = []
    _hashCache = None
    # Precomputed string form of [0.0, 0.0]; used to detect zero-valued
    # TEXCOORD/BLENDWEIGHT attributes when hashing.
    _listOfZeros = Util.floatListToString([0.0, 0.0])
    def __init__(self):
        self.attributes = []
    def add(self, attribute):
        """Append *attribute* unless an equal one is already present.
        Returns True when the attribute was added, False otherwise.
        Raises TypeError if *attribute* is not a VertexAttribute.
        """
        if attribute is None or not isinstance(attribute, VertexAttribute):
            raise TypeError("'attribute' must be a VertexAttribute")
        alreadyAdded = attribute in self._attributes
        if not alreadyAdded:
            self._attributes.append(attribute)
            self._hashCache = None  # cached hash is now stale
        return not alreadyAdded
    @property
    def attributes(self):
        return self._attributes
    @attributes.setter
    def attributes(self, newAttributes):
        self._attributes = newAttributes
        self._hashCache = None
    def normalizeBlendWeight(self):
        """Rescale all BLENDWEIGHT attribute values so they sum to 1.0."""
        if self.attributes is not None:
            blendWeightSum = 0.0
            for attr in self.attributes:
                if attr.name.startswith(VertexAttribute.BLENDWEIGHT, 0, len(VertexAttribute.BLENDWEIGHT)):
                    blendWeightSum = blendWeightSum + attr.value[1]
            # BUGFIX: when every blend weight is 0.0 the old code raised
            # ZeroDivisionError; there is nothing to normalize in that case.
            if blendWeightSum == 0.0:
                return
            for attr in self.attributes:
                if attr.name.startswith(VertexAttribute.BLENDWEIGHT, 0, len(VertexAttribute.BLENDWEIGHT)):
                    attr.value[1] = attr.value[1] / blendWeightSum
                    self._hashCache = None
    def sortAttributes(self):
        """Sort attributes into the canonical order defined by util.attributeSort."""
        if self._attributes is not None:
            self._attributes.sort(key=util.attributeSort)
    @profile('hashVertex')
    def __hash__(self):
        """Hash combining all attributes; zero-valued TEXCOORD/BLENDWEIGHT
        attributes are skipped so padded vertices hash like unpadded ones."""
        if self._hashCache is None or self._hashCache == 0:
            self._hashCache = 0
            if self._attributes is not None:
                for attr in self._attributes:
                    # If the attribute is of type TEXCOORD or BLENDWEIGHT and its
                    # value is zero we ignore it.
                    if not ((attr.name.startswith(VertexAttribute.BLENDWEIGHT, 0, len(VertexAttribute.BLENDWEIGHT))
                             or attr.name.startswith(VertexAttribute.TEXCOORD, 0, len(VertexAttribute.TEXCOORD)))
                            and Util.floatListToString(attr.value) == self._listOfZeros):
                        self._hashCache = 31 * self._hashCache + hash(attr)
        return self._hashCache
    @profile('eqVertex')
    def __eq__(self, another):
        """Two vertices are equal when their (filtered) attribute hashes match."""
        if another is None or not isinstance(another, Vertex):
            raise TypeError("'another' must be a Vertex")
        return hash(self) == hash(another)
    def __ne__(self, another):
        return not self.__eq__(another)
    def __repr__(self):
        reprStr = "{"
        firstTime = True
        for attr in self._attributes:
            if firstTime:
                firstTime = False
            else:
                reprStr = reprStr + ", "
            reprStr = reprStr + ("{!s} [{!r}]".format(attr.name, attr.value))
        reprStr = reprStr + ("}")
        return reprStr
class VertexAttribute(object):
    """A vertex attribute: a name (e.g. POSITION) plus its float value(s)."""
    _name = None
    _value = None
    # Attribute Types
    ###
    POSITION = "POSITION"
    NORMAL = "NORMAL"
    COLOR = "COLOR"
    COLORPACKED = "COLORPACKED"
    TANGENT = "TANGENT"
    BINORMAL = "BINORMAL"
    TEXCOORD = "TEXCOORD"
    BLENDWEIGHT = "BLENDWEIGHT"
    ###
    # Used to calculate attribute hash
    ATTRIBUTE_HASH = "%s||%s"
    _hashCache = None
    def __init__(self, name="POSITION", value=None):
        """Create an attribute; *value* defaults to a fresh [0.0, 0.0, 0.0]."""
        # BUGFIX: the default used to be the mutable literal [0.0, 0.0, 0.0],
        # evaluated once at def time — every attribute created without an
        # explicit value shared (and mutated) the same list. A None sentinel
        # gives each instance its own list; callers are unaffected.
        if value is None:
            value = [0.0, 0.0, 0.0]
        self.name = name
        self.value = value
        self._hashCache = None
    @property
    def name(self):
        return self._name
    @name.setter
    def name(self, name):
        self._name = name
        self._hashCache = None  # identity changed; cached hash is stale
    @property
    def value(self):
        return self._value
    @value.setter
    def value(self, value):
        self._value = value
        self._hashCache = None
    @profile('hashVertexAttribute')
    def __hash__(self):
        """Hash derived from name plus the rounded string form of the value,
        so float noise below the rounding precision is ignored."""
        if self._hashCache is None or self._hashCache == 0:
            self._hashCache = 0
            hashString = None
            if self._value is not None and isinstance(self._value, list):
                hashString = self.ATTRIBUTE_HASH % (self._name, Util.floatListToString(self._value))
            elif self._value is not None:
                hashString = self.ATTRIBUTE_HASH % (self._name, Util.floatToString(self._value))
            else:
                hashString = self.ATTRIBUTE_HASH % (self._name, self._value)
            self._hashCache = hash(hashString)
        return self._hashCache
    @profile('eqVertexAttribute')
    def __eq__(self, another):
        """Compare this attribute with another for value"""
        if another is None or not isinstance(another, VertexAttribute):
            return False
        if self.name != another.name:
            return False
        if isinstance(self.value, list) and isinstance(another.value, list):
            if len(self.value) != len(another.value):
                return False
            isEqual = True
            for pos in range(0, len(self.value)):
                thisValue = ROUND_STRING.format(self.value[pos])
                otherValue = ROUND_STRING.format(another.value[pos])
                if thisValue != otherValue:
                    # handles cases where 0 and -0 are different when compared as strings
                    if math.fabs(self.value[pos]) - math.fabs(another.value[pos]) == math.fabs(self.value[pos]):
                        compareThisForZero = ROUND_STRING.format(math.fabs(self.value[pos]))
                        compareOtherForZero = ROUND_STRING.format(math.fabs(another.value[pos]))
                        if compareThisForZero != compareOtherForZero:
                            isEqual = False
                            break
                    else:
                        isEqual = False
                        break
            return isEqual
        else:
            return self.value == another.value
    def __ne__(self, another):
        return not self.__eq__(another)
    def __repr__(self):
        value = "{!s} {{{!r}}}".format(self.name, self.value)
        return value
class Texture(object):
    """Simple value object describing a texture reference."""
    # Class-level defaults, following this module's declaration style.
    _id = ""
    _filename = ""
    _type = ""
    def __init__(self, textureId="", textureType="", filename=""):
        """Store the texture id, its usage type and the backing file name."""
        self._type = textureType
        self._filename = filename
        self._id = textureId
class Node(object):
    """A scene-graph node: a transform plus optional parts and children."""
    _id = ""
    _rotation = None
    _translation = None
    _scale = None
    _parts = None
    _children = None
    def __init__(self):
        self._children = None
        self._scale = None
        self._translation = None
        self._rotation = None
        self._id = ""
    @property
    def id(self):
        """Unique identifier of this node."""
        return self._id
    @id.setter
    def id(self, nodeId):
        self._id = nodeId
    @property
    def rotation(self):
        """Rotation component of the node transform."""
        return self._rotation
    @rotation.setter
    def rotation(self, rotation):
        self._rotation = rotation
    @property
    def translation(self):
        """Translation component of the node transform."""
        return self._translation
    @translation.setter
    def translation(self, translation):
        self._translation = translation
    @property
    def scale(self):
        """Scale component of the node transform."""
        return self._scale
    @scale.setter
    def scale(self, scale):
        self._scale = scale
    def addChild(self, childNode):
        """Append *childNode*, creating the child list on first use."""
        if childNode is None or not isinstance(childNode, Node):
            raise TypeError("'childNode' must be of type Node")
        bucket = self._children if self._children is not None else []
        bucket.append(childNode)
        self._children = bucket
    @property
    def children(self):
        """List of child nodes, or None when no child was ever added."""
        return self._children
    @children.setter
    def children(self, children):
        if children is None or not isinstance(children, list):
            raise TypeError("'children' must be of type list")
        self._children = children
    def addPart(self, part):
        """Append a NodePart, creating the part list on first use."""
        if part is None or not isinstance(part, NodePart):
            raise TypeError("'part' must be of type NodePart")
        bucket = self._parts if self._parts is not None else []
        bucket.append(part)
        self._parts = bucket
    @property
    def parts(self):
        """List of NodePart entries, or None when none were added."""
        return self._parts
class NodePart(object):
    """Binds a mesh part to a material, with optional bones and UV layers."""
    _meshPartId = ""
    _materialId = ""
    _bones = None
    _uvLayers = None
    def __init__(self):
        self._materialId = ""
        self._meshPartId = ""
    @property
    def meshPartId(self):
        """Id of the MeshPart this node part renders."""
        return self._meshPartId
    @meshPartId.setter
    def meshPartId(self, meshPartId):
        self._meshPartId = meshPartId
    @property
    def materialId(self):
        """Id of the Material applied to the mesh part."""
        return self._materialId
    @materialId.setter
    def materialId(self, materialId):
        self._materialId = materialId
    def addUVLayer(self, uvLayerMapping):
        """Append a UV layer mapping (a list), creating storage on first use."""
        if uvLayerMapping is None or not isinstance(uvLayerMapping, list):
            raise TypeError("'uvLayerMapping' must be of type list")
        layers = self._uvLayers if self._uvLayers is not None else []
        layers.append(uvLayerMapping)
        self._uvLayers = layers
    @property
    def uvLayers(self):
        """List of UV layer mappings, or None when none were added."""
        return self._uvLayers
    def addBone(self, bone):
        """Append a Bone influencing this part, creating storage on first use."""
        if bone is None or not isinstance(bone, Bone):
            raise TypeError("'bone' must be of type Bone")
        boneList = self._bones if self._bones is not None else []
        boneList.append(bone)
        self._bones = boneList
    @property
    def bones(self):
        """List of bones, or None when none were added."""
        return self._bones
class Bone(object):
    """A bone reference: target node id plus its bind transform."""
    _node = ""
    _rotation = None
    _translation = None
    _scale = None
    def __init__(self):
        self._scale = None
        self._translation = None
        self._rotation = None
        self._node = ""
    @property
    def node(self):
        """Id of the node this bone drives."""
        return self._node
    @node.setter
    def node(self, node):
        self._node = node
    @property
    def rotation(self):
        """Rotation component of the bind transform."""
        return self._rotation
    @rotation.setter
    def rotation(self, rotation):
        self._rotation = rotation
    @property
    def translation(self):
        """Translation component of the bind transform."""
        return self._translation
    @translation.setter
    def translation(self, translation):
        self._translation = translation
    @property
    def scale(self):
        """Scale component of the bind transform."""
        return self._scale
    @scale.setter
    def scale(self, scale):
        self._scale = scale
class Mesh(object):
    """A mesh: a de-duplicated vertex list plus the parts indexing into it."""
    _id = ""
    _vertices = None
    _parts = None
    # This is a cache so we know all attributes this mesh has.
    # All the real attributes are in the vertices
    _attributes = None
    # This stores positions of each vertex on the _vertices attribute, so searches are faster
    _vertexIndex = None
    def __init__(self):
        self._id = ""
        self._vertices = []
        self._parts = []
        self._attributes = []
        # Maps hash(vertex) -> position in self._vertices.
        self._vertexIndex = {}
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, meshId):
        self._id = meshId
    @property
    def vertices(self):
        return self._vertices
    def getAttributes(self):
        # Names (not values) of every attribute seen in this mesh's vertices.
        return self._attributes
    def getVertexIndex(self, vertex):
        # Returns the stored position of an equal vertex, or None when absent.
        # NOTE: relies on Vertex.__hash__ filtering zero TEXCOORD/BLENDWEIGHT
        # values, so padded and unpadded duplicates collide as intended.
        if vertex is None or not isinstance(vertex, Vertex):
            raise TypeError("'vertex' must be of type Vertex")
        vertexHash = hash(vertex)
        try:
            existingVertexPos = self._vertexIndex[vertexHash]
            return existingVertexPos
        except KeyError:
            return None
    def addVertex(self, vertex):
        """
        Adds a vertex if it has not been added before.
        A vertex is considered 'already added' if all it's vertex
        attributes have the same value.
        Returns the added vertex if it's new or a pointer
        to the already present vertex if there is one.
        """
        if vertex is None or not isinstance(vertex, Vertex):
            raise TypeError("'vertex' must be of type Vertex")
        vertexHash = hash(vertex)
        try:
            # EAFP: a hit in the index means an equal vertex already exists.
            existingVertexPos = self._vertexIndex[vertexHash]
            existingVertex = self._vertices[existingVertexPos]
            return existingVertex
        except KeyError:
            self._vertices.append(vertex)
            vertexIndex = len(self._vertices) - 1
            self._vertexIndex[vertexHash] = vertexIndex
            # Add this vertice's attributes to the attribute name cache
            for attr in vertex.attributes:
                if attr.name not in self._attributes:
                    self._attributes.append(attr.name)
            return vertex
        """
        alreadyAdded = False
        foundVertex = None
        for vtx in self._vertices:
            if vtx == vertex:
                alreadyAdded = True
                foundVertex = vtx
                break
        if not alreadyAdded:
            self._vertices.append(vertex)
            # Add this vertice's attributes to the attribute name cache
            for attr in vertex.attributes:
                if attr.name not in self._attributes:
                    self._attributes.append(attr.name)
            return vertex
        else:
            return foundVertex
        """
    @property
    def parts(self):
        return self._parts
    def addPart(self, meshPart):
        # Registers the part and back-links it to this mesh.
        if meshPart is None or not isinstance(meshPart, MeshPart):
            raise TypeError("'meshPart' must be of type MeshPart")
        self._parts.append(meshPart)
        meshPart.parentMesh = self
    def normalizeAttributes(self):
        """
        Makes sure all vertices have the same number of attributes.
        More specifically, individual vertices may not have all BLENDWEIGHT or TEXCOORD
        attributes but if that's the case we need to add blank values for those
        attributes as LibGDX requires all vertices to have the same number of attribute values
        """
        # Naming for our attributes
        blendWeightAttrName = VertexAttribute.BLENDWEIGHT + "%d"
        texCoordAttrName = VertexAttribute.TEXCOORD + "%d"
        # This is how many total attributes we have. We can skip any vertices
        # that have that many attributes
        totalAttributes = len(self._attributes)
        # Figure out how many TEXCOORD and BLENDWEIGHT attributes we have
        numOfTexCoord = 0
        numOfBlendWeight = 0
        for attr in self._attributes:
            if attr.startswith(VertexAttribute.BLENDWEIGHT, 0, len(VertexAttribute.BLENDWEIGHT)):
                numOfBlendWeight = numOfBlendWeight + 1
            elif attr.startswith(VertexAttribute.TEXCOORD, 0, len(VertexAttribute.TEXCOORD)):
                numOfTexCoord = numOfTexCoord + 1
        # Normalize any vertex that has less than these number of attributes
        for vertex in self._vertices:
            if len(vertex.attributes) == totalAttributes:
                continue
            vertexNumOfTexCoord = 0
            vertexNumOfBlendWeight = 0
            for attr in vertex.attributes:
                if attr.name.startswith(VertexAttribute.BLENDWEIGHT, 0, len(VertexAttribute.BLENDWEIGHT)):
                    vertexNumOfBlendWeight = vertexNumOfBlendWeight + 1
                elif attr.name.startswith(VertexAttribute.TEXCOORD, 0, len(VertexAttribute.TEXCOORD)):
                    vertexNumOfTexCoord = vertexNumOfTexCoord + 1
            # Add missing attributes
            # Pad with zero-valued attributes; Vertex.__hash__ ignores these,
            # so padding does not change vertex identity.
            for newBlendIndex in range(vertexNumOfBlendWeight, numOfBlendWeight):
                newAttribute = VertexAttribute(name=(blendWeightAttrName % newBlendIndex), value=[0.0, 0.0])
                vertex.add(newAttribute)
            for newTexCoordIndex in range(vertexNumOfTexCoord, numOfTexCoord):
                newAttribute = VertexAttribute(name=(texCoordAttrName % newTexCoordIndex), value=[0.0, 0.0])
                vertex.add(newAttribute)
    def __repr__(self):
        value = "VERTICES:\n{!r}\n\nPARTS:\n{!r}\n\n".format(self._vertices, self._parts)
        return value
class MeshPart(object):
    """Represents a mesh part: a primitive type plus the vertices it uses."""
    _id = ""
    _type = "TRIANGLES"
    _vertices = None
    _parentMesh = None
    def __init__(self, meshPartId="", meshType="TRIANGLES", vertices=None, parentMesh=None):
        """Create a part with an id, primitive type and optional vertex list."""
        self._id = meshPartId
        self._type = meshType
        self._vertices = vertices
        self._parentMesh = parentMesh
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, partId):
        self._id = partId
    @property
    def type(self):
        """Primitive type string (e.g. TRIANGLES)."""
        return self._type
    @type.setter
    def type(self, partType):
        self._type = partType
    @property
    def parentMesh(self):
        return self._parentMesh
    @parentMesh.setter
    def parentMesh(self, parentMesh):
        if parentMesh is None or not isinstance(parentMesh, Mesh):
            raise TypeError("'parentMesh' must be of type Mesh")
        self._parentMesh = parentMesh
    def addVertex(self, vertex):
        """Append a vertex reference, creating the list on first use."""
        if vertex is None or not isinstance(vertex, Vertex):
            raise TypeError("'vertex' must be of type Vertex")
        if self._vertices is None:
            self._vertices = []
        self._vertices.append(vertex)
    @property
    def vertices(self):
        return self._vertices
    def __repr__(self):
        reprStr = "{{\n ID: {!s}\n TYPE: {!s}\n".format(self.id, self.type)
        if self.parentMesh is not None and self._vertices is not None:
            reprStr = reprStr + (" TOTAL INDICES: {:d}\n VERTICES:\n [".format(len(self._vertices)))
            for ver in self._vertices:
                reprStr = reprStr + (" {!r}\n".format(ver))
            reprStr = reprStr + " ]\n"
        # BUGFIX: the closing brace was the literal "}}" — brace doubling is
        # only an escape inside str.format, so the repr opened with one "{"
        # but closed with two "}". Emit a single "}".
        reprStr = reprStr + "}\n"
        return reprStr
class Material(object):
    """Material associated with a geometry"""
    _id = ""
    _ambient = None
    _diffuse = None
    _emissive = None
    _opacity = None
    _specular = None
    _shininess = None
    _reflection = None
    _textures = []
    def __init__(self):
        self._id = ""
        self._ambient = None
        self._diffuse = None
        self._emissive = None
        self._opacity = None
        self._specular = None
        self._shininess = None
        self._reflection = None
        # BUGFIX: _textures was only declared as a class-level list and never
        # re-initialized per instance, so every Material shared ONE textures
        # list — appending a texture to one material added it to all of them.
        self._textures = []
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, materialId):
        self._id = materialId
    @property
    def ambient(self):
        """Ambient color component."""
        return self._ambient
    @ambient.setter
    def ambient(self, ambient):
        self._ambient = ambient
    @property
    def diffuse(self):
        """Diffuse color component."""
        return self._diffuse
    @diffuse.setter
    def diffuse(self, diffuse):
        self._diffuse = diffuse
    @property
    def emissive(self):
        """Emissive color component."""
        return self._emissive
    @emissive.setter
    def emissive(self, emissive):
        self._emissive = emissive
    @property
    def opacity(self):
        """Opacity value."""
        return self._opacity
    @opacity.setter
    def opacity(self, opacity):
        self._opacity = opacity
    @property
    def specular(self):
        """Specular color component."""
        return self._specular
    @specular.setter
    def specular(self, specular):
        self._specular = specular
    @property
    def shininess(self):
        """Shininess value."""
        return self._shininess
    @shininess.setter
    def shininess(self, shininess):
        self._shininess = shininess
    @property
    def reflection(self):
        """Reflection component."""
        return self._reflection
    @reflection.setter
    def reflection(self, reflection):
        self._reflection = reflection
    @property
    def textures(self):
        """Per-instance list of Texture references."""
        return self._textures
    @textures.setter
    def textures(self, textures):
        self._textures = textures
class Animation(object):
    """A named animation: a set of per-bone keyframe tracks."""
    _id = ""
    _bones = None
    def __init__(self):
        self._bones = None
        self._id = ""
    @property
    def id(self):
        """Animation identifier."""
        return self._id
    @id.setter
    def id(self, animationId):
        self._id = animationId
    @property
    def bones(self):
        """NodeAnimation tracks, or None when none were added."""
        return self._bones
    @bones.setter
    def bones(self, bones):
        if bones is None or not isinstance(bones, list):
            raise TypeError("'bones' must be of type list")
        self._bones = bones
    def addBone(self, bone):
        """Append a NodeAnimation track, creating the list on first use."""
        if bone is None or not isinstance(bone, NodeAnimation):
            raise TypeError("'bone' must be of type NodeAnimation")
        tracks = self._bones if self._bones is not None else []
        tracks.append(bone)
        self._bones = tracks
class NodeAnimation(object):
    """Keyframe track for a single bone inside an Animation."""
    _boneId = ""
    _keyframes = None
    def __init__(self):
        self._keyframes = None
        self._boneId = ""
    @property
    def boneId(self):
        """Id of the bone this track animates."""
        return self._boneId
    @boneId.setter
    def boneId(self, boneId):
        self._boneId = boneId
    @property
    def keyframes(self):
        """List of Keyframe objects, or None when none were added."""
        return self._keyframes
    @keyframes.setter
    def keyframes(self, keyframes):
        if keyframes is None or not isinstance(keyframes, list):
            raise TypeError("'keyframes' must be of type list")
        self._keyframes = keyframes
    def addKeyframe(self, keyframe):
        """Append a Keyframe, creating the list on first use."""
        if keyframe is None or not isinstance(keyframe, Keyframe):
            raise TypeError("'keyframe' must be of type Keyframe")
        frames = self._keyframes if self._keyframes is not None else []
        frames.append(keyframe)
        self._keyframes = frames
class Keyframe(object):
    """One sample of an animation track: a time plus an optional transform."""
    _keytime = 0.0
    _translation = None
    _rotation = None
    _scale = None
    def __init__(self):
        self._scale = None
        self._translation = None
        self._rotation = None
        self._keytime = 0.0
    @property
    def keytime(self):
        """Time of this sample."""
        return self._keytime
    @keytime.setter
    def keytime(self, keytime):
        self._keytime = keytime
    @property
    def rotation(self):
        """Rotation at this keyframe (None when not animated)."""
        return self._rotation
    @rotation.setter
    def rotation(self, rotation):
        self._rotation = rotation
    @property
    def translation(self):
        """Translation at this keyframe (None when not animated)."""
        return self._translation
    @translation.setter
    def translation(self, translation):
        self._translation = translation
    @property
    def scale(self):
        """Scale at this keyframe (None when not animated)."""
        return self._scale
    @scale.setter
    def scale(self, scale):
        self._scale = scale
class G3DModel(object):
    """ Our model class that will later be exported to G3D """
    _meshes = None
    _materials = None
    _nodes = None
    _animations = None
    def __init__(self):
        self._meshes = []
        self._materials = []
        self._nodes = []
        self._animations = []
    @property
    def meshes(self):
        return self._meshes
    @meshes.setter
    def meshes(self, meshes):
        if meshes is None or not isinstance(meshes, list):
            raise TypeError("'meshes' must be a list of Mesh")
        self._meshes = meshes
    def addMesh(self, mesh):
        """Append a Mesh to the model."""
        if mesh is None or not isinstance(mesh, Mesh):
            raise TypeError("'mesh' must be of type Mesh")
        self._meshes.append(mesh)
    def hasMesh(self, meshId):
        """Return True when a mesh with id *meshId* is already registered."""
        for mesh in self._meshes:
            # BUGFIX: 'id' is a property, not a method — the old code called
            # mesh.id(), which raised TypeError ('str' object is not callable)
            # on the first lookup against a non-empty mesh list.
            if mesh.id == meshId:
                return True
        return False
    @property
    def materials(self):
        return self._materials
    @materials.setter
    def materials(self, materials):
        if materials is None or not isinstance(materials, list):
            raise TypeError("'materials' must be of type list")
        self._materials = materials
    @property
    def nodes(self):
        return self._nodes
    @nodes.setter
    def nodes(self, nodes):
        if nodes is None or not isinstance(nodes, list):
            raise TypeError("'nodes' must be of type list")
        self._nodes = nodes
    @property
    def animations(self):
        return self._animations
    @animations.setter
    def animations(self, animations):
        if animations is None or not isinstance(animations, list):
            raise TypeError("'animations' must be of type list")
        self._animations = animations
|
gpl-3.0
|
tinloaf/home-assistant
|
homeassistant/components/binary_sensor/fritzbox.py
|
4
|
1775
|
"""
Support for Fritzbox binary sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.fritzbox/
"""
import logging
import requests
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.fritzbox import DOMAIN as FRITZBOX_DOMAIN
# The fritzbox hub component must be set up before this platform.
DEPENDENCIES = ['fritzbox']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Fritzbox binary sensor platform."""
    entities = []
    for fritz in hass.data[FRITZBOX_DOMAIN]:
        for device in fritz.get_devices():
            # Only devices exposing an alarm become binary sensors.
            if device.has_alarm:
                entities.append(FritzboxBinarySensor(device, fritz))
    add_entities(entities, True)
class FritzboxBinarySensor(BinarySensorDevice):
    """Representation of a binary Fritzbox device."""
    def __init__(self, device, fritz):
        """Initialize the Fritzbox binary sensor."""
        self._fritz = fritz
        self._device = device
    @property
    def name(self):
        """Return the name of the entity."""
        return self._device.name
    @property
    def device_class(self):
        """Return the class of this sensor."""
        return 'window'
    @property
    def is_on(self):
        """Return true if sensor is on."""
        # Unreachable devices always report "off".
        if not self._device.present:
            return False
        return self._device.alert_state
    def update(self):
        """Get latest data from the Fritzbox."""
        try:
            self._device.update()
        except requests.exceptions.HTTPError as err:
            # A failed request usually means the session expired;
            # log it and re-authenticate for the next poll.
            _LOGGER.warning("Connection error: %s", err)
            self._fritz.login()
|
apache-2.0
|
sysalexis/kbengine
|
kbe/res/scripts/common/Lib/crypt.py
|
104
|
1879
|
"""Wrapper to the POSIX crypt library call and associated functionality."""
import _crypt
import string as _string
from random import SystemRandom as _SystemRandom
from collections import namedtuple as _namedtuple
# Characters legal in a crypt salt: [a-zA-Z0-9./]
_saltchars = _string.ascii_letters + _string.digits + './'
# Cryptographically strong RNG for salt generation.
_sr = _SystemRandom()
class _Method(_namedtuple('_Method', 'name ident salt_chars total_size')):
"""Class representing a salt method per the Modular Crypt Format or the
legacy 2-character crypt method."""
def __repr__(self):
return '<crypt.METHOD_{}>'.format(self.name)
def mksalt(method=None):
    """Generate a salt for the specified method.
    If not specified, the strongest available method will be used.
    """
    if method is None:
        method = methods[0]
    # Modular Crypt Format prefix ('$<ident>$'); legacy DES has no ident.
    prefix = '${}$'.format(method.ident) if method.ident else ''
    random_chars = ''.join(_sr.choice(_saltchars)
                           for _ in range(method.salt_chars))
    return prefix + random_chars
def crypt(word, salt=None):
    """Return a string representing the one-way hash of a password, with a salt
    prepended.
    If ``salt`` is not specified or is ``None``, the strongest
    available method will be selected and a salt generated. Otherwise,
    ``salt`` may be one of the ``crypt.METHOD_*`` values, or a string as
    returned by ``crypt.mksalt()``.
    """
    # A method object (or None) means "generate a salt"; strings pass through.
    needs_salt = salt is None or isinstance(salt, _Method)
    if needs_salt:
        salt = mksalt(salt)
    return _crypt.crypt(word, salt)
# available salting/crypto methods
METHOD_CRYPT = _Method('CRYPT', None, 2, 13)
METHOD_MD5 = _Method('MD5', '1', 8, 34)
METHOD_SHA256 = _Method('SHA256', '5', 16, 63)
METHOD_SHA512 = _Method('SHA512', '6', 16, 106)
# Probe the platform's crypt() at import time: keep only the methods the
# local C library actually supports, strongest first. Legacy DES crypt is
# appended unconditionally.
methods = []
for _method in (METHOD_SHA512, METHOD_SHA256, METHOD_MD5):
    _result = crypt('', _method)
    if _result and len(_result) == _method.total_size:
        methods.append(_method)
methods.append(METHOD_CRYPT)
del _result, _method
|
lgpl-3.0
|
EvgeneOskin/taiga-back
|
taiga/projects/migrations/0001_initial.py
|
26
|
6614
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django_pgjson.fields
import django.utils.timezone
import django.db.models.deletion
import djorm_pgarray.fields
import taiga.projects.history.models
class Migration(migrations.Migration):
    """Initial schema for the projects app: the Membership and Project models
    plus their foreign keys into the users app and AUTH_USER_MODEL."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0002_auto_20140903_0916'),
    ]
    operations = [
        migrations.CreateModel(
            name='Membership',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('is_owner', models.BooleanField(default=False)),
                ('email', models.EmailField(max_length=255, null=True, default=None, verbose_name='email', blank=True)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, verbose_name='creado el')),
                ('token', models.CharField(max_length=60, null=True, default=None, verbose_name='token', blank=True)),
                ('invited_by_id', models.IntegerField(null=True, blank=True)),
            ],
            options={
                'ordering': ['project', 'user__full_name', 'user__username', 'user__email', 'email'],
                # NOTE(review): 'membershipss' is a typo, but migration files
                # are frozen history — correct it in the model's Meta via a
                # new migration, never by editing this one.
                'verbose_name_plural': 'membershipss',
                'permissions': (('view_membership', 'Can view membership'),),
                'verbose_name': 'membership',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('tags', djorm_pgarray.fields.TextArrayField(dbtype='text', verbose_name='tags')),
                ('name', models.CharField(max_length=250, unique=True, verbose_name='name')),
                ('slug', models.SlugField(max_length=250, unique=True, verbose_name='slug', blank=True)),
                ('description', models.TextField(verbose_name='description')),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='created date')),
                ('modified_date', models.DateTimeField(verbose_name='modified date')),
                ('total_milestones', models.IntegerField(null=True, default=0, verbose_name='total of milestones', blank=True)),
                ('total_story_points', models.FloatField(default=0, verbose_name='total story points')),
                ('is_backlog_activated', models.BooleanField(default=True, verbose_name='active backlog panel')),
                ('is_kanban_activated', models.BooleanField(default=False, verbose_name='active kanban panel')),
                ('is_wiki_activated', models.BooleanField(default=True, verbose_name='active wiki panel')),
                ('is_issues_activated', models.BooleanField(default=True, verbose_name='active issues panel')),
                ('videoconferences', models.CharField(max_length=250, null=True, choices=[('appear-in', 'AppearIn'), ('talky', 'Talky')], verbose_name='videoconference system', blank=True)),
                ('videoconferences_salt', models.CharField(max_length=250, null=True, verbose_name='videoconference room salt', blank=True)),
                ('anon_permissions', djorm_pgarray.fields.TextArrayField(choices=[('view_project', 'View project'), ('view_milestones', 'View milestones'), ('view_us', 'View user stories'), ('view_tasks', 'View tasks'), ('view_issues', 'View issues'), ('view_wiki_pages', 'View wiki pages'), ('view_wiki_links', 'View wiki links')], dbtype='text', default=[], verbose_name='anonymous permissions')),
                ('public_permissions', djorm_pgarray.fields.TextArrayField(choices=[('view_project', 'View project'), ('view_milestones', 'View milestones'), ('view_us', 'View user stories'), ('view_issues', 'View issues'), ('vote_issues', 'Vote issues'), ('view_tasks', 'View tasks'), ('view_wiki_pages', 'View wiki pages'), ('view_wiki_links', 'View wiki links'), ('request_membership', 'Request membership'), ('add_us_to_project', 'Add user story to project'), ('add_comments_to_us', 'Add comments to user stories'), ('add_comments_to_task', 'Add comments to tasks'), ('add_issue', 'Add issues'), ('add_comments_issue', 'Add comments to issues'), ('add_wiki_page', 'Add wiki page'), ('modify_wiki_page', 'Modify wiki page'), ('add_wiki_link', 'Add wiki link'), ('modify_wiki_link', 'Modify wiki link')], dbtype='text', default=[], verbose_name='user permissions')),
                ('is_private', models.BooleanField(default=False, verbose_name='is private')),
                ('tags_colors', djorm_pgarray.fields.TextArrayField(dbtype='text', dimension=2, default=[], null=False, verbose_name='tags colors')),
            ],
            options={
                'ordering': ['name'],
                'verbose_name_plural': 'projects',
                'permissions': (('view_project', 'Can view project'),),
                'verbose_name': 'project',
            },
            bases=(models.Model,),
        ),
        # Many-to-many Project<->User routed through the Membership model.
        migrations.AddField(
            model_name='project',
            name='members',
            field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, related_name='projects', verbose_name='members', through='projects.Membership'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='project',
            name='owner',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='owned_projects', verbose_name='owner'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='membership',
            name='user',
            field=models.ForeignKey(blank=True, default=None, to=settings.AUTH_USER_MODEL, null=True, related_name='memberships'),
            preserve_default=True,
        ),
        # preserve_default=False: default=1 exists only to backfill existing
        # rows during this migration and is not kept on the model.
        migrations.AddField(
            model_name='membership',
            name='project',
            field=models.ForeignKey(default=1, to='projects.Project', related_name='memberships'),
            preserve_default=False,
        ),
        migrations.AlterUniqueTogether(
            name='membership',
            unique_together=set([('user', 'project')]),
        ),
        migrations.AddField(
            model_name='membership',
            name='role',
            field=models.ForeignKey(related_name='memberships', to='users.Role', default=1),
            preserve_default=False,
        ),
    ]
|
agpl-3.0
|
saeedhadi/linux-at91
|
scripts/rt-tester/rt-tester.py
|
11005
|
5307
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals (set from command-line options in the main section below)
quiet = 0       # -q: suppress progress output
test = 0        # -t: syntax-check mode; print sysfs paths instead of touching them
comments = 0    # -c: echo comment lines after the first command has run
# The rt-mutex tester exposes one sysfs node per test thread at
# <sysfsprefix><tid> with a "status" and a "command" attribute.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
# Maps the symbolic command names used in test files to the numeric
# opcode strings understood by the kernel rttest driver.
cmd_opcodes = {
    "schedother" : "1",
    "schedfifo" : "2",
    "lock" : "3",
    "locknowait" : "4",
    "lockint" : "5",
    "lockintnowait" : "6",
    "lockcont" : "7",
    "unlock" : "8",
    "signal" : "11",
    "resetevent" : "98",
    "reset" : "99",
    }
# Test opcodes: symbolic name -> [status-field prefix, comparison, fixed value].
# "M" entries check one decimal digit of the mutex-state value (fixed values
# 0..4 below); for prefixes with a None value, the expected value comes from
# the test line itself.  "P"/"N" presumably select (normalized) priority and
# "O"/"E" the last opcode / event counter -- see analyse() and the kernel
# driver for the authoritative meaning.
test_opcodes = {
    "prioeq" : ["P" , "eq" , None],
    "priolt" : ["P" , "lt" , None],
    "priogt" : ["P" , "gt" , None],
    "nprioeq" : ["N" , "eq" , None],
    "npriolt" : ["N" , "lt" , None],
    "npriogt" : ["N" , "gt" , None],
    "unlocked" : ["M" , "eq" , 0],
    "trylock" : ["M" , "eq" , 1],
    "blocked" : ["M" , "eq" , 2],
    "blockedwake" : ["M" , "eq" , 3],
    "locked" : ["M" , "eq" , 4],
    "opcodeeq" : ["O" , "eq" , None],
    "opcodelt" : ["O" , "lt" , None],
    "opcodegt" : ["O" , "gt" , None],
    "eventeq" : ["E" , "eq" , None],
    "eventlt" : ["E" , "lt" , None],
    "eventgt" : ["E" , "gt" , None],
    }
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one status value against a test-opcode specification.

    val: raw status value string read from the sysfs status file
    top: a test_opcodes entry [field prefix, comparison, fixed value]
    arg: the argument column of the test line (digit position for "M",
         opcode name for "O", a plain integer otherwise)

    Returns 1 when the comparison holds, 0 otherwise.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state: select the decimal digit at position 'arg' and
        # compare it against the fixed value from the opcode table.
        # Floor division (//) keeps the intermediate an integer; plain /
        # would yield a float under true division and break the
        # digit-extraction modulo below.
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode: the argument may be a symbolic command name.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)
# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)
# Select the input source: the testfile argument, or stdin when absent.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin
linenr = 0
# Read the test patterns.  Each non-comment line has the colon-separated
# form  cmd:opcode:threadid:data  where cmd is "C" (command), "T" (test
# once) or "W" (wait until the condition holds).
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment lines are only echoed once -c has been "armed" by the
        # first real command (comments == 2).
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                # Syntax-check mode: show the path instead of reading it.
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                # The status line is a comma-separated list of
                # prefix:value entries; find the one this test targets.
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                # "t" tests exactly once; "w" loops until satisfied.
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        # Any lookup/IO failure is reported as a syntax error with the
        # offending line number; in -t mode parsing continues.
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
            sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
gpl-2.0
|
osvalr/odoo
|
addons/document/document.py
|
152
|
83929
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import errno
import logging
import os
import random
import shutil
import string
import time
from StringIO import StringIO
import psycopg2
import openerp
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.osv.orm import except_orm
import openerp.report.interface
from openerp.tools.misc import ustr
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
from content_index import cntIndex
_logger = logging.getLogger(__name__)
class document_file(osv.osv):
    """ir.attachment extended with document-management metadata.

    Adds an owner, a parent document.directory, extracted ("indexed") text
    content and a detected content type to attachments, and layers the
    directory access rules on top of the standard attachment checks.
    """
    _inherit = 'ir.attachment'
    _columns = {
        # Columns from ir.attachment:
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        # Fields of document:
        'user_id': fields.many2one('res.users', 'Owner', select=1),
        'parent_id': fields.many2one('document.directory', 'Directory', select=1, change_default=True),
        'index_content': fields.text('Indexed Content'),
        'partner_id':fields.many2one('res.partner', 'Partner', select=1),
        'file_type': fields.char('Content Type'),
    }
    _order = "id desc"
    _defaults = {
        'user_id': lambda self, cr, uid, ctx:uid,
    }
    _sql_constraints = [
        ('filename_unique', 'unique (name,parent_id)', 'The filename must be unique in a directory !'),
    ]
    def check(self, cr, uid, ids, mode, context=None, values=None):
        """Overwrite check to verify access on directory to validate specifications of doc/access_permissions.rst"""
        if not isinstance(ids, list):
            ids = [ids]
        super(document_file, self).check(cr, uid, ids, mode, context=context, values=values)
        if ids:
            # use SQL to avoid recursive loop on read
            cr.execute('SELECT DISTINCT parent_id from ir_attachment WHERE id in %s AND parent_id is not NULL', (tuple(ids),))
            parent_ids = [parent_id for (parent_id,) in cr.fetchall()]
            if parent_ids:
                # Access to a file requires both model-level access to
                # document.directory and the record rules of its parents.
                self.pool.get('ir.model.access').check(cr, uid, 'document.directory', mode)
                self.pool.get('document.directory').check_access_rule(cr, uid, parent_ids, mode, context=context)
    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Search attachments, then silently drop those whose parent
        directory is not visible to the user (instead of raising).
        """
        # Grab ids, bypassing 'count'
        ids = super(document_file, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=False)
        if not ids:
            return 0 if count else []
        # Filter out documents that are in directories that the user is not allowed to read.
        # Must use pure SQL to avoid access rules exceptions (we want to remove the records,
        # not fail), and the records have been filtered in parent's search() anyway.
        cr.execute('SELECT id, parent_id from ir_attachment WHERE id in %s', (tuple(ids),))
        # cont a dict of parent -> attach
        parents = {}
        for attach_id, attach_parent in cr.fetchall():
            parents.setdefault(attach_parent, []).append(attach_id)
        parent_ids = parents.keys()
        # filter parents
        visible_parent_ids = self.pool.get('document.directory').search(cr, uid, [('id', 'in', list(parent_ids))])
        # null parents means allowed
        orig_ids = ids # save the ids, to keep order
        ids = parents.get(None,[])
        for parent_id in visible_parent_ids:
            ids.extend(parents[parent_id])
        # sort result according to the original sort ordering
        if count:
            return len(ids)
        else:
            set_ids = set(ids)
            return [id for id in orig_ids if id in set_ids]
    def copy(self, cr, uid, id, default=None, context=None):
        # Suffix the duplicate's name with "(copy)" so the
        # (name, parent_id) unique constraint is not violated.
        if not default:
            default = {}
        if 'name' not in default:
            name = self.read(cr, uid, [id], ['name'])[0]['name']
            default.update(name=_("%s (copy)") % (name))
        return super(document_file, self).copy(cr, uid, id, default, context=context)
    def create(self, cr, uid, vals, context=None):
        # A parent directory forced through the context (e.g. by the
        # document views) wins over the submitted value.
        if context is None:
            context = {}
        vals['parent_id'] = context.get('parent_id', False) or vals.get('parent_id', False)
        # take partner from uid
        if vals.get('res_id', False) and vals.get('res_model', False) and not vals.get('partner_id', False):
            vals['partner_id'] = self.__get_partner_id(cr, uid, vals['res_model'], vals['res_id'], context)
        if vals.get('datas', False):
            # Index the base64 payload to fill file_type/index_content.
            vals['file_type'], vals['index_content'] = self._index(cr, uid, vals['datas'].decode('base64'), vals.get('datas_fname', False), vals.get('file_type', None))
        return super(document_file, self).create(cr, uid, vals, context)
    def write(self, cr, uid, ids, vals, context=None):
        # Re-index the content whenever the binary payload changes.
        if context is None:
            context = {}
        if vals.get('datas', False):
            vals['file_type'], vals['index_content'] = self._index(cr, uid, vals['datas'].decode('base64'), vals.get('datas_fname', False), vals.get('file_type', None))
        return super(document_file, self).write(cr, uid, ids, vals, context)
    def _index(self, cr, uid, data, datas_fname, file_type):
        """Run the content indexer; return (mime_type, extracted_text)."""
        mime, icont = cntIndex.doIndex(data, datas_fname, file_type or None, None)
        icont_u = ustr(icont)
        return mime, icont_u
    def __get_partner_id(self, cr, uid, res_model, res_id, context=None):
        """ A helper to retrieve the associated partner from any res_model+id
            It is a hack that will try to discover if the mentioned record is
            clearly associated with a partner record.
        """
        obj_model = self.pool[res_model]
        if obj_model._name == 'res.partner':
            return res_id
        elif 'partner_id' in obj_model._columns and obj_model._columns['partner_id']._obj == 'res.partner':
            bro = obj_model.browse(cr, uid, res_id, context=context)
            return bro.partner_id.id
        return False
class document_directory(osv.osv):
    """A directory of the document-management tree.

    Directories are either static (classic folders holding files) or
    resource-bound ("ressource"), in which case one virtual folder exists
    per record of the configured model.
    """
    _name = 'document.directory'
    _description = 'Directory'
    _order = 'name'
    _columns = {
        'name': fields.char('Name', required=True, select=1),
        'write_date': fields.datetime('Date Modified', readonly=True),
        'write_uid': fields.many2one('res.users', 'Last Modification User', readonly=True),
        'create_date': fields.datetime('Date Created', readonly=True),
        'create_uid': fields.many2one('res.users', 'Creator', readonly=True),
        'user_id': fields.many2one('res.users', 'Owner'),
        'group_ids': fields.many2many('res.groups', 'document_directory_group_rel', 'item_id', 'group_id', 'Groups'),
        'parent_id': fields.many2one('document.directory', 'Parent Directory', select=1, change_default=True),
        'child_ids': fields.one2many('document.directory', 'parent_id', 'Children'),
        'file_ids': fields.one2many('ir.attachment', 'parent_id', 'Files'),
        'content_ids': fields.one2many('document.directory.content', 'directory_id', 'Virtual Files'),
        'type': fields.selection([ ('directory','Static Directory'), ('ressource','Folders per resource'), ],
            'Type', required=True, select=1, change_default=True,
            help="Each directory can either have the type Static or be linked to another resource. A static directory, as with Operating Systems, is the classic directory that can contain a set of files. The directories linked to systems resources automatically possess sub-directories for each of resource types defined in the parent directory."),
        'domain': fields.char('Domain', help="Use a domain if you want to apply an automatic filter on visible resources."),
        'ressource_type_id': fields.many2one('ir.model', 'Resource model', change_default=True,
            help="Select an object here and there will be one folder per record of that resource."),
        'resource_field': fields.many2one('ir.model.fields', 'Name field', help='Field to be used as name on resource directories. If empty, the "name" will be used.'),
        'resource_find_all': fields.boolean('Find all resources',
            help="If true, all attachments that match this resource will " \
                " be located. If false, only ones that have this as parent." ),
        'ressource_parent_type_id': fields.many2one('ir.model', 'Parent Model', change_default=True,
            help="If you put an object here, this directory template will appear bellow all of these objects. " \
                "Such directories are \"attached\" to the specific model or record, just like attachments. " \
                "Don't put a parent directory if you select a parent model."),
        'ressource_id': fields.integer('Resource ID',
            help="Along with Parent Model, this ID attaches this folder to a specific record of Parent Model."),
        'ressource_tree': fields.boolean('Tree Structure',
            help="Check this if you want to use the same tree structure as the object selected in the system."),
        'dctx_ids': fields.one2many('document.directory.dctx', 'dir_id', 'Context fields'),
        'company_id': fields.many2one('res.company', 'Company', change_default=True),
    }
    _defaults = {
        'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'document.directory', context=c),
        'user_id': lambda self,cr,uid,ctx: uid,
        'domain': '[]',
        'type': 'directory',
        'ressource_id': 0,
        'resource_find_all': True,
    }
    _sql_constraints = [
        ('dirname_uniq', 'unique (name,parent_id,ressource_id,ressource_parent_type_id)', 'The directory name must be unique !'),
        ('no_selfparent', 'check(parent_id <> id)', 'Directory cannot be parent of itself!'),
    ]
    def name_get(self, cr, uid, ids, context=None):
        """Display directories as a '/'-joined path of their ancestry."""
        res = []
        if not self.search(cr,uid,[('id','in',ids)]):
            ids = []
        for d in self.browse(cr, uid, ids, context=context):
            s = ''
            d2 = d
            # Walk up the tree, prepending each name.  Note the loop stops
            # at the node whose parent is unset, so that topmost ancestor's
            # own name is only used when s is still empty.
            while d2 and d2.parent_id:
                s = d2.name + (s and ('/' + s) or '')
                d2 = d2.parent_id
            res.append((d.id, s or d.name))
        return res
    def get_full_path(self, cr, uid, dir_id, context=None):
        """ Return the full path to this directory, in a list, root first
        """
        if isinstance(dir_id, (tuple, list)):
            assert len(dir_id) == 1
            dir_id = dir_id[0]
        def _parent(dir_id, path):
            # Recurse to the root first so names are appended root-first.
            # Recursion stops at a directory attached to a parent model.
            parent=self.browse(cr, uid, dir_id)
            if parent.parent_id and not parent.ressource_parent_type_id:
                _parent(parent.parent_id.id,path)
                path.append(parent.name)
            else:
                path.append(parent.name)
            return path
        path = []
        _parent(dir_id, path)
        return path
    _constraints = [
        (osv.osv._check_recursion, 'Error! You cannot create recursive directories.', ['parent_id'])
    ]
    def onchange_content_id(self, cr, uid, ids, ressource_type_id):
        # Placeholder onchange handler; no client-side update needed.
        return {}
    def get_object(self, cr, uid, uri, context=None):
        """ Return a node object for the given uri.
            This fn merely passes the call to node_context
        """
        return get_node_context(cr, uid, context).get_uri(cr, uri)
    def get_node_class(self, cr, uid, ids, dbro=None, dynamic=False, context=None):
        """Retrieve the class of nodes for this directory
            This function can be overriden by inherited classes ;)
            @param dbro The browse object, if caller already has it
        """
        if dbro is None:
            dbro = self.browse(cr, uid, ids, context=context)
        if dynamic:
            return node_res_obj
        elif dbro.type == 'directory':
            return node_dir
        elif dbro.type == 'ressource':
            return node_res_dir
        else:
            # Fixed: the message was passed printf-style as a second
            # positional argument and never interpolated.
            raise ValueError("dir node for %s type." % dbro.type)
    def _prepare_context(self, cr, uid, nctx, context=None):
        """ Fill nctx with properties for this database
            @param nctx instance of nodes.node_context, to be filled
            @param context ORM context (dict) for us
            Note that this function is called *without* a list of ids,
            it should behave the same for the whole database (based on the
            ORM instance of document.directory).
            Some databases may override this and attach properties to the
            node_context. See WebDAV, CalDAV.
        """
        return
    def get_dir_permissions(self, cr, uid, ids, context=None):
        """Check what permission user 'uid' has on directory 'id'

        Returns a unix-like permission bitmask accumulated from the
        directory record rules (read -> 5, write -> 2, unlink -> 8).
        """
        assert len(ids) == 1
        res = 0
        for pperms in [('read', 5), ('write', 2), ('unlink', 8)]:
            try:
                self.check_access_rule(cr, uid, ids, pperms[0], context=context)
                res |= pperms[1]
            except except_orm:
                # Lack of a permission simply leaves its bits unset.
                pass
        return res
    def _locate_child(self, cr, uid, root_id, uri, nparent, ncontext):
        """ try to locate the node in uri,
            Return a tuple (node_dir, remaining_path)
        """
        return (node_database(context=ncontext), uri)
    def copy(self, cr, uid, id, default=None, context=None):
        # Suffix the duplicate's name with "(copy)" to satisfy the
        # dirname_uniq SQL constraint.
        if not default:
            default ={}
        name = self.read(cr, uid, [id])[0]['name']
        default.update(name=_("%s (copy)") % (name))
        return super(document_directory,self).copy(cr, uid, id, default, context=context)
    def _check_duplication(self, cr, uid, vals, ids=None, op='create'):
        """Return False when another directory with the same
        (name, parent, parent model, resource id) already exists."""
        name=vals.get('name',False)
        parent_id=vals.get('parent_id',False)
        ressource_parent_type_id=vals.get('ressource_parent_type_id',False)
        ressource_id=vals.get('ressource_id',0)
        if op=='write':
            for directory in self.browse(cr, SUPERUSER_ID, ids):
                # Missing values fall back to the current record's ones.
                if not name:
                    name=directory.name
                if not parent_id:
                    parent_id=directory.parent_id and directory.parent_id.id or False
                # TODO fix algo
                if not ressource_parent_type_id:
                    ressource_parent_type_id=directory.ressource_parent_type_id and directory.ressource_parent_type_id.id or False
                if not ressource_id:
                    ressource_id=directory.ressource_id and directory.ressource_id or 0
                res=self.search(cr,uid,[('id','<>',directory.id),('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
                if len(res):
                    return False
        if op=='create':
            res = self.search(cr, SUPERUSER_ID, [('name','=',name),('parent_id','=',parent_id),('ressource_parent_type_id','=',ressource_parent_type_id),('ressource_id','=',ressource_id)])
            if len(res):
                return False
        return True
    def write(self, cr, uid, ids, vals, context=None):
        if not self._check_duplication(cr, uid, vals, ids, op='write'):
            raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
        return super(document_directory,self).write(cr, uid, ids, vals, context=context)
    def create(self, cr, uid, vals, context=None):
        if not self._check_duplication(cr, uid, vals):
            raise osv.except_osv(_('ValidateError'), _('Directory name must be unique!'))
        newname = vals.get('name',False)
        if newname:
            # Characters that would break node URIs are rejected outright.
            for illeg in ('/', '@', '$', '#'):
                if illeg in newname:
                    raise osv.except_osv(_('ValidateError'), _('Directory name contains special characters!'))
        return super(document_directory,self).create(cr, uid, vals, context)
class document_directory_dctx(osv.osv):
    """ In order to evaluate dynamic folders, child items could have a limiting
        domain expression. For that, their parents will export a context where
        useful information will be passed on.
        If you define something like "s_id" = "this.id" at a folder iterating
        over sales, its children could have a domain like [('sale_id', '=', s_id)].
        This system is used recursively: the parent's dynamic context is
        appended to all children down the tree.
    """
    _name = 'document.directory.dctx'
    _description = 'Directory Dynamic Context'
    _columns = {
        # The directory exporting this context entry; removed with it.
        'dir_id': fields.many2one('document.directory', 'Directory', required=True, ondelete="cascade"),
        'field': fields.char('Field', required=True, select=1, help="The name of the field."),
        'expr': fields.char('Expression', required=True, help="A python expression used to evaluate the field.\n" + \
                "You can use 'dir_id' for current dir, 'res_id', 'res_model' as a reference to the current record, in dynamic folders"),
    }
class document_directory_content_type(osv.osv):
    """Selectable file type (extension + mime type) for directory contents.

    Active rows of this model feed the 'extension' selection of
    document.directory.content (see _extension_get there).
    """
    _name = 'document.directory.content.type'
    _description = 'Directory Content Type'
    _columns = {
        'name': fields.char('Content Type', required=True),
        'code': fields.char('Extension', size=4),
        'active': fields.boolean('Active'),
        'mimetype': fields.char('Mime Type')
    }
    _defaults = {
        'active': lambda *args: 1
    }
class document_directory_content(osv.osv):
    """A "virtual file" attached to a directory.

    A content typically renders a report (PDF) for the record a dynamic
    directory points at and exposes it as a file node in the document tree.
    """
    _name = 'document.directory.content'
    _description = 'Directory Content'
    _order = "sequence"
    def _extension_get(self, cr, uid, context=None):
        # Selection values come from the active rows of
        # document.directory.content.type.
        cr.execute('select code,name from document_directory_content_type where active')
        res = cr.fetchall()
        return res
    _columns = {
        'name': fields.char('Content Name', required=True),
        'sequence': fields.integer('Sequence', size=16),
        'prefix': fields.char('Prefix', size=16),
        'suffix': fields.char('Suffix', size=16),
        'report_id': fields.many2one('ir.actions.report.xml', 'Report'),
        'extension': fields.selection(_extension_get, 'Document Type', required=True, size=4),
        'include_name': fields.boolean('Include Record Name',
                help="Check this field if you want that the name of the file to contain the record name." \
                    "\nIf set, the directory will have to be a resource one."),
        'directory_id': fields.many2one('document.directory', 'Directory'),
    }
    _defaults = {
        'extension': lambda *args: '.pdf',
        'sequence': lambda *args: 1,
        'include_name': lambda *args: 1,
    }
    def _file_get(self, cr, node, nodename, content, context=None):
        """ return the nodes of a <node> parent having a <content> content
            The return value MUST be false or a list of node_class objects.
        """
        # TODO: respect the context!
        model = node.res_model
        if content.include_name and not model:
            return False
        res2 = []
        tname = ''
        # The virtual file name is prefix + (record name | content name)
        # + suffix + extension.
        if content.include_name:
            record_name = node.displayname or ''
            if record_name:
                tname = (content.prefix or '') + record_name + (content.suffix or '') + (content.extension or '')
        else:
            tname = (content.prefix or '') + (content.name or '') + (content.suffix or '') + (content.extension or '')
        # Slashes would break the virtual path, replace them.
        # BUGFIX: this was "if tname.find('/'):", which is truthy for -1
        # (slash absent) and falsy for a leading slash -- i.e. inverted
        # for exactly those two cases.  Use a membership test instead.
        if '/' in tname:
            tname = tname.replace('/', '_')
        # Determine the record id the content applies to: dynamic context
        # first, then a node attribute, then the ORM context.
        act_id = False
        if 'dctx_res_id' in node.dctx:
            act_id = node.dctx['res_id']
        elif hasattr(node, 'res_id'):
            act_id = node.res_id
        else:
            act_id = node.context.context.get('res_id',False)
        if not nodename:
            # No filter: return the (single) content node.
            n = node_content(tname, node, node.context,content, act_id=act_id)
            res2.append( n)
        else:
            # Filtered lookup: only return it if the name matches exactly.
            if nodename == tname:
                n = node_content(tname, node, node.context,content, act_id=act_id)
                n.fill_fields(cr)
                res2.append(n)
        return res2
    def process_write(self, cr, uid, node, data, context=None):
        """Validate a write on a content node.

        Only '.pdf' contents are supported; the write itself is a no-op
        (generated contents have no persistent payload).
        """
        if node.extension != '.pdf':
            raise Exception("Invalid content: %s" % node.extension)
        return True
    def process_read(self, cr, uid, node, context=None):
        """Render the linked report for the node's record; return PDF bytes."""
        if node.extension != '.pdf':
            raise Exception("Invalid content: %s" % node.extension)
        report = self.pool.get('ir.actions.report.xml').browse(cr, uid, node.report_id, context=context)
        srv = openerp.report.interface.report_int._reports['report.'+report.report_name]
        # Merge the node's dynamic context over the ORM context before
        # rendering.
        ctx = node.context.context.copy()
        ctx.update(node.dctx)
        pdf,pdftype = srv.create(cr, uid, [node.act_id,], {}, context=ctx)
        return pdf
class ir_action_report_xml(osv.osv):
    """ir.actions.report.xml extended with a computed 'model_id' field that
    resolves the report's textual 'model' into the ir.model record."""
    _name="ir.actions.report.xml"
    _inherit ="ir.actions.report.xml"
    def _model_get(self, cr, uid, ids, name, arg, context=None):
        # Map each report to the id of the ir.model matching its 'model'
        # string, or False when no such model exists.
        res = {}
        model_pool = self.pool.get('ir.model')
        for data in self.read(cr, uid, ids, ['model']):
            model = data.get('model',False)
            if model:
                model_id =model_pool.search(cr, uid, [('model','=',model)])
                if model_id:
                    res[data.get('id')] = model_id[0]
                else:
                    res[data.get('id')] = False
        return res
    def _model_search(self, cr, uid, obj, name, args, context=None):
        # fnct_search counterpart: translate a ('model_id', '=', id)
        # criterion back into a domain over the textual 'model' column.
        if not len(args):
            return []
        assert len(args) == 1 and args[0][1] == '=', 'expression is not what we expect: %r' % args
        model_id= args[0][2]
        if not model_id:
            # a deviation from standard behavior: when searching model_id = False
            # we return *all* reports, not just ones with empty model.
            # One reason is that 'model' is a required field so far
            return []
        model = self.pool.get('ir.model').read(cr, uid, [model_id])[0]['model']
        report_id = self.search(cr, uid, [('model','=',model)])
        if not report_id:
            # No matching reports: return an always-false domain.
            return [('id','=','0')]
        return [('id','in',report_id)]
    _columns={
        'model_id' : fields.function(_model_get, fnct_search=_model_search, string='Model Id'),
    }
class document_storage(osv.osv):
    """ The primary object for data storage. Deprecated.

    Kept only for compatibility: all payloads now live in the
    ir.attachment 'datas' column (base64), so the storage record itself
    is merely browsed and otherwise unused.
    """
    _name = 'document.storage'
    _description = 'Storage Media'
    def get_data(self, cr, uid, id, file_node, context=None, fil_obj=None):
        """ retrieve the contents of some file_node having storage_id = id
            optionally, fil_obj could point to the browse object of the file
            (ir.attachment)
        """
        boo = self.browse(cr, uid, id, context=context)
        if fil_obj:
            ira = fil_obj
        else:
            ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
        data = ira.datas
        if data:
            # Stored base64-encoded; return the raw bytes.
            out = data.decode('base64')
        else:
            out = ''
        return out
    def get_file(self, cr, uid, id, file_node, mode, context=None):
        """ Return a file-like object for the contents of some node
        """
        if context is None:
            context = {}
        boo = self.browse(cr, uid, id, context=context)
        ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
        return nodefd_db(file_node, ira_browse=ira, mode=mode)
    def set_data(self, cr, uid, id, file_node, data, context=None, fil_obj=None):
        """ store the data.
            This function MUST be used from an ir.attachment. It wouldn't make sense
            to store things persistently for other types (dynamic).
        """
        boo = self.browse(cr, uid, id, context=context)
        if fil_obj:
            ira = fil_obj
        else:
            ira = self.pool.get('ir.attachment').browse(cr, uid, file_node.file_id, context=context)
        _logger.debug( "Store data for ir.attachment #%d." % ira.id)
        store_fname = None
        fname = None
        filesize = len(data)
        # 1st phase: write the base64 payload through the ORM.
        self.pool.get('ir.attachment').write(cr, uid, [file_node.file_id], {'datas': data.encode('base64')}, context=context)
        # 2nd phase: store the metadata
        try:
            icont = ''
            mime = ira.file_type
            if not mime:
                mime = ""
            try:
                # Best-effort content indexing; failure only loses the
                # extracted text, never the write itself.
                mime, icont = cntIndex.doIndex(data, ira.datas_fname, ira.file_type or None, fname)
            except Exception:
                _logger.debug('Cannot index file.', exc_info=True)
                pass
            try:
                icont_u = ustr(icont)
            except UnicodeError:
                icont_u = ''
            # a hack: /assume/ that the calling write operation will not try
            # to write the fname and size, and update them in the db concurrently.
            # We cannot use a write() here, because we are already in one.
            cr.execute('UPDATE ir_attachment SET file_size = %s, index_content = %s, file_type = %s WHERE id = %s', (filesize, icont_u, mime, file_node.file_id))
            self.pool.get('ir.attachment').invalidate_cache(cr, uid, ['file_size', 'index_content', 'file_type'], [file_node.file_id], context=context)
            file_node.content_length = filesize
            file_node.content_type = mime
            return True
        except Exception, e :
            _logger.warning("Cannot save data.", exc_info=True)
            # should we really rollback once we have written the actual data?
            # at the db case (only), that rollback would be safe
            raise except_orm(_('Error at doc write!'), str(e))
def _str2time(cre):
    """Convert a database time string into a float timestamp.

    Falls back to the current time when *cre* is empty.  A fractional
    second suffix (".NNN") is split off and parsed separately, because
    strptime() cannot handle it.
    Note: a place to fix if datetime is used in db.
    """
    if not cre:
        return time.time()
    fraction = 0.0
    if isinstance(cre, basestring):
        dot = cre.find('.')
        if dot >= 0:
            fraction = float(cre[dot:])
            cre = cre[:dot]
    whole = time.mktime(time.strptime(cre, '%Y-%m-%d %H:%M:%S'))
    return whole + fraction
def get_node_context(cr, uid, context):
    # Thin factory around node_context, kept as a module-level entry point
    # so other modules (e.g. WebDAV integrations) can obtain the root node.
    return node_context(cr, uid, context)
#
# An object that represents a URI
# path: the URI of the object
# content: the Content it belongs to (_print.pdf)
# type: content or collection
# content: object = res.partner
# collection: object = directory, object2 = res.partner
# file: object = ir.attachment
# root: if we are at the first directory of a resource
#
class node_context(object):
    """ This is the root node, representing access to some particular context
        A context is a set of persistent data, which may influence the structure
        of the nodes. All other transient information during a data query should
        be passed down with function arguments.
    """
    cached_roots = {}
    # Class used to wrap static files; set per-instance in __init__.
    node_file_class = None
    def __init__(self, cr, uid, context=None):
        self.dbname = cr.dbname
        self.uid = uid
        self.context = context
        if context is None:
            context = {}
        context['uid'] = uid
        self._dirobj = openerp.registry(cr.dbname).get('document.directory')
        # node_file is expected to be defined later in this module.
        self.node_file_class = node_file
        self.extra_ctx = {} # Extra keys for context, that do _not_ trigger inequality
        assert self._dirobj
        # Let document.directory (or an override, e.g. WebDAV) attach
        # database-wide properties to this context.
        self._dirobj._prepare_context(cr, uid, self, context=context)
        self.rootdir = False #self._dirobj._get_root_directory(cr,uid,context)
    def __eq__(self, other):
        # Equality is used for caching: same db, user, context and root.
        if not type(other) == node_context:
            return False
        if self.dbname != other.dbname:
            return False
        if self.uid != other.uid:
            return False
        if self.context != other.context:
            return False
        if self.rootdir != other.rootdir:
            return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def get(self, name, default=None):
        # Dict-style access to the underlying ORM context.
        return self.context.get(name, default)
    def get_uri(self, cr, uri):
        """ Although this fn passes back to doc.dir, it is needed since
            it is a potential caching point.
        """
        # Locate the starting node, then descend one path component at a
        # time; returns False as soon as a component cannot be resolved.
        (ndir, duri) = self._dirobj._locate_child(cr, self.uid, self.rootdir, uri, None, self)
        while duri:
            ndir = ndir.child(cr, duri[0])
            if not ndir:
                return False
            duri = duri[1:]
        return ndir
    def get_dir_node(self, cr, dbro):
        """Create (or locate) a node for a directory
            @param dbro a browse object of document.directory
        """
        # NOTE(review): get_full_path and get_node_class are declared with
        # (cr, uid, ids, ...) signatures but are invoked here browse-record
        # style -- looks suspect; verify against the callers before relying
        # on this method.
        fullpath = dbro.get_full_path(context=self.context)
        klass = dbro.get_node_class(dbro, context=self.context)
        return klass(fullpath, None ,self, dbro)
    def get_file_node(self, cr, fbro):
        """ Create or locate a node for a static file
            @param fbro a browse object of an ir.attachment
        """
        parent = None
        if fbro.parent_id:
            parent = self.get_dir_node(cr, fbro.parent_id)
        return self.node_file_class(fbro.name, parent, self, fbro)
class node_class(object):
""" this is a superclass for our inodes
It is an API for all code that wants to access the document files.
Nodes have attributes which contain usual file properties
"""
our_type = 'baseclass'
DAV_PROPS = None
DAV_M_NS = None
def __init__(self, path, parent, context):
assert isinstance(context,node_context)
assert (not parent ) or isinstance(parent,node_class)
self.path = path
self.context = context
self.type=self.our_type
self.parent = parent
self.uidperms = 5 # computed permissions for our uid, in unix bits
self.mimetype = 'application/octet-stream'
self.create_date = None
self.write_date = None
self.unixperms = 0660
self.uuser = 'user'
self.ugroup = 'group'
self.content_length = 0
# dynamic context:
self.dctx = {}
if parent:
self.dctx = parent.dctx.copy()
self.displayname = 'Object'
def __eq__(self, other):
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
def full_path(self):
""" Return the components of the full path for some
node.
The returned list only contains the names of nodes.
"""
if self.parent:
s = self.parent.full_path()
else:
s = []
if isinstance(self.path,list):
s+=self.path
elif self.path is None:
s.append('')
else:
s.append(self.path)
return s #map(lambda x: '/' +x, s)
def __repr__(self):
return "%s@/%s" % (self.our_type, '/'.join(self.full_path()))
def children(self, cr, domain=None):
print "node_class.children()"
return [] #stub
def child(self, cr, name, domain=None):
print "node_class.child()"
return None
def get_uri(self, cr, uri):
duri = uri
ndir = self
while duri:
ndir = ndir.child(cr, duri[0])
if not ndir:
return False
duri = duri[1:]
return ndir
def path_get(self):
print "node_class.path_get()"
return False
def get_data(self, cr):
raise TypeError('No data for %s.'% self.type)
def open_data(self, cr, mode):
""" Open a node_descriptor object for this node.
@param the mode of open, eg 'r', 'w', 'a', like file.open()
This operation may lock the data for this node (and accross
other node hierarchies), until the descriptor is close()d. If
the node is locked, subsequent opens (depending on mode) may
immediately fail with an exception (which?).
For this class, there is no data, so no implementation. Each
child class that has data should override this.
"""
raise TypeError('No data for %s.' % self.type)
def get_etag(self, cr):
""" Get a tag, unique per object + modification.
see. http://tools.ietf.org/html/rfc2616#section-13.3.3 """
return '"%s-%s"' % (self._get_ttag(cr), self._get_wtag(cr))
def _get_wtag(self, cr):
""" Return the modification time as a unique, compact string """
return str(_str2time(self.write_date)).replace('.','')
def _get_ttag(self, cr):
""" Get a unique tag for this type/id of object.
Must be overriden, so that each node is uniquely identified.
"""
print "node_class.get_ttag()",self
raise NotImplementedError("get_ttag stub()")
def get_dav_props(self, cr):
    """Export special DAV capabilities (GroupDAV etc.) for this node.

    Placed here rather than in the WebDAV layer so that the base-class
    behaviour applies to all node subclasses.
    """
    return self.DAV_PROPS or {}
def match_dav_eprop(self, cr, match, ns, prop):
    """True when the DAV property *prop* in namespace *ns* equals *match*."""
    return bool(self.get_dav_eprop(cr, ns, prop) == match)
def get_dav_eprop(self, cr, ns, prop):
    """Resolve an extra DAV property through a method-naming convention.

    Looks up the prefix for namespace *ns* in self.DAV_M_NS, then calls
    self.<prefix>_<prop> (dashes in *prop* mapped to underscores).

    @return the method's result, or None when the namespace is unknown,
            the method does not exist, or it raises AttributeError.
    """
    if not self.DAV_M_NS:
        return None
    # 'in' membership test instead of the deprecated dict.has_key().
    if ns in self.DAV_M_NS:
        prefix = self.DAV_M_NS[ns]
    else:
        _logger.debug('No namespace: %s ("%s").', ns, prop)
        return None
    mname = prefix + "_" + prop.replace('-', '_')
    if not hasattr(self, mname):
        return None
    try:
        m = getattr(self, mname)
        return m(cr)
    except AttributeError:
        # NOTE: this also swallows AttributeErrors raised *inside* m(cr),
        # not just missing-attribute lookups; preserved from the original.
        # Lazy %-style args: the message is only formatted if DEBUG is enabled.
        _logger.debug('The property %s is not supported.', prop, exc_info=True)
        return None
def get_dav_resourcetype(self, cr):
    """ Get the DAV resource type.

        Is here because some nodes may exhibit special behaviour, like
        CalDAV/GroupDAV collections.  Abstract: subclasses must override.
    """
    raise NotImplementedError
def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
    """ Move this node to a new parent directory.

    @param ndir_node the collection that this node should be moved under
    @param new_name a name to rename this node to. If omitted, the old
        name is preserved
    @param fil_obj can be None, is the browse object for the file,
        if already available.
    @param ndir_obj must be the browse object to the new doc.directory
        location, where this node should be moved to.
    @param in_write When called by write(), we shouldn't attempt to write
        the object, but instead return the dict of vals (avoid re-entrance).
        If false, we should write all data to the object, here, as if the
        caller won't do anything after calling move_to()

    Return value:
        True: the node is moved, the caller can update other values, too.
        False: the node is either removed or fully updated, the caller
            must discard the fil_obj, not attempt to write any more to it.
        dict: values to write back to the object. *May* contain a new id!

    Depending on src and target storage, implementations of this function
    could do various things.
    Should also consider node<->content, dir<->dir moves etc.

    Move operations, as instructed from APIs (e.g. request from DAV) could
    use this function.
    """
    # Abstract in the base class: each movable node type implements its own.
    raise NotImplementedError(repr(self))
def create_child(self, cr, path, data=None):
    """ Create a regular file under this node.

        The base node refuses; collection subclasses override this.
    """
    _logger.warning("Attempted to create a file under %r, not possible.", self)
    raise IOError(errno.EPERM, "Not allowed to create file(s) here.")
def create_child_collection(self, cr, objname):
    """ Create a child collection (directory) under self.

        The base node refuses; collection subclasses override this.
    """
    _logger.warning("Attempted to create a collection under %r, not possible.", self)
    raise IOError(errno.EPERM, "Not allowed to create folder(s) here.")
def rm(self, cr):
    # Remove (unlink) this node; abstract in the base class.
    raise NotImplementedError(repr(self))
def rmcol(self, cr):
    # Remove this node as a collection (directory); abstract in the base class.
    raise NotImplementedError(repr(self))
def get_domain(self, cr, filters):
    # TODO Document
    # Base class contributes no extra search-domain terms.
    return []
def check_perms(self, perms):
    """Check the permissions of the current node.

    @param perms either an integer of the bits to check, or a string
        with the permission letters.

    Permissions of nodes are (in a unix way):
        1, x : allow descend into dir
        2, w : allow write into file, or modification to dir
        4, r : allow read of file, or listing of dir contents
        8, u : allow remove (unlink)

    @return True when every requested bit is granted on this node.
    @raise ValueError on out-of-range bits or a non-int/non-str argument.
    """
    letter_bits = {'x': 1, 'w': 2, 'r': 4, 'u': 8}
    if isinstance(perms, str):
        mask = 0
        for letter in perms:
            mask = mask | letter_bits[letter]
        perms = mask
    elif isinstance(perms, int):
        if not (0 <= perms <= 15):
            raise ValueError("Invalid permission bits.")
    else:
        raise ValueError("Invalid permission attribute.")
    return (self.uidperms & perms) == perms
class node_database(node_class):
    """ A node representing the database directory (the tree root).

        Children are the top-level document.directory records (those with
        no parent and no resource-parent type).
    """
    our_type = 'database'

    def __init__(self, path=None, parent=False, context=None):
        if path is None:
            path = []
        super(node_database,self).__init__(path, parent, context)
        self.unixperms = 040750
        # 4 (read/list) + 1 (descend) on the root for the current uid.
        self.uidperms = 5

    def children(self, cr, domain=None):
        # Directories first, then any content files at the root level.
        res = self._child_get(cr, domain=domain) + self._file_get(cr)
        return res

    def child(self, cr, name, domain=None):
        # NOTE(review): the caller-supplied *domain* is dropped here
        # (domain=None is passed on) — confirm whether that is intentional;
        # the sibling classes forward their domain argument.
        res = self._child_get(cr, name, domain=None)
        if res:
            return res[0]
        res = self._file_get(cr,name)
        if res:
            return res[0]
        return None

    def _child_get(self, cr, name=False, domain=None):
        """List (or look up by *name*) the root document.directory nodes."""
        dirobj = self.context._dirobj
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        # Root dirs: no parent and not attached to a resource type.
        where = [('parent_id','=', False), ('ressource_parent_type_id','=',False)]
        if name:
            where.append(('name','=',name))
            # Name lookup only needs 'descend' permission ...
            is_allowed = self.check_perms(1)
        else:
            # ... a full listing needs read+descend.
            is_allowed = self.check_perms(5)
        if not is_allowed:
            raise IOError(errno.EPERM, "Permission into directory denied.")
        if domain:
            where = where + domain
        ids = dirobj.search(cr, uid, where, context=ctx)
        res = []
        for dirr in dirobj.browse(cr, uid, ids, context=ctx):
            # Each directory record chooses its own node class (static/dynamic).
            klass = dirr.get_node_class(dirr, context=ctx)
            res.append(klass(dirr.name, self, self.context,dirr))
        return res

    def _file_get(self, cr, nodename=False):
        # The database root itself holds no files.
        res = []
        return res

    def _get_ttag(self, cr):
        # Unique per database.
        return 'db-%s' % cr.dbname
def mkdosname(company_name, default='noname'):
    """Convert *company_name* to a DOS-like name of at most 8 characters.

    Falsy input yields *default*; characters outside the safe set are
    replaced with '_'.
    """
    if not company_name:
        return default
    badchars = ' !@#$%^`~*()+={}[];:\'"/?.<>'
    return ''.join('_' if ch in badchars else ch for ch in company_name[:8])
def _uid2unixperms(perms, has_owner):
""" Convert the uidperms and the owner flag to full unix bits
"""
res = 0
if has_owner:
res |= (perms & 0x07) << 6
res |= (perms & 0x05) << 3
elif perms & 0x02:
res |= (perms & 0x07) << 6
res |= (perms & 0x07) << 3
else:
res |= (perms & 0x07) << 6
res |= (perms & 0x05) << 3
res |= 0x05
return res
class node_dir(node_database):
    """A static directory node, backed by one document.directory record."""
    our_type = 'collection'

    def __init__(self, path, parent, context, dirr, dctx=None):
        super(node_dir,self).__init__(path, parent,context)
        self.dir_id = dirr and dirr.id or False
        #todo: more info from dirr
        self.mimetype = 'application/x-directory'
        # 'httpd/unix-directory'
        self.create_date = dirr and dirr.create_date or False
        self.domain = dirr and dirr.domain or []
        self.res_model = dirr and dirr.ressource_type_id and dirr.ressource_type_id.model or False
        # TODO: the write date should be MAX(file.write)..
        self.write_date = dirr and (dirr.write_date or dirr.create_date) or False
        self.content_length = 0
        try:
            self.uuser = (dirr.user_id and dirr.user_id.login) or 'nobody'
        except Exception:
            self.uuser = 'nobody'
        self.ugroup = mkdosname(dirr.company_id and dirr.company_id.name, default='nogroup')
        self.uidperms = dirr.get_dir_permissions()
        self.unixperms = 040000 | _uid2unixperms(self.uidperms, dirr and dirr.user_id)
        if dctx:
            self.dctx.update(dctx)
        dc2 = self.context.context
        dc2.update(self.dctx)
        dc2['dir_id'] = self.dir_id
        self.displayname = dirr and dirr.name or False
        # Evaluate the directory's dynamic-context expressions; failures are
        # logged to stdout and skipped so one bad expr doesn't break the node.
        if dirr and dirr.dctx_ids:
            for dfld in dirr.dctx_ids:
                try:
                    self.dctx[dfld.field] = safe_eval(dfld.expr,dc2)
                except Exception,e:
                    print "Cannot eval %s." % dfld.expr
                    print e
                    pass

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        if not self.context == other.context:
            return False
        # Two directory nodes, for the same document.directory, may have a
        # different context! (dynamic folders)
        if self.dctx != other.dctx:
            return False
        return self.dir_id == other.dir_id

    def get_data(self, cr):
        # A directory has no byte content of its own.
        #res = ''
        #for child in self.children(cr):
        #    res += child.get_data(cr)
        return None

    def _file_get(self, cr, nodename=False):
        """Collect virtual files produced by document.directory.content rows."""
        res = super(node_dir,self)._file_get(cr, nodename)
        is_allowed = self.check_perms(nodename and 1 or 5)
        if not is_allowed:
            raise IOError(errno.EPERM, "Permission into directory denied.")
        cntobj = self.context._dirobj.pool.get('document.directory.content')
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        where = [('directory_id','=',self.dir_id) ]
        ids = cntobj.search(cr, uid, where, context=ctx)
        for content in cntobj.browse(cr, uid, ids, context=ctx):
            res3 = cntobj._file_get(cr, self, nodename, content)
            if res3:
                res.extend(res3)
        return res

    def _child_get(self, cr, name=None, domain=None):
        """List (or look up by *name*) sub-directories and plain attachments."""
        dirobj = self.context._dirobj
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        where = [('parent_id','=',self.dir_id)]
        if name:
            where.append(('name','=',name))
            is_allowed = self.check_perms(1)
        else:
            is_allowed = self.check_perms(5)
        if not is_allowed:
            raise IOError(errno.EPERM, "Permission into directory denied.")
        if not domain:
            domain = []
        where2 = where + domain + [('ressource_parent_type_id','=',False)]
        ids = dirobj.search(cr, uid, where2, context=ctx)
        res = []
        for dirr in dirobj.browse(cr, uid, ids, context=ctx):
            klass = dirr.get_node_class(dirr, context=ctx)
            res.append(klass(dirr.name, self, self.context,dirr))
        # Static directories should never return files with res_model/res_id
        # because static dirs are /never/ related to a record.
        # In fact, files related to some model and parented by the root dir
        # (the default), will NOT be accessible in the node system unless
        # a resource folder for that model exists (with resource_find_all=True).
        # Having resource attachments in a common folder is bad practice,
        # because they would be visible to all users, and their names may be
        # the same, conflicting.
        where += [('res_model', '=', False)]
        fil_obj = dirobj.pool.get('ir.attachment')
        ids = fil_obj.search(cr, uid, where, context=ctx)
        if ids:
            for fil in fil_obj.browse(cr, uid, ids, context=ctx):
                klass = self.context.node_file_class
                res.append(klass(fil.name, self, self.context, fil))
        return res

    def rmcol(self, cr):
        """Remove this (empty) directory; needs 'u' permission."""
        uid = self.context.uid
        directory = self.context._dirobj.browse(cr, uid, self.dir_id)
        res = False
        if not directory:
            # NOTE(review): message has a typo ("Not such") — kept as-is since
            # a doc-only pass must not alter runtime strings.
            raise OSError(2, 'Not such file or directory.')
        if not self.check_perms('u'):
            raise IOError(errno.EPERM,"Permission denied.")
        if directory._name == 'document.directory':
            if self.children(cr):
                raise OSError(39, 'Directory not empty.')
            res = self.context._dirobj.unlink(cr, uid, [directory.id])
        else:
            raise OSError(1, 'Operation is not permitted.')
        return res

    def create_child_collection(self, cr, objname):
        """Create a sub-directory named *objname*; returns the new record id."""
        object2 = False
        if not self.check_perms(2):
            raise IOError(errno.EPERM,"Permission denied.")
        dirobj = self.context._dirobj
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        obj = dirobj.browse(cr, uid, self.dir_id)
        if obj and (obj.type == 'ressource') and not object2:
            raise OSError(1, 'Operation is not permitted.')
        #objname = uri2[-1]
        val = {
            'name': objname,
            'ressource_parent_type_id': obj and obj.ressource_type_id.id or False,
            'ressource_id': object2 and object2.id or False,
            'parent_id' : obj and obj.id or False
        }
        return dirobj.create(cr, uid, val)

    def create_child(self, cr, path, data=None):
        """ API function to create a child file object and node
            Return the node_* created
        """
        if not self.check_perms(2):
            raise IOError(errno.EPERM,"Permission denied.")
        dirobj = self.context._dirobj
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        fil_obj=dirobj.pool.get('ir.attachment')
        val = {
            'name': path,
            'datas_fname': path,
            'parent_id': self.dir_id,
            # Datas are not set here
        }
        fil_id = fil_obj.create(cr, uid, val, context=ctx)
        fil = fil_obj.browse(cr, uid, fil_id, context=ctx)
        fnode = node_file(path, self, self.context, fil)
        if data is not None:
            fnode.set_data(cr, data, fil)
        return fnode

    def _get_ttag(self, cr):
        # Unique per document.directory record.
        return 'dir-%d' % self.dir_id

    def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
        """ Move directory. This operation is simple, since the present node is
            only used for static, simple directories.

            Note /may/ be called with ndir_node = None, to rename the document root.
        """
        if ndir_node and (ndir_node.context != self.context):
            raise NotImplementedError("Cannot move directories between contexts.")
        # Need unlink on self and write on the destination collection.
        if (not self.check_perms('u')) or (not ndir_node.check_perms('w')):
            raise IOError(errno.EPERM,"Permission denied.")
        dir_obj = self.context._dirobj
        if not fil_obj:
            dbro = dir_obj.browse(cr, self.context.uid, self.dir_id, context=self.context.context)
        else:
            dbro = dir_obj
            assert dbro.id == self.dir_id
        if not dbro:
            raise IndexError("Cannot locate dir %d", self.dir_id)
        if (not self.parent) and ndir_node:
            if not dbro.parent_id:
                raise IOError(errno.EPERM, "Cannot move the root directory!")
            self.parent = self.context.get_dir_node(cr, dbro.parent_id)
            assert self.parent
        if self.parent != ndir_node:
            _logger.debug('Cannot move dir %r from %r to %r.', self, self.parent, ndir_node)
            raise NotImplementedError('Cannot move dir to another dir.')
        ret = {}
        if new_name and (new_name != dbro.name):
            if ndir_node.child(cr, new_name):
                raise IOError(errno.EEXIST, "Destination path already exists.")
            ret['name'] = new_name
        del dbro
        if not in_write:
            # We have to update the data ourselves
            if ret:
                ctx = self.context.context.copy()
                ctx['__from_node'] = True
                dir_obj.write(cr, self.context.uid, [self.dir_id,], ret, ctx)
            ret = True
        return ret
class node_res_dir(node_class):
    """ A folder containing dynamic folders
        A special sibling to node_dir, which does only contain dynamically
        created folders foreach resource in the foreign model.
        All folders should be of type node_res_obj and merely behave like
        node_dirs (with limited domain).
    """
    our_type = 'collection'
    # Class for the dynamic children; bound to node_res_obj after that class
    # is defined (see the module-level assignment below this class).
    res_obj_class = None

    def __init__(self, path, parent, context, dirr, dctx=None ):
        super(node_res_dir,self).__init__(path, parent, context)
        self.dir_id = dirr.id
        #todo: more info from dirr
        self.mimetype = 'application/x-directory'
        # 'httpd/unix-directory'
        self.create_date = dirr.create_date
        # TODO: the write date should be MAX(file.write)..
        self.write_date = dirr.write_date or dirr.create_date
        self.content_length = 0
        try:
            self.uuser = (dirr.user_id and dirr.user_id.login) or 'nobody'
        except Exception:
            self.uuser = 'nobody'
        self.ugroup = mkdosname(dirr.company_id and dirr.company_id.name, default='nogroup')
        self.uidperms = dirr.get_dir_permissions()
        self.unixperms = 040000 | _uid2unixperms(self.uidperms, dirr and dirr.user_id)
        # The foreign model whose records become child folders.
        self.res_model = dirr.ressource_type_id and dirr.ressource_type_id.model or False
        self.resm_id = dirr.ressource_id
        self.res_find_all = dirr.resource_find_all
        self.namefield = dirr.resource_field.name or 'name'
        self.displayname = dirr.name
        # Important: the domain is evaluated using the *parent* dctx!
        self.domain = dirr.domain
        self.ressource_tree = dirr.ressource_tree
        # and then, we add our own vars in the dctx:
        if dctx:
            self.dctx.update(dctx)
        # and then, we prepare a dctx dict, for deferred evaluation:
        self.dctx_dict = {}
        for dfld in dirr.dctx_ids:
            self.dctx_dict[dfld.field] = dfld.expr

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        if not self.context == other.context:
            return False
        # Two nodes, for the same document.directory, may have a
        # different context! (dynamic folders)
        if self.dctx != other.dctx:
            return False
        return self.dir_id == other.dir_id

    def children(self, cr, domain=None):
        return self._child_get(cr, domain=domain)

    def child(self, cr, name, domain=None):
        res = self._child_get(cr, name, domain=domain)
        if res:
            return res[0]
        return None

    def _child_get(self, cr, name=None, domain=None):
        """ return virtual children of resource, based on the
            foreign object.

            Note that many objects use NULL for a name, so we should
            better call the name_search(),name_get() set of methods
        """
        if self.res_model not in self.context._dirobj.pool:
            return []
        obj = self.context._dirobj.pool[self.res_model]
        dirobj = self.context._dirobj
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        ctx.update(self.context.extra_ctx)
        where = []
        if self.domain:
            # The stored domain string is evaluated in the node context.
            app = safe_eval(self.domain, ctx)
            if not app:
                pass
            elif isinstance(app, list):
                where.extend(app)
            elif isinstance(app, tuple):
                where.append(app)
            else:
                raise RuntimeError("Incorrect domain expr: %s." % self.domain)
        if self.resm_id:
            where.append(('id','=',self.resm_id))
        if name:
            # The =like character will match underscores against any characters
            # including the special ones that couldn't exist in a FTP/DAV request
            where.append((self.namefield,'=like',name.replace('\\','\\\\')))
            is_allowed = self.check_perms(1)
        else:
            is_allowed = self.check_perms(5)
        if not is_allowed:
            raise IOError(errno.EPERM,"Permission denied.")
        # print "Where clause for %s" % self.res_model, where
        if self.ressource_tree:
            object2 = False
            if self.resm_id:
                object2 = dirobj.pool[self.res_model].browse(cr, uid, self.resm_id) or False
            if obj._parent_name in obj.fields_get(cr, uid):
                # Tree mode: only direct children of the current resource.
                where.append((obj._parent_name,'=',object2 and object2.id or False))
        resids = obj.search(cr, uid, where, context=ctx)
        res = []
        for bo in obj.browse(cr, uid, resids, context=ctx):
            if not bo:
                continue
            res_name = getattr(bo, self.namefield)
            if not res_name:
                continue
                # Yes! we can't do better but skip nameless records.
            # Escape the name for characters not supported in filenames
            res_name = res_name.replace('/','_') # any other weird char?
            if name and (res_name != ustr(name)):
                # we have matched _ to any character, but we only meant to match
                # the special ones.
                # Eg. 'a_c' will find 'abc', 'a/c', 'a_c', may only
                # return 'a/c' and 'a_c'
                continue
            res.append(self.res_obj_class(res_name, self.dir_id, self, self.context, self.res_model, bo))
        return res

    def _get_ttag(self, cr):
        # Unique per document.directory record (resource-dir flavour).
        return 'rdir-%d' % self.dir_id
class node_res_obj(node_class):
    """ A dynamically created folder.
        A special sibling to node_dir, which does only contain dynamically
        created folders foreach resource in the foreign model.
        All folders should be of type node_res_obj and merely behave like
        node_dirs (with limited domain).
    """
    our_type = 'collection'

    def __init__(self, path, dir_id, parent, context, res_model, res_bo, res_id=None):
        super(node_res_obj,self).__init__(path, parent,context)
        assert parent
        #todo: more info from dirr
        self.dir_id = dir_id
        self.mimetype = 'application/x-directory'
        # 'httpd/unix-directory'
        self.create_date = parent.create_date
        # TODO: the write date should be MAX(file.write)..
        self.write_date = parent.write_date
        self.content_length = 0
        # Inherit permissions from the parent resource-dir node.
        self.uidperms = parent.uidperms & 15
        self.unixperms = 040000 | _uid2unixperms(self.uidperms, True)
        self.uuser = parent.uuser
        self.ugroup = parent.ugroup
        self.res_model = res_model
        self.domain = parent.domain
        self.displayname = path
        self.dctx_dict = parent.dctx_dict
        if isinstance(parent, node_res_dir):
            self.res_find_all = parent.res_find_all
        else:
            self.res_find_all = False
        if res_bo:
            self.res_id = res_bo.id
            dc2 = self.context.context.copy()
            dc2.update(self.dctx)
            dc2['res_model'] = res_model
            dc2['res_id'] = res_bo.id
            dc2['this'] = res_bo
            # Deferred dctx expressions are evaluated now, against the record.
            for fld,expr in self.dctx_dict.items():
                try:
                    self.dctx[fld] = safe_eval(expr, dc2)
                except Exception,e:
                    print "Cannot eval %s for %s." % (expr, fld)
                    print e
                    pass
        else:
            self.res_id = res_id

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        if not self.context == other.context:
            return False
        if not self.res_model == other.res_model:
            return False
        if not self.res_id == other.res_id:
            return False
        if self.domain != other.domain:
            return False
        if self.res_find_all != other.res_find_all:
            return False
        if self.dctx != other.dctx:
            return False
        return self.dir_id == other.dir_id

    def children(self, cr, domain=None):
        return self._child_get(cr, domain=domain) + self._file_get(cr)

    def child(self, cr, name, domain=None):
        res = self._child_get(cr, name, domain=domain)
        if res:
            return res[0]
        res = self._file_get(cr, name)
        if res:
            return res[0]
        return None

    def _file_get(self, cr, nodename=False):
        """Virtual files from document.directory.content rows of this dir."""
        res = []
        is_allowed = self.check_perms((nodename and 1) or 5)
        if not is_allowed:
            raise IOError(errno.EPERM,"Permission denied.")
        cntobj = self.context._dirobj.pool.get('document.directory.content')
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        where = [('directory_id','=',self.dir_id) ]
        #if self.domain:
        #    where.extend(self.domain)
        # print "res_obj file_get clause", where
        ids = cntobj.search(cr, uid, where, context=ctx)
        for content in cntobj.browse(cr, uid, ids, context=ctx):
            res3 = cntobj._file_get(cr, self, nodename, content, context=ctx)
            if res3:
                res.extend(res3)
        return res

    def get_dav_props_DEPR(self, cr):
        # Deprecated! (but document_ics must be cleaned, first)
        res = {}
        cntobj = self.context._dirobj.pool.get('document.directory.content')
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        where = [('directory_id','=',self.dir_id) ]
        ids = cntobj.search(cr, uid, where, context=ctx)
        for content in cntobj.browse(cr, uid, ids, context=ctx):
            if content.extension == '.ics': # FIXME: call the content class!
                res['http://groupdav.org/'] = ('resourcetype',)
        return res

    def get_dav_eprop_DEPR(self, cr, ns, prop):
        # Deprecated!
        if ns != 'http://groupdav.org/' or prop != 'resourcetype':
            _logger.warning("Who asks for %s:%s?" % (ns, prop))
            return None
        cntobj = self.context._dirobj.pool.get('document.directory.content')
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        where = [('directory_id','=',self.dir_id) ]
        ids = cntobj.search(cr,uid,where,context=ctx)
        for content in cntobj.browse(cr, uid, ids, context=ctx):
            # TODO: remove relic of GroupDAV
            if content.extension == '.ics': # FIXME: call the content class!
                return ('vevent-collection','http://groupdav.org/')
        return None

    def _child_get(self, cr, name=None, domain=None):
        """Children of a dynamic folder: sub-records (tree mode), static
        sub-directories, attachments of this record, and child resource dirs."""
        dirobj = self.context._dirobj
        is_allowed = self.check_perms((name and 1) or 5)
        if not is_allowed:
            raise IOError(errno.EPERM,"Permission denied.")
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        directory = dirobj.browse(cr, uid, self.dir_id)
        obj = dirobj.pool[self.res_model]
        where = []
        res = []
        if name:
            where.append(('name','=',name))
        # Directory Structure display in tree structure
        if self.res_id and directory.ressource_tree:
            where1 = []
            if name:
                where1.append(('name','=like',name.replace('\\','\\\\')))
            if obj._parent_name in obj.fields_get(cr, uid):
                where1.append((obj._parent_name, '=', self.res_id))
            namefield = directory.resource_field.name or 'name'
            resids = obj.search(cr, uid, where1, context=ctx)
            for bo in obj.browse(cr, uid, resids, context=ctx):
                if not bo:
                    continue
                res_name = getattr(bo, namefield)
                if not res_name:
                    continue
                res_name = res_name.replace('/', '_')
                if name and (res_name != ustr(name)):
                    continue
                # TODO Revise
                klass = directory.get_node_class(directory, dynamic=True, context=ctx)
                rnode = klass(res_name, dir_id=self.dir_id, parent=self, context=self.context,
                                res_model=self.res_model, res_bo=bo)
                rnode.res_find_all = self.res_find_all
                res.append(rnode)
        where2 = where + [('parent_id','=',self.dir_id) ]
        ids = dirobj.search(cr, uid, where2, context=ctx)
        bo = obj.browse(cr, uid, self.res_id, context=ctx)
        for dirr in dirobj.browse(cr, uid, ids, context=ctx):
            if name and (name != dirr.name):
                continue
            if dirr.type == 'directory':
                klass = dirr.get_node_class(dirr, dynamic=True, context=ctx)
                res.append(klass(dirr.name, dirr.id, self, self.context, self.res_model, res_bo = bo, res_id = self.res_id))
            elif dirr.type == 'ressource':
                # child resources can be controlled by properly set dctx
                klass = dirr.get_node_class(dirr, context=ctx)
                res.append(klass(dirr.name,self,self.context, dirr, {'active_id': self.res_id})) # bo?
        fil_obj = dirobj.pool.get('ir.attachment')
        if self.res_find_all:
            where2 = where
        where3 = where2 + [('res_model', '=', self.res_model), ('res_id','=',self.res_id)]
        # print "where clause for dir_obj", where3
        ids = fil_obj.search(cr, uid, where3, context=ctx)
        if ids:
            for fil in fil_obj.browse(cr, uid, ids, context=ctx):
                klass = self.context.node_file_class
                res.append(klass(fil.name, self, self.context, fil))
        # Get Child Ressource Directories
        if directory.ressource_type_id and directory.ressource_type_id.id:
            where4 = where + [('ressource_parent_type_id','=',directory.ressource_type_id.id)]
            where5 = where4 + ['|', ('ressource_id','=',0), ('ressource_id','=',self.res_id)]
            dirids = dirobj.search(cr,uid, where5)
            for dirr in dirobj.browse(cr, uid, dirids, context=ctx):
                if dirr.type == 'directory' and not dirr.parent_id:
                    klass = dirr.get_node_class(dirr, dynamic=True, context=ctx)
                    rnode = klass(dirr.name, dirr.id, self, self.context, self.res_model, res_bo = bo, res_id = self.res_id)
                    rnode.res_find_all = dirr.resource_find_all
                    res.append(rnode)
                if dirr.type == 'ressource':
                    klass = dirr.get_node_class(dirr, context=ctx)
                    rnode = klass(dirr.name, self, self.context, dirr, {'active_id': self.res_id})
                    rnode.res_find_all = dirr.resource_find_all
                    res.append(rnode)
        return res

    def create_child_collection(self, cr, objname):
        """Create a child directory record for this resource folder."""
        dirobj = self.context._dirobj
        is_allowed = self.check_perms(2)
        if not is_allowed:
            raise IOError(errno.EPERM,"Permission denied.")
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        res_obj = dirobj.pool[self.res_model]
        object2 = res_obj.browse(cr, uid, self.res_id) or False
        obj = dirobj.browse(cr, uid, self.dir_id)
        if obj and (obj.type == 'ressource') and not object2:
            raise OSError(1, 'Operation is not permitted.')
        val = {
            'name': objname,
            'ressource_parent_type_id': obj and obj.ressource_type_id.id or False,
            'ressource_id': object2 and object2.id or False,
            'parent_id' : False,
            'resource_find_all': False,
        }
        if (obj and (obj.type in ('directory'))) or not object2:
            val['parent_id'] =  obj and obj.id or False
        return dirobj.create(cr, uid, val)

    def create_child(self, cr, path, data=None):
        """ API function to create a child file object and node
            Return the node_* created
        """
        is_allowed = self.check_perms(2)
        if not is_allowed:
            raise IOError(errno.EPERM,"Permission denied.")
        dirobj = self.context._dirobj
        uid = self.context.uid
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        fil_obj=dirobj.pool.get('ir.attachment')
        val = {
            'name': path,
            'datas_fname': path,
            'res_model': self.res_model,
            'res_id': self.res_id,
            # Datas are not set here
        }
        if not self.res_find_all:
            val['parent_id'] = self.dir_id
        fil_id = fil_obj.create(cr, uid, val, context=ctx)
        fil = fil_obj.browse(cr, uid, fil_id, context=ctx)
        klass = self.context.node_file_class
        fnode = klass(path, self, self.context, fil)
        if data is not None:
            fnode.set_data(cr, data, fil)
        return fnode

    def _get_ttag(self, cr):
        # Unique per (directory, resource record) pair.
        return 'rodir-%d-%d' % (self.dir_id, self.res_id)
# Late binding: node_res_dir spawns node_res_obj children, but node_res_obj is
# defined after node_res_dir, so the class attribute is wired up here.
node_res_dir.res_obj_class = node_res_obj
class node_file(node_class):
    """A file node, backed by one ir.attachment record."""
    our_type = 'file'

    def __init__(self, path, parent, context, fil):
        super(node_file,self).__init__(path, parent,context)
        self.file_id = fil.id
        #todo: more info from ir_attachment
        if fil.file_type and '/' in fil.file_type:
            self.mimetype = str(fil.file_type)
        self.create_date = fil.create_date
        self.write_date = fil.write_date or fil.create_date
        self.content_length = fil.file_size
        self.displayname = fil.name
        # Files start with rwu (14); the parent dir's perms can narrow that.
        self.uidperms = 14
        if parent:
            if not parent.check_perms('x'):
                self.uidperms = 0
            elif not parent.check_perms('w'):
                self.uidperms = 4
        try:
            self.uuser = (fil.user_id and fil.user_id.login) or 'nobody'
        except Exception:
            self.uuser = 'nobody'
        self.ugroup = mkdosname(fil.company_id and fil.company_id.name, default='nogroup')

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        if not self.context == other.context:
            return False
        if self.dctx != other.dctx:
            return False
        return self.file_id == other.file_id

    def open_data(self, cr, mode):
        # NOTE(review): only read permission (4) is checked, even for write
        # modes — presumably the storage layer re-checks; confirm.
        if not self.check_perms(4):
            raise IOError(errno.EPERM, "Permission denied.")
        stobj = self.context._dirobj.pool.get('document.storage')
        return stobj.get_file(cr, self.context.uid, None, self, mode=mode, context=self.context.context)

    def rm(self, cr):
        """Unlink the backing ir.attachment; needs 'u' (8) permission."""
        uid = self.context.uid
        if not self.check_perms(8):
            raise IOError(errno.EPERM, "Permission denied.")
        document_obj = self.context._dirobj.pool.get('ir.attachment')
        if self.type in ('collection','database'):
            return False
        document = document_obj.browse(cr, uid, self.file_id, context=self.context.context)
        res = False
        if document and document._name == 'ir.attachment':
            res = document_obj.unlink(cr, uid, [document.id])
        return res

    def fix_ppath(self, cr, fbro):
        """Sometimes we may init this w/o path, parent.
        This function fills the missing path from the file browse object

        Note: this may be an expensive operation, do on demand. However,
        once caching is in, we might want to do that at init time and keep
        this object anyway
        """
        if self.path or self.parent:
            return
        assert fbro
        uid = self.context.uid
        dirpath = []
        if fbro.parent_id:
            dirobj = self.context._dirobj.pool.get('document.directory')
            dirpath = dirobj.get_full_path(cr, uid, fbro.parent_id.id, context=self.context.context)
        if fbro.datas_fname:
            dirpath.append(fbro.datas_fname)
        else:
            dirpath.append(fbro.name)
        if len(dirpath)>1:
            self.path = dirpath
        else:
            self.path = dirpath[0]

    def get_data(self, cr, fil_obj=None):
        """ Retrieve the data for some file.
            fil_obj may optionally be specified, and should be a browse object
            for the file. This is useful when the caller has already initiated
            the browse object. """
        if not self.check_perms(4):
            raise IOError(errno.EPERM, "Permission denied.")
        stobj = self.context._dirobj.pool.get('document.storage')
        return stobj.get_data(cr, self.context.uid, None, self,self.context.context, fil_obj)

    def get_data_len(self, cr, fil_obj=None):
        # Under bin_size, db_datas holds the size instead of the payload.
        bin_size = self.context.context.get('bin_size', False)
        if bin_size and not self.content_length:
            self.content_length = fil_obj.db_datas
        return self.content_length

    def set_data(self, cr, data, fil_obj=None):
        """ Store data at some file.
            fil_obj may optionally be specified, and should be a browse object
            for the file. This is useful when the caller has already initiated
            the browse object. """
        if not self.check_perms(2):
            raise IOError(errno.EPERM, "Permission denied.")
        stobj = self.context._dirobj.pool.get('document.storage')
        return stobj.set_data(cr, self.context.uid, None, self, data, self.context.context, fil_obj)

    def _get_ttag(self, cr):
        # Unique per ir.attachment record.
        return 'file-%d' % self.file_id

    def move_to(self, cr, ndir_node, new_name=False, fil_obj=None, ndir_obj=None, in_write=False):
        """Move and/or rename this file; see node_class.move_to for contract."""
        if ndir_node and ndir_node.context != self.context:
            raise NotImplementedError("Cannot move files between contexts.")
        # NOTE(review): suspected bug — node_dir.move_to raises when either
        # check fails ('or not ...'), but here the condition is
        # (not unlink-perm) AND (target has write-perm), which lets a move
        # proceed when *both* checks fail. Confirm intent before changing.
        if (not self.check_perms(8)) and ndir_node.check_perms(2):
            raise IOError(errno.EPERM, "Permission denied.")
        doc_obj = self.context._dirobj.pool.get('ir.attachment')
        if not fil_obj:
            dbro = doc_obj.browse(cr, self.context.uid, self.file_id, context=self.context.context)
        else:
            dbro = fil_obj
            assert dbro.id == self.file_id, "%s != %s for %r." % (dbro.id, self.file_id, self)
        if not dbro:
            raise IndexError("Cannot locate doc %d.", self.file_id)
        if (not self.parent):
            # there *must* be a parent node for this one
            self.parent = self.context.get_dir_node(cr, dbro.parent_id)
            assert self.parent
        ret = {}
        if ndir_node and self.parent != ndir_node:
            if not (isinstance(self.parent, node_dir) and isinstance(ndir_node, node_dir)):
                _logger.debug('Cannot move file %r from %r to %r.', self, self.parent, ndir_node)
                raise NotImplementedError('Cannot move files between dynamic folders.')
            if not ndir_obj:
                ndir_obj = self.context._dirobj.browse(cr, self.context.uid, \
                        ndir_node.dir_id, context=self.context.context)
            assert ndir_obj.id == ndir_node.dir_id
            r2 = { 'parent_id': ndir_obj.id }
            ret.update(r2)
        if new_name and (new_name != dbro.name):
            if len(ret):
                raise NotImplementedError("Cannot rename and move.") # TODO
            r2 = { 'name': new_name, 'datas_fname': new_name }
            ret.update(r2)
        del dbro
        if not in_write:
            # We have to update the data ourselves
            if ret:
                ctx = self.context.context.copy()
                ctx['__from_node'] = True
                doc_obj.write(cr, self.context.uid, [self.file_id,], ret, ctx )
            ret = True
        return ret
class node_content(node_class):
    """A virtual file generated by a document.directory.content record
    (e.g. a rendered report), rather than stored attachment data."""
    our_type = 'content'

    def __init__(self, path, parent, context, cnt, dctx=None, act_id=None):
        super(node_content,self).__init__(path, parent,context)
        self.cnt_id = cnt.id
        self.create_date = False
        self.write_date = False
        # Unknown until the content is generated; see get_data_len().
        self.content_length = False
        self.unixperms = 0640
        if parent:
            # Content is never unlinkable: mask off the 'u' bit (8).
            self.uidperms = parent.uidperms & 14
            self.uuser = parent.uuser
            self.ugroup = parent.ugroup
        self.extension = cnt.extension
        self.report_id = cnt.report_id and cnt.report_id.id
        #self.mimetype = cnt.extension.
        self.displayname = path
        if dctx:
           self.dctx.update(dctx)
        self.act_id = act_id

    def fill_fields(self, cr, dctx=None):
        """ Try to read the object and fill missing fields, like mimetype,
            dates etc.
            This function must be different from the constructor, because
            it uses the db cursor.
        """
        cr.execute('SELECT DISTINCT mimetype FROM document_directory_content_type WHERE active AND code = %s;',
                (self.extension,))
        res = cr.fetchall()
        if res and res[0][0]:
            self.mimetype = str(res[0][0])

    def get_data(self, cr, fil_obj=None):
        """Generate and return the content; caches its length on the node."""
        cntobj = self.context._dirobj.pool.get('document.directory.content')
        if not self.check_perms(4):
            raise IOError(errno.EPERM, "Permission denied.")
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        data = cntobj.process_read(cr, self.context.uid, self, ctx)
        if data:
            self.content_length = len(data)
        return data

    def open_data(self, cr, mode):
        # Normalize binary modes ('rb' -> 'r') and map the mode to the
        # permission letters required for it.
        if mode.endswith('b'):
            mode = mode[:-1]
        if mode in ('r', 'w'):
            cperms = mode[:1]
        elif mode in ('r+', 'w+'):
            cperms = 'rw'
        else:
            raise IOError(errno.EINVAL, "Cannot open at mode %s." % mode)
        if not self.check_perms(cperms):
            raise IOError(errno.EPERM, "Permission denied.")
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        return nodefd_content(self, cr, mode, ctx)

    def get_data_len(self, cr, fil_obj=None):
        # FIXME : here, we actually generate the content twice!!
        # we should have cached the generated content, but it is
        # not advisable to do keep it in memory, until we have a cache
        # expiration logic.
        if not self.content_length:
            self.get_data(cr,fil_obj)
        return self.content_length

    def set_data(self, cr, data, fil_obj=None):
        """Write *data* back through the content's process_write() hook."""
        cntobj = self.context._dirobj.pool.get('document.directory.content')
        if not self.check_perms(2):
            raise IOError(errno.EPERM, "Permission denied.")
        ctx = self.context.context.copy()
        ctx.update(self.dctx)
        return cntobj.process_write(cr, self.context.uid, self, data, ctx)

    def _get_ttag(self, cr):
        # Unique per content record, optionally qualified by the action id.
        return 'cnt-%d%s' % (self.cnt_id,(self.act_id and ('-' + str(self.act_id))) or '')

    def get_dav_resourcetype(self, cr):
        # Plain (non-collection) DAV resource.
        return ''
class node_descriptor(object):
    """A file-like interface to the data contents of a node.

    This class is NOT a node, but an /open descriptor/ for some
    node. It can hold references to a cursor or a file object,
    because the life of a node_descriptor will be the open period
    of the data.
    It should also take care of locking, with any native mechanism
    or using the db.
    For the implementation, it would be OK just to wrap around file,
    StringIO or similar class. The node_descriptor is only needed to
    provide the link to the parent /node/ object.
    """

    def __init__(self, parent):
        """Attach the descriptor to its owning node.

        :param parent: the node_class instance this descriptor reads/writes
        """
        assert isinstance(parent, node_class)
        self.name = parent.displayname
        self.__parent = parent

    def _get_parent(self):
        # Accessor for the (name-mangled) parent node reference.
        return self.__parent

    def open(self, **kwargs):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

    def read(self, size=None):
        raise NotImplementedError

    def seek(self, offset, whence=None):
        raise NotImplementedError

    def tell(self):
        raise NotImplementedError

    def write(self, data):
        # Parameter renamed from `str`, which shadowed the builtin.
        raise NotImplementedError

    def size(self):
        raise NotImplementedError

    def __len__(self):
        return self.size()

    def __nonzero__(self):
        """ Ensure that a node_descriptor will never equal False

            Since we do define __len__ and __iter__ for us, we must avoid
            being regarded as non-true objects.
        """
        return True

    # Python 3 name for truth testing; without it bool() would fall back
    # to __len__ -> size(), which subclasses may not implement.
    __bool__ = __nonzero__

    def next(self, data):
        # Parameter renamed from `str`, which shadowed the builtin.
        raise NotImplementedError
class nodefd_content(StringIO, node_descriptor):
    """ A descriptor to content nodes

    Reads pre-render the dynamic content into an in-memory buffer;
    writable descriptors push the buffer back through
    document.directory.content's process_write() on close().
    """
    def __init__(self, parent, cr, mode, ctx):
        node_descriptor.__init__(self, parent)
        self._context = ctx
        self._size = 0L
        if mode in ('r', 'r+'):
            # Generate the content now so reads see a full buffer.
            cntobj = parent.context._dirobj.pool.get('document.directory.content')
            data = cntobj.process_read(cr, parent.context.uid, parent, ctx)
            if data:
                self._size = len(data)
                parent.content_length = len(data)
            StringIO.__init__(self, data)
        elif mode in ('w', 'w+'):
            StringIO.__init__(self, None)
            # at write, we start at 0 (= overwrite), but have the original
            # data available, in case of a seek()
        elif mode == 'a':
            StringIO.__init__(self, None)
        else:
            _logger.error("Incorrect mode %s is specified.", mode)
            raise IOError(errno.EINVAL, "Invalid file mode.")
        self.mode = mode

    def size(self):
        # Length of the buffered content (0 until first read in 'r' modes).
        return self._size

    def close(self):
        """Flush buffered data back through the content provider and close.

        Read-only descriptors just close; writable ones write the whole
        buffer via process_write() on a fresh cursor and commit.
        """
        # we now open a *separate* cursor, to update the data.
        # FIXME: this may be improved, for concurrency handling
        if self.mode == 'r':
            StringIO.close(self)
            return
        par = self._get_parent()
        uid = par.context.uid
        cr = openerp.registry(par.context.dbname).cursor()
        try:
            if self.mode in ('w', 'w+', 'r+'):
                data = self.getvalue()
                cntobj = par.context._dirobj.pool.get('document.directory.content')
                cntobj.process_write(cr, uid, par, data, par.context.context)
            elif self.mode == 'a':
                # Append mode was accepted at open() but flushing it is
                # not implemented.
                raise NotImplementedError
            cr.commit()
        except Exception:
            _logger.exception('Cannot update db content #%d for close.', par.cnt_id)
            raise
        finally:
            cr.close()
        StringIO.close(self)
class nodefd_static(StringIO, node_descriptor):
    """ A descriptor to nodes with static data.

    The node's data is loaded into an in-memory buffer on open; writable
    descriptors push the buffer back via node.set_data() on close().
    """
    def __init__(self, parent, cr, mode, ctx=None):
        node_descriptor.__init__(self, parent)
        self._context = ctx
        self._size = 0L
        if mode in ('r', 'r+'):
            data = parent.get_data(cr)
            if data:
                self._size = len(data)
                parent.content_length = len(data)
            StringIO.__init__(self, data)
        elif mode in ('w', 'w+'):
            StringIO.__init__(self, None)
            # at write, we start at 0 (= overwrite), but have the original
            # data available, in case of a seek()
        elif mode == 'a':
            StringIO.__init__(self, None)
        else:
            _logger.error("Incorrect mode %s is specified.", mode)
            raise IOError(errno.EINVAL, "Invalid file mode.")
        self.mode = mode

    def size(self):
        # Length of the buffered data (0 until first read in 'r' modes).
        return self._size

    def close(self):
        # we now open a *separate* cursor, to update the data.
        # FIXME: this may be improved, for concurrency handling
        if self.mode == 'r':
            StringIO.close(self)
            return
        par = self._get_parent()
        # uid = par.context.uid
        cr = openerp.registry(par.context.dbname).cursor()
        try:
            if self.mode in ('w', 'w+', 'r+'):
                data = self.getvalue()
                par.set_data(cr, data)
            elif self.mode == 'a':
                # Append mode was accepted at open() but flushing it is
                # not implemented.
                raise NotImplementedError
            cr.commit()
        except Exception:
            # NOTE(review): `par` is a static node here and may not define
            # `cnt_id` (message looks copied from nodefd_content); this log
            # call itself could raise AttributeError — verify the attribute.
            _logger.exception('Cannot update db content #%d for close.', par.cnt_id)
            raise
        finally:
            cr.close()
        StringIO.close(self)
class nodefd_db(StringIO, node_descriptor):
    """ A descriptor to db data

    The attachment's base64 `datas` field is decoded into an in-memory
    buffer on open; writable descriptors re-encode and write it back to
    ir.attachment on close().
    """
    def __init__(self, parent, ira_browse, mode):
        # :param ira_browse: browse record of the backing ir.attachment row
        node_descriptor.__init__(self, parent)
        self._size = 0L
        # The binary flag is meaningless for an in-memory buffer; strip it.
        if mode.endswith('b'):
            mode = mode[:-1]
        if mode in ('r', 'r+'):
            data = ira_browse.datas
            if data:
                # Attachment payloads are stored base64-encoded.
                data = data.decode('base64')
                self._size = len(data)
            StringIO.__init__(self, data)
        elif mode in ('w', 'w+'):
            StringIO.__init__(self, None)
            # at write, we start at 0 (= overwrite), but have the original
            # data available, in case of a seek()
        elif mode == 'a':
            StringIO.__init__(self, None)
        else:
            _logger.error("Incorrect mode %s is specified.", mode)
            raise IOError(errno.EINVAL, "Invalid file mode.")
        self.mode = mode

    def size(self):
        # Length of the decoded payload (0 for newly created buffers).
        return self._size

    def close(self):
        # we now open a *separate* cursor, to update the data.
        # FIXME: this may be improved, for concurrency handling
        par = self._get_parent()
        # uid = par.context.uid
        registry = openerp.modules.registry.RegistryManager.get(par.context.dbname)
        with registry.cursor() as cr:
            data = self.getvalue().encode('base64')
            if self.mode in ('w', 'w+', 'r+'):
                # NOTE: writes back as uid 1 (superuser), bypassing the
                # current user's access rights on ir.attachment.
                registry.get('ir.attachment').write(cr, 1, par.file_id, {'datas': data})
                cr.commit()
        StringIO.close(self)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
dhorelik/django-cms
|
cms/management/commands/subcommands/moderator.py
|
66
|
1954
|
# -*- coding: utf-8 -*-
from logging import getLogger
from cms.management.commands.subcommands.base import SubcommandsCommand
from cms.models import CMSPlugin, Title
from cms.models.pagemodel import Page
from django.core.management.base import NoArgsCommand
log = getLogger('cms.management.moderator')
class ModeratorOnCommand(NoArgsCommand):
    help = 'Turn moderation on, run AFTER upgrading to 2.4'

    def handle_noargs(self, **options):
        """
        Ensure that the public pages look the same as their draft versions.
        This is done by checking the content of the public pages, and reverting
        the draft version to look the same.
        The second stage is to go through the draft pages and publish the ones
        marked as published.
        The end result should be that the public pages and their draft versions
        have the same plugins listed. If both versions exist and have content,
        the public page has precedence. Otherwise, the draft version is used.
        """
        log.info('Reverting drafts to public versions')
        for page in Page.objects.public():
            for language in page.get_languages():
                # Only revert languages that actually have content.
                if CMSPlugin.objects.filter(placeholder__page=page, language=language).exists():
                    log.debug('Reverting page pk=%d' % (page.pk,))
                    page.publisher_draft.revert(language)
        log.info('Publishing all published drafts')
        for title in Title.objects.filter(publisher_is_draft=True, publisher_public_id__gt=0):
            try:
                title.page.publish(title.language)
                # BUGFIX: previously logged `page.pk` — the stale loop
                # variable left over from the revert loop above — instead
                # of the page actually being published.
                log.debug('Published page pk=%d in %s' % (title.page.pk, title.language))
            except Exception:
                log.exception('Error publishing page pk=%d in %s' % (title.page.pk, title.language))
class ModeratorCommand(SubcommandsCommand):
    """`manage.py cms moderator` entry point; dispatches to subcommands."""
    help = 'Moderator utilities'
    subcommands = {
        # `moderator on` — revert/publish pass for the 2.4 upgrade.
        'on': ModeratorOnCommand,
    }
|
bsd-3-clause
|
sio2project/oioioi
|
oioioi/problems/management/commands/showbrokensolutions.py
|
1
|
1774
|
from django.core.management.base import BaseCommand
from django.db.models import F
from django.utils.translation import ugettext as _
from oioioi.problems.models import Problem
from oioioi.programs.models import ModelProgramSubmission
class Command(BaseCommand):
    help = str(
        _(
            "Prints problems without 100-scored model solution. If "
            "username is provided it shows only problems added by that "
            "user."
        )
    )

    def add_arguments(self, parser):
        """Register the optional --user filter."""
        parser.add_argument(
            '--user',
            metavar='USERNAME',
            help='Optional username for filtering problems.',
        )

    def handle(self, *args, **options):
        """Print a count and a one-line summary of every broken problem."""
        username = options.get('user')
        problems = self.get_problems_without_correct_modelsolution(username)
        self.stdout.write('Problems: ' + str(len(problems)) + '\n')
        for problem in problems:
            message = u'- {name} / {short_name} ; id = {id}\n'.format(
                name=problem.name, short_name=problem.short_name, id=str(problem.pk)
            )
            self.stdout.write(message)

    def get_problems_without_correct_modelsolution(self, username=None):
        """Return problems with no model submission reaching the max score.

        :param username: if given, restrict to problems authored by that user
        """
        if username is not None:
            problems = Problem.objects.filter(author__username=username)
        else:
            problems = Problem.objects.all()
        bad_problems = []
        for problem in problems:
            # .exists() asks the database only whether a row is there,
            # instead of evaluating (fetching) the whole queryset just to
            # test its truthiness; the previous order_by('id') had no
            # effect on that test either.
            has_correct = ModelProgramSubmission.objects.filter(
                score=F('submissionreport__scorereport__max_score'),
                model_solution__problem=problem,
            ).exists()
            if not has_correct:
                bad_problems.append(problem)
        return bad_problems
|
gpl-3.0
|
SnappleCap/oh-mainline
|
vendor/packages/scrapy/scrapyd/website.py
|
16
|
3773
|
from datetime import datetime
from twisted.web import resource, static
from twisted.application.service import IServiceCollection
from .interfaces import IPoller, IEggStorage, ISpiderScheduler
from . import webservice
class Root(resource.Resource):
    """Top-level Twisted resource: wires up the home page, the JSON web
    API endpoints, the log file tree and the process monitor.
    """

    def __init__(self, config, app):
        resource.Resource.__init__(self)
        self.debug = config.getboolean('debug', False)
        self.runner = config.get('runner')
        logsdir = config.get('logs_dir')
        self.app = app
        self.putChild('', Home(self))
        # JSON API endpoints: child name -> handler class.
        for child_name, handler_cls in [
            ('schedule.json', webservice.Schedule),
            ('addversion.json', webservice.AddVersion),
            ('listprojects.json', webservice.ListProjects),
            ('listversions.json', webservice.ListVersions),
            ('listspiders.json', webservice.ListSpiders),
            ('delproject.json', webservice.DeleteProject),
            ('delversion.json', webservice.DeleteVersion),
            ('listjobs.json', webservice.ListJobs),
        ]:
            self.putChild(child_name, handler_cls(self))
        self.putChild('logs', static.File(logsdir, 'text/plain'))
        self.putChild('procmon', ProcessMonitor(self))
        self.update_projects()

    def update_projects(self):
        """Refresh the known project list in the poller and the scheduler."""
        self.poller.update_projects()
        self.scheduler.update_projects()

    @property
    def launcher(self):
        # Look the launcher service up by name on the application.
        service_collection = IServiceCollection(self.app, self.app)
        return service_collection.getServiceNamed('launcher')

    @property
    def scheduler(self):
        return self.app.getComponent(ISpiderScheduler)

    @property
    def eggstorage(self):
        return self.app.getComponent(IEggStorage)

    @property
    def poller(self):
        return self.app.getComponent(IPoller)
class Home(resource.Resource):
    """Landing page: lists available projects and basic API usage help."""

    def __init__(self, root):
        resource.Resource.__init__(self)
        self.root = root

    def render_GET(self, txrequest):
        # Renamed from `vars`, which shadowed the builtin.
        template_vars = {
            'projects': ', '.join(self.root.scheduler.list_projects()),
        }
        # BUGFIX: the "Logs" link was missing its closing </a> tag.
        return """
<html>
<head><title>Scrapyd</title></head>
<body>
<h1>Scrapyd</h1>
<p>Available projects: <b>%(projects)s</b></p>
<ul>
<li><a href="/procmon">Process monitor</a></li>
<li><a href="/logs/">Logs</a></li>
<li><a href="http://doc.scrapy.org/en/latest/topics/scrapyd.html">Documentation</a></li>
</ul>
<h2>How to schedule a spider?</h2>
<p>To schedule a spider you need to use the API (this web UI is only for
monitoring)</p>
<p>Example using <a href="http://curl.haxx.se/">curl</a>:</p>
<p><code>curl http://localhost:6800/schedule.json -d project=default -d spider=somespider</code></p>
<p>For more information about the API, see the <a href="http://doc.scrapy.org/topics/scrapyd.html">Scrapyd documentation</a></p>
</body>
</html>
""" % template_vars
class ProcessMonitor(resource.Resource):
    """HTML table of the spider processes currently run by the launcher."""

    def __init__(self, root):
        resource.Resource.__init__(self)
        self.root = root

    def render(self, txrequest):
        # BUGFIX: the page head previously ended with a duplicated
        # </title> instead of </head>.
        s = "<html><head><title>Scrapyd</title></head>"
        s += "<body>"
        s += "<h1>Process monitor</h1>"
        s += "<p><a href='..'>Go back</a></p>"
        s += "<table border='1'>"
        s += "<tr>"
        s += "<th>Project</th><th>Spider</th><th>Job</th><th>PID</th><th>Runtime</th><th>Log</th>"
        s += "</tr>"
        # One row per live process tracked by the launcher.
        for p in self.root.launcher.processes.values():
            s += "<tr>"
            for a in ['project', 'spider', 'job', 'pid']:
                s += "<td>%s</td>" % getattr(p, a)
            s += "<td>%s</td>" % (datetime.now() - p.start_time)
            s += "<td><a href='/logs/%s/%s/%s.log'>Log</a></td>" % (p.project, p.spider, p.job)
            s += "</tr>"
        s += "</table>"
        s += "</body>"
        s += "</html>"
        return s
|
agpl-3.0
|
ljhljh235/AutoRest
|
src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/CustomBaseUri/setup.py
|
14
|
1138
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestparameterizedhosttestclient"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrestazure>=0.4.7"]
setup(
name=NAME,
version=VERSION,
description="AutoRestParameterizedHostTestClient",
author_email="",
url="",
keywords=["Swagger", "AutoRestParameterizedHostTestClient"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest
"""
)
|
mit
|
pjv/easyengine
|
tests/cli/2_test_stack_services_start.py
|
9
|
1582
|
from ee.utils import test
from ee.cli.main import get_test_app
class CliTestCaseStack(test.EETestCase):
    """Smoke tests for `ee stack start`: once per individual service flag
    and once for the whole stack. Each test just runs the CLI app through
    its full lifecycle and expects no exception.
    """

    def _run_app(self):
        # Drive the cement app through its setup/run/close cycle; shared
        # by every test below (was copy-pasted into each method).
        self.app.setup()
        self.app.run()
        self.app.close()

    def test_ee_cli(self):
        # Uses the default app provided by EETestCase (no argv override).
        self._run_app()

    def test_ee_cli_stack_services_start_nginx(self):
        self.app = get_test_app(argv=['stack', 'start', '--nginx'])
        self._run_app()

    def test_ee_cli_stack_services_start_php5_fpm(self):
        self.app = get_test_app(argv=['stack', 'start', '--php'])
        self._run_app()

    def test_ee_cli_stack_services_start_mysql(self):
        self.app = get_test_app(argv=['stack', 'start', '--mysql'])
        self._run_app()

    def test_ee_cli_stack_services_start_postfix(self):
        self.app = get_test_app(argv=['stack', 'start', '--postfix'])
        self._run_app()

    def test_ee_cli_stack_services_start_memcached(self):
        self.app = get_test_app(argv=['stack', 'start', '--memcache'])
        self._run_app()

    def test_ee_cli_stack_services_start_dovecot(self):
        self.app = get_test_app(argv=['stack', 'start', '--dovecot'])
        self._run_app()

    def test_ee_cli_stack_services_start_all(self):
        self.app = get_test_app(argv=['stack', 'start'])
        self._run_app()
|
mit
|
justinlulejian/fah-gae
|
lib/google/protobuf/internal/more_messages_pb2.py
|
43
|
4177
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/internal/more_messages.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# NOTE: generated module (protoc output) — do not hand-edit; regenerate
# from google/protobuf/internal/more_messages.proto instead.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='google/protobuf/internal/more_messages.proto',
  package='google.protobuf.internal',
  syntax='proto2',
  serialized_pb=_b('\n,google/protobuf/internal/more_messages.proto\x12\x18google.protobuf.internal\"h\n\x10OutOfOrderFields\x12\x17\n\x0foptional_sint32\x18\x05 \x01(\x11\x12\x17\n\x0foptional_uint32\x18\x03 \x01(\r\x12\x16\n\x0eoptional_int32\x18\x01 \x01(\x05*\x04\x08\x04\x10\x05*\x04\x08\x02\x10\x03:C\n\x0foptional_uint64\x12*.google.protobuf.internal.OutOfOrderFields\x18\x04 \x01(\x04:B\n\x0eoptional_int64\x12*.google.protobuf.internal.OutOfOrderFields\x18\x02 \x01(\x03')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Top-level extension fields (declared outside the message in the .proto).
OPTIONAL_UINT64_FIELD_NUMBER = 4
optional_uint64 = _descriptor.FieldDescriptor(
  name='optional_uint64', full_name='google.protobuf.internal.optional_uint64', index=0,
  number=4, type=4, cpp_type=4, label=1,
  has_default_value=False, default_value=0,
  message_type=None, enum_type=None, containing_type=None,
  is_extension=True, extension_scope=None,
  options=None)
OPTIONAL_INT64_FIELD_NUMBER = 2
optional_int64 = _descriptor.FieldDescriptor(
  name='optional_int64', full_name='google.protobuf.internal.optional_int64', index=1,
  number=2, type=3, cpp_type=2, label=1,
  has_default_value=False, default_value=0,
  message_type=None, enum_type=None, containing_type=None,
  is_extension=True, extension_scope=None,
  options=None)

_OUTOFORDERFIELDS = _descriptor.Descriptor(
  name='OutOfOrderFields',
  full_name='google.protobuf.internal.OutOfOrderFields',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='optional_sint32', full_name='google.protobuf.internal.OutOfOrderFields.optional_sint32', index=0,
      number=5, type=17, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='optional_uint32', full_name='google.protobuf.internal.OutOfOrderFields.optional_uint32', index=1,
      number=3, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='optional_int32', full_name='google.protobuf.internal.OutOfOrderFields.optional_int32', index=2,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  syntax='proto2',
  extension_ranges=[(4, 5), (2, 3), ],
  oneofs=[
  ],
  serialized_start=74,
  serialized_end=178,
)

DESCRIPTOR.message_types_by_name['OutOfOrderFields'] = _OUTOFORDERFIELDS
DESCRIPTOR.extensions_by_name['optional_uint64'] = optional_uint64
DESCRIPTOR.extensions_by_name['optional_int64'] = optional_int64

# Concrete message class created through the reflection metaclass.
OutOfOrderFields = _reflection.GeneratedProtocolMessageType('OutOfOrderFields', (_message.Message,), dict(
  DESCRIPTOR = _OUTOFORDERFIELDS,
  __module__ = 'google.protobuf.internal.more_messages_pb2'
  # @@protoc_insertion_point(class_scope:google.protobuf.internal.OutOfOrderFields)
  ))
_sym_db.RegisterMessage(OutOfOrderFields)

OutOfOrderFields.RegisterExtension(optional_uint64)
OutOfOrderFields.RegisterExtension(optional_int64)

# @@protoc_insertion_point(module_scope)
|
mit
|
pombredanne/django-bulbs
|
bulbs/campaigns/migrations/0001_initial.py
|
2
|
1592
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import djbetty.fields
class Migration(migrations.Migration):
    # Initial schema for the campaigns app: the Campaign model plus its
    # tracking pixels. NOTE: migrations that may already be applied must
    # not be edited — add a follow-up migration instead.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Campaign',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sponsor_name', models.CharField(max_length=255)),
                ('sponsor_logo', djbetty.fields.ImageField(default=None, null=True, blank=True)),
                ('sponsor_url', models.URLField(null=True, blank=True)),
                ('start_date', models.DateTimeField(null=True, blank=True)),
                ('end_date', models.DateTimeField(null=True, blank=True)),
                ('campaign_label', models.CharField(max_length=255)),
                ('impression_goal', models.IntegerField(null=True, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CampaignPixel',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('url', models.URLField()),
                ('campaign_type', models.IntegerField(default=0, choices=[(0, b'Logo')])),
                ('campaign', models.ForeignKey(related_name='pixels', to='campaigns.Campaign')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
mit
|
bspink/django
|
django/contrib/gis/db/backends/oracle/models.py
|
475
|
2275
|
"""
The GeometryColumns and SpatialRefSys models for the Oracle spatial
backend.
It should be noted that Oracle Spatial does not have database tables
named according to the OGC standard, so the closest analogs are used.
For example, the `USER_SDO_GEOM_METADATA` is used for the GeometryColumns
model and the `SDO_COORD_REF_SYS` is used for the SpatialRefSys model.
"""
from django.contrib.gis.db import models
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class OracleGeometryColumns(models.Model):
    "Maps to the Oracle USER_SDO_GEOM_METADATA table."
    table_name = models.CharField(max_length=32)
    column_name = models.CharField(max_length=1024)
    srid = models.IntegerField(primary_key=True)
    # TODO: Add support for `diminfo` column (type MDSYS.SDO_DIM_ARRAY).

    class Meta:
        # Unmanaged: the table belongs to Oracle Spatial, not to Django
        # migrations.
        app_label = 'gis'
        db_table = 'USER_SDO_GEOM_METADATA'
        managed = False

    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the feature table
        name.
        """
        return 'table_name'

    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the feature
        geometry column.
        """
        return 'column_name'

    def __str__(self):
        return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srid)
class OracleSpatialRefSys(models.Model, SpatialRefSysMixin):
    "Maps to the Oracle MDSYS.CS_SRS table."
    cs_name = models.CharField(max_length=68)
    srid = models.IntegerField(primary_key=True)
    auth_srid = models.IntegerField()
    auth_name = models.CharField(max_length=256)
    wktext = models.CharField(max_length=2046)
    # Optional geometry representing the bounds of this coordinate
    # system. By default, all are NULL in the table.
    cs_bounds = models.PolygonField(null=True)
    objects = models.GeoManager()

    class Meta:
        # Unmanaged: the table belongs to Oracle Spatial, not to Django
        # migrations.
        app_label = 'gis'
        db_table = 'CS_SRS'
        managed = False

    @property
    def wkt(self):
        # WKT of the spatial reference system, as required by
        # SpatialRefSysMixin.
        return self.wktext

    @classmethod
    def wkt_col(cls):
        # Name of the column holding the WKT definition.
        return 'wktext'
|
bsd-3-clause
|
adrienbrault/home-assistant
|
tests/components/kodi/__init__.py
|
8
|
1248
|
"""Tests for the Kodi integration."""
from unittest.mock import patch
from homeassistant.components.kodi.const import CONF_WS_PORT, DOMAIN
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
from .util import MockConnection
from tests.common import MockConfigEntry
async def init_integration(hass) -> MockConfigEntry:
    """Set up the Kodi integration in Home Assistant.

    Builds a config entry with fixed connection settings, patches the Kodi
    client so no real network traffic happens (ping succeeds and the
    reported application version is 1.1), then runs the normal
    config-entry setup and waits for it to complete.

    Returns the MockConfigEntry that was added to hass.
    """
    entry_data = {
        CONF_NAME: "name",
        CONF_HOST: "1.1.1.1",
        CONF_PORT: 8080,
        CONF_WS_PORT: 9090,
        CONF_USERNAME: "user",
        CONF_PASSWORD: "pass",
        CONF_SSL: False,
    }
    entry = MockConfigEntry(domain=DOMAIN, data=entry_data, title="name")
    entry.add_to_hass(hass)

    # Stub out every network-facing Kodi call for the duration of setup.
    with patch("homeassistant.components.kodi.Kodi.ping", return_value=True), patch(
        "homeassistant.components.kodi.Kodi.get_application_properties",
        return_value={"version": {"major": 1, "minor": 1}},
    ), patch(
        "homeassistant.components.kodi.get_kodi_connection",
        return_value=MockConnection(),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    return entry
|
mit
|
Eseoghene/bite-project
|
deps/gdata-python-client/src/gdata/tlslite/integration/ClientHelper.py
|
285
|
7021
|
"""
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from gdata.tlslite.Checker import Checker
class ClientHelper:
    """This is a helper class used to integrate TLS Lite with various
    TLS clients (e.g. poplib, smtplib, httplib, etc.)"""

    def __init__(self,
                 username=None, password=None, sharedKey=None,
                 certChain=None, privateKey=None,
                 cryptoID=None, protocol=None,
                 x509Fingerprint=None,
                 x509TrustList=None, x509CommonName=None,
                 settings=None):
        """
        For client authentication, use one of these argument
        combinations:
         - username, password (SRP)
         - username, sharedKey (shared-key)
         - certChain, privateKey (certificate)

        For server authentication, you can either rely on the
        implicit mutual authentication performed by SRP or
        shared-keys, or you can do certificate-based server
        authentication with one of these argument combinations:
         - cryptoID[, protocol] (requires cryptoIDlib)
         - x509Fingerprint
         - x509TrustList[, x509CommonName] (requires cryptlib_py)

        Certificate-based server authentication is compatible with
        SRP or certificate-based client authentication. It is
        not compatible with shared-keys.

        The constructor does not perform the TLS handshake itself, but
        simply stores these arguments for later. The handshake is
        performed only when this class needs to connect with the
        server. Then you should be prepared to handle TLS-specific
        exceptions. See the client handshake functions in
        L{tlslite.TLSConnection.TLSConnection} for details on which
        exceptions might be raised.

        @type username: str
        @param username: SRP or shared-key username. Requires the
        'password' or 'sharedKey' argument.

        @type password: str
        @param password: SRP password for mutual authentication.
        Requires the 'username' argument.

        @type sharedKey: str
        @param sharedKey: Shared key for mutual authentication.
        Requires the 'username' argument.

        @type certChain: L{tlslite.X509CertChain.X509CertChain} or
        L{cryptoIDlib.CertChain.CertChain}
        @param certChain: Certificate chain for client authentication.
        Requires the 'privateKey' argument. Excludes the SRP or
        shared-key related arguments.

        @type privateKey: L{tlslite.utils.RSAKey.RSAKey}
        @param privateKey: Private key for client authentication.
        Requires the 'certChain' argument. Excludes the SRP or
        shared-key related arguments.

        @type cryptoID: str
        @param cryptoID: cryptoID for server authentication. Mutually
        exclusive with the 'x509...' arguments.

        @type protocol: str
        @param protocol: cryptoID protocol URI for server
        authentication. Requires the 'cryptoID' argument.

        @type x509Fingerprint: str
        @param x509Fingerprint: Hex-encoded X.509 fingerprint for
        server authentication. Mutually exclusive with the 'cryptoID'
        and 'x509TrustList' arguments.

        @type x509TrustList: list of L{tlslite.X509.X509}
        @param x509TrustList: A list of trusted root certificates. The
        other party must present a certificate chain which extends to
        one of these root certificates. The cryptlib_py module must be
        installed to use this parameter. Mutually exclusive with the
        'cryptoID' and 'x509Fingerprint' arguments.

        @type x509CommonName: str
        @param x509CommonName: The end-entity certificate's 'CN' field
        must match this value. For a web server, this is typically a
        server name such as 'www.amazon.com'. Mutually exclusive with
        the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
        'x509TrustList' argument.

        @type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
        @param settings: Various settings which can be used to control
        the ciphersuites, certificate types, and SSL/TLS versions
        offered by the client.
        """
        # Exactly one of the credential combinations below may be set.
        self.username = None
        self.password = None
        self.sharedKey = None
        self.certChain = None
        self.privateKey = None
        self.checker = None

        #SRP Authentication
        if username and password and not \
                (sharedKey or certChain or privateKey):
            self.username = username
            self.password = password

        #Shared Key Authentication
        elif username and sharedKey and not \
                (password or certChain or privateKey):
            self.username = username
            self.sharedKey = sharedKey

        #Certificate Chain Authentication
        elif certChain and privateKey and not \
                (username or password or sharedKey):
            self.certChain = certChain
            self.privateKey = privateKey

        #No Authentication
        elif not password and not username and not \
                sharedKey and not certChain and not privateKey:
            pass
        else:
            raise ValueError("Bad parameters")

        #Authenticate the server based on its cryptoID or fingerprint
        if sharedKey and (cryptoID or protocol or x509Fingerprint):
            # BUGFIX: the two string fragments were previously joined
            # without a separating space ("...forms ofauthentication").
            raise ValueError("Can't use shared keys with other forms of "
                             "authentication")

        self.checker = Checker(cryptoID, protocol, x509Fingerprint,
                               x509TrustList, x509CommonName)
        self.settings = settings

        # Last negotiated session, kept so later connections can resume it.
        self.tlsSession = None

    def _handshake(self, tlsConnection):
        # Run the handshake matching the stored credentials; the resulting
        # session is stored for resumption on subsequent connections.
        if self.username and self.password:
            tlsConnection.handshakeClientSRP(username=self.username,
                                             password=self.password,
                                             checker=self.checker,
                                             settings=self.settings,
                                             session=self.tlsSession)
        elif self.username and self.sharedKey:
            tlsConnection.handshakeClientSharedKey(username=self.username,
                                                   sharedKey=self.sharedKey,
                                                   settings=self.settings)
        else:
            tlsConnection.handshakeClientCert(certChain=self.certChain,
                                              privateKey=self.privateKey,
                                              checker=self.checker,
                                              settings=self.settings,
                                              session=self.tlsSession)
        self.tlsSession = tlsConnection.session
|
apache-2.0
|
Reimilia/pdb_sth
|
native_contact.py
|
1
|
1442
|
import numpy as np
# Distance cutoffs for native-contact detection — presumably in angstroms
# (TODO confirm units). A pair counts as a contact when its distance lies
# in (minimum, maximum]; see compute_native_contact() below.
maximum = 4.5
minimum = 3
def make_pair(x, y):
    """Return every (i, j) index pair, i over len(x) and j over len(y),
    as an (len(x)*len(y), 2) integer array."""
    idx_x, idx_y = np.meshgrid(range(len(x)), range(len(y)))
    return np.c_[idx_x.ravel(), idx_y.ravel()]
def compute_distance(x, y, pairs):
    """Euclidean distance between x[i] and y[j] for each (i, j) in pairs."""
    dists = []
    for (i, j) in pairs:
        diff = x[i] - y[j]
        dists.append(np.sqrt(np.sum(np.power(diff, 2))))
    return np.array(dists)
def compute_native_contact(x, y, pairs, maximum, minimum):
    """Fraction of `pairs` whose x/y distance lies in (minimum, maximum]."""
    dist = compute_distance(x, y, pairs)
    # Collapse everything beyond the cutoff onto `minimum`, so the single
    # `> minimum` test below excludes both too-far and too-close pairs.
    dist[dist > maximum] = minimum
    contact_count = np.sum(dist > minimum)
    return contact_count * 1.0 / len(pairs)
def native_contact(receptor, native, ligands):
    '''
    input:
        receptor :
            numpy array in shape of (n_r,3) , n_r is the number of atoms
        native :
            numpy array in shape of (n_n,3) , n_n is the number of atoms
        ligands:
            numpy array in shape of (m,n_l,3) , m is the number of ligands , n_l is the number of atoms
    output:
        native_contact_value :
            numpy array in shape of (m)
    '''
    pairs = make_pair(receptor, native)
    pairs_distance = compute_distance(receptor, native, pairs)
    # Native contacts are the receptor/native pairs whose distance lies in
    # (minimum, maximum]; out-of-range distances are collapsed onto
    # `minimum` so one comparison filters both ends.
    pairs_distance[pairs_distance > maximum] = minimum
    native_contact_pair = pairs[pairs_distance > minimum]
    # Score each ligand by how many of those native pairs it preserves.
    native_contacts = np.array(
        [compute_native_contact(receptor, ligand, native_contact_pair, maximum, minimum) for ligand in ligands])
    # print() with a single argument behaves identically on Python 2 and 3
    # (was a Python-2-only `print` statement).
    print(native_contacts)
    return native_contacts
|
mit
|
samanehsan/osf.io
|
api_tests/comments/views/test_comment_detail.py
|
5
|
22955
|
from urlparse import urlparse
from nose.tools import * # flake8: noqa
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import ProjectFactory, AuthUserFactory, CommentFactory, RegistrationFactory
class TestCommentDetailView(ApiTestCase):
    """API tests for the comment detail endpoint (/comments/{id}/).

    Covers read / update / delete / undelete permissions for contributors,
    non-contributors and logged-out users on private projects, public
    projects and registrations.
    """

    def setUp(self):
        """Create the three user roles shared by every test."""
        super(TestCommentDetailView, self).setUp()
        self.user = AuthUserFactory()
        self.contributor = AuthUserFactory()
        self.non_contributor = AuthUserFactory()

    def _set_up_private_project_with_comment(self):
        """Build a private project with one comment by self.user."""
        self.private_project = ProjectFactory.build(is_public=False, creator=self.user)
        self.private_project.add_contributor(self.contributor, save=True)
        self.comment = CommentFactory(node=self.private_project, target=self.private_project, user=self.user)
        self.private_url = '/{}comments/{}/'.format(API_BASE, self.comment._id)
        self.payload = {
            'data': {
                'id': self.comment._id,
                'type': 'comments',
                'attributes': {
                    'content': 'Updating this comment',
                    'deleted': False
                }
            }
        }

    def _set_up_public_project_with_comment(self):
        """Build a public project with one comment by self.user."""
        self.public_project = ProjectFactory.build(is_public=True, creator=self.user)
        self.public_project.add_contributor(self.contributor, save=True)
        self.public_comment = CommentFactory(node=self.public_project, target=self.public_project, user=self.user)
        self.public_url = '/{}comments/{}/'.format(API_BASE, self.public_comment._id)
        self.public_comment_payload = {
            'data': {
                'id': self.public_comment._id,
                'type': 'comments',
                'attributes': {
                    'content': 'Updating this comment',
                    'deleted': False
                }
            }
        }

    def _set_up_registration_with_comment(self):
        """Build a registration with one comment by self.user."""
        self.registration = RegistrationFactory(creator=self.user)
        self.registration_comment = CommentFactory(node=self.registration, user=self.user)
        self.registration_url = '/{}comments/{}/'.format(API_BASE, self.registration_comment._id)

    def test_private_node_logged_in_contributor_can_view_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.get(self.private_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.comment._id, res.json['data']['id'])
        assert_equal(self.comment.content, res.json['data']['attributes']['content'])

    def test_private_node_logged_in_non_contributor_cannot_view_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.get(self.private_url, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_private_node_logged_out_user_cannot_view_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.get(self.private_url, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_public_node_logged_in_contributor_can_view_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.public_comment._id, res.json['data']['id'])
        assert_equal(self.public_comment.content, res.json['data']['attributes']['content'])

    def test_public_node_logged_in_non_contributor_can_view_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.public_comment._id, res.json['data']['id'])
        assert_equal(self.public_comment.content, res.json['data']['attributes']['content'])

    def test_public_node_logged_out_user_can_view_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        assert_equal(res.status_code, 200)
        assert_equal(self.public_comment._id, res.json['data']['id'])
        assert_equal(self.public_comment.content, res.json['data']['attributes']['content'])

    def test_registration_logged_in_contributor_can_view_comment(self):
        self._set_up_registration_with_comment()
        res = self.app.get(self.registration_url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.registration_comment._id, res.json['data']['id'])
        assert_equal(self.registration_comment.content, res.json['data']['attributes']['content'])

    def test_comment_has_user_link(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['user']['links']['related']['href']
        expected_url = '/{}users/{}/'.format(API_BASE, self.user._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)

    def test_comment_has_node_link(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['node']['links']['related']['href']
        expected_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)

    def test_comment_has_target_link_with_correct_type(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['target']['links']['related']['href']
        expected_url = '/{}nodes/{}/'.format(API_BASE, self.public_project._id)
        target_type = res.json['data']['relationships']['target']['links']['related']['meta']['type']
        expected_type = 'node'
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)
        assert_equal(target_type, expected_type)

    def test_comment_has_replies_link(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['replies']['links']['self']['href']
        # Use the comment's _id, not the model object, in the expected URL --
        # consistent with every other URL built in this class.
        expected_url = '/{}comments/{}/replies/'.format(API_BASE, self.public_comment._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)

    def test_comment_has_reports_link(self):
        self._set_up_public_project_with_comment()
        res = self.app.get(self.public_url)
        url = res.json['data']['relationships']['reports']['links']['related']['href']
        # Use the comment's _id, not the model object, in the expected URL.
        expected_url = '/{}comments/{}/reports/'.format(API_BASE, self.public_comment._id)
        assert_equal(res.status_code, 200)
        assert_equal(urlparse(url).path, expected_url)

    def test_private_node_only_logged_in_contributor_commenter_can_update_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.put_json_api(self.private_url, self.payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.payload['data']['attributes']['content'], res.json['data']['attributes']['content'])

    def test_private_node_logged_in_non_contributor_cannot_update_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.put_json_api(self.private_url, self.payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_private_node_logged_out_user_cannot_update_comment(self):
        self._set_up_private_project_with_comment()
        res = self.app.put_json_api(self.private_url, self.payload, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_public_node_only_contributor_commenter_can_update_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.put_json_api(self.public_url, self.public_comment_payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(self.public_comment_payload['data']['attributes']['content'], res.json['data']['attributes']['content'])

    def test_public_node_contributor_cannot_update_other_users_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.put_json_api(self.public_url, self.public_comment_payload, auth=self.contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_public_node_non_contributor_cannot_update_other_users_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.put_json_api(self.public_url, self.public_comment_payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_public_node_logged_out_user_cannot_update_comment(self):
        self._set_up_public_project_with_comment()
        res = self.app.put_json_api(self.public_url, self.public_comment_payload, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_public_node_non_contributor_commenter_can_update_comment(self):
        # comment_level='public' lets any logged-in user comment, so the
        # non-contributor owns this comment and may edit it.
        project = ProjectFactory(is_public=True, comment_level='public')
        comment = CommentFactory(node=project, target=project, user=self.non_contributor)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'content': 'Updating this comment',
                    'deleted': False
                }
            }
        }
        res = self.app.put_json_api(url, payload, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_equal(payload['data']['attributes']['content'], res.json['data']['attributes']['content'])

    def test_private_node_only_logged_in_contributor_commenter_can_delete_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory(node=self.private_project, target=self.private_project, user=self.user)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': True
                }
            }
        }
        res = self.app.patch_json_api(url, payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_true(res.json['data']['attributes']['deleted'])
        assert_equal(res.json['data']['attributes']['content'], comment.content)

    def test_private_node_contributor_cannot_delete_other_users_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory(node=self.private_project, target=self.private_project, user=self.user)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': True
                }
            }
        }
        res = self.app.patch_json_api(url, payload, auth=self.contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_private_node_non_contributor_cannot_delete_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory(node=self.private_project, target=self.private_project, user=self.user)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': True
                }
            }
        }
        res = self.app.patch_json_api(url, payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_private_node_logged_out_user_cannot_delete_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory(node=self.private_project, target=self.private_project, user=self.user)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': True
                }
            }
        }
        res = self.app.patch_json_api(url, payload, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_private_node_only_logged_in_contributor_commenter_can_undelete_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory.build(node=self.private_project, target=self.private_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': False
                }
            }
        }
        res = self.app.patch_json_api(url, payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_false(res.json['data']['attributes']['deleted'])
        assert_equal(res.json['data']['attributes']['content'], comment.content)

    def test_private_node_contributor_cannot_undelete_other_users_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory.build(node=self.private_project, target=self.private_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': False
                }
            }
        }
        res = self.app.patch_json_api(url, payload, auth=self.contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_private_node_non_contributor_cannot_undelete_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory.build(node=self.private_project, target=self.private_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': False
                }
            }
        }
        res = self.app.patch_json_api(url, payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_private_node_logged_out_user_cannot_undelete_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory.build(node=self.private_project, target=self.private_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': False
                }
            }
        }
        res = self.app.patch_json_api(url, payload, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_public_node_only_logged_in_contributor_commenter_can_delete_comment(self):
        public_project = ProjectFactory(is_public=True, creator=self.user)
        comment = CommentFactory(node=public_project, target=public_project, user=self.user)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': True
                }
            }
        }
        res = self.app.patch_json_api(url, payload, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_true(res.json['data']['attributes']['deleted'])
        assert_equal(res.json['data']['attributes']['content'], comment.content)

    def test_public_node_contributor_cannot_delete_other_users_comment(self):
        public_project = ProjectFactory(is_public=True, creator=self.user)
        comment = CommentFactory(node=public_project, target=public_project, user=self.user)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': True
                }
            }
        }
        res = self.app.patch_json_api(url, payload, auth=self.contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_public_node_non_contributor_cannot_delete_other_users_comment(self):
        public_project = ProjectFactory(is_public=True, creator=self.user)
        comment = CommentFactory(node=public_project, target=public_project, user=self.user)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': True
                }
            }
        }
        res = self.app.patch_json_api(url, payload, auth=self.non_contributor.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_public_node_logged_out_user_cannot_delete_comment(self):
        public_project = ProjectFactory(is_public=True, creator=self.user)
        comment = CommentFactory(node=public_project, target=public_project, user=self.user)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': True
                }
            }
        }
        res = self.app.patch_json_api(url, payload, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_public_node_non_contributor_commenter_can_delete_comment(self):
        project = ProjectFactory(is_public=True, comment_level='public')
        comment = CommentFactory(node=project, target=project, user=self.non_contributor)
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        payload = {
            'data': {
                'id': comment._id,
                'type': 'comments',
                'attributes': {
                    'deleted': True
                }
            }
        }
        res = self.app.patch_json_api(url, payload, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_true(res.json['data']['attributes']['deleted'])
        assert_equal(res.json['data']['attributes']['content'], comment.content)

    def test_private_node_only_logged_in_commenter_can_view_deleted_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory(node=self.private_project, target=self.private_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['attributes']['content'], comment.content)

    def test_private_node_contributor_cannot_see_other_users_deleted_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory(node=self.private_project, target=self.private_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = self.app.get(url, auth=self.contributor.auth)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])

    def test_private_node_logged_out_user_cannot_see_deleted_comment(self):
        self._set_up_private_project_with_comment()
        comment = CommentFactory(node=self.private_project, target=self.private_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = self.app.get(url, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_public_node_only_logged_in_commenter_can_view_deleted_comment(self):
        public_project = ProjectFactory(is_public=True, creator=self.user)
        comment = CommentFactory(node=public_project, target=public_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = self.app.get(url, auth=self.user.auth)
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data']['attributes']['content'], comment.content)

    def test_public_node_contributor_cannot_view_other_users_deleted_comment(self):
        public_project = ProjectFactory(is_public=True, creator=self.user)
        comment = CommentFactory(node=public_project, target=public_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = self.app.get(url, auth=self.contributor.auth)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])

    def test_public_node_non_contributor_cannot_view_other_users_deleted_comment(self):
        public_project = ProjectFactory(is_public=True, creator=self.user)
        comment = CommentFactory(node=public_project, target=public_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = self.app.get(url, auth=self.non_contributor.auth)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])

    def test_public_node_logged_out_user_cannot_view_deleted_comments(self):
        public_project = ProjectFactory(is_public=True, creator=self.user)
        comment = CommentFactory(node=public_project, target=public_project, user=self.user)
        comment.is_deleted = True
        comment.save()
        url = '/{}comments/{}/'.format(API_BASE, comment._id)
        res = self.app.get(url)
        assert_equal(res.status_code, 200)
        assert_is_none(res.json['data']['attributes']['content'])
|
apache-2.0
|
HyperBaton/ansible
|
test/units/modules/network/f5/test_bigip_sys_global.py
|
22
|
4116
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
# The F5 modules under test require Python 2.7+; assigning pytestmark makes
# pytest skip every test in this file on older interpreters.
if sys.version_info < (2, 7):
    pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_sys_global import ApiParameters
from library.modules.bigip_sys_global import ModuleParameters
from library.modules.bigip_sys_global import ModuleManager
from library.modules.bigip_sys_global import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_sys_global import ApiParameters
from ansible.modules.network.f5.bigip_sys_global import ModuleParameters
from ansible.modules.network.f5.bigip_sys_global import ModuleManager
from ansible.modules.network.f5.bigip_sys_global import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
# Directory holding JSON fixture files, plus a process-wide cache keyed by
# absolute fixture path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Read fixture `name`, JSON-decoding it when possible, with caching.

    Non-JSON fixtures are returned as raw text.  Results are memoized in
    `fixture_data`, so each file is read at most once per test run.
    """
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as f:
            raw = f.read()
        try:
            raw = json.loads(raw)
        except Exception:
            # Not JSON -- keep the raw string.
            pass
        fixture_data[path] = raw
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    """Unit tests for the ModuleParameters / ApiParameters adapters."""

    def test_module_parameters(self):
        params = ModuleParameters(params=dict(
            banner_text='this is a banner',
            console_timeout=100,
            gui_setup='yes',
            lcd_display='yes',
            mgmt_dhcp='yes',
            net_reboot='yes',
            quiet_boot='yes',
            security_banner='yes',
        ))
        assert params.banner_text == 'this is a banner'
        assert params.console_timeout == 100
        assert params.gui_setup == 'yes'
        assert params.lcd_display == 'yes'
        assert params.mgmt_dhcp == 'yes'
        assert params.net_reboot == 'yes'
        assert params.quiet_boot == 'yes'
        assert params.security_banner == 'yes'

    def test_api_parameters(self):
        params = ApiParameters(params=load_fixture('load_sys_global_settings.json'))
        assert 'Welcome to the BIG-IP Configuration Utility' in params.banner_text
        assert params.console_timeout == 0
        assert params.gui_setup == 'no'
        assert params.lcd_display == 'yes'
        assert params.mgmt_dhcp == 'yes'
        assert params.net_reboot == 'no'
        assert params.quiet_boot == 'yes'
        assert params.security_banner == 'yes'
class TestManager(unittest.TestCase):
    """Exercises ModuleManager.exec_module against a mocked device."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_update(self, *args):
        set_module_args(dict(
            banner_text='this is a banner',
            console_timeout=100,
            state='present',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        # What the (mocked) remote device currently reports.
        device_settings = ApiParameters(params=load_fixture('load_sys_global_settings.json'))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        manager = ModuleManager(module=module)
        # Force the "settings differ, push an update" path through the module.
        manager.exists = Mock(return_value=False)
        manager.read_current_from_device = Mock(return_value=device_settings)
        manager.update_on_device = Mock(return_value=True)
        assert manager.exec_module()['changed'] is True
|
gpl-3.0
|
tudorvio/tempest
|
tempest/cmd/cleanup_service.py
|
7
|
30333
|
#!/usr/bin/env python
# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import clients
from tempest import config
from tempest import test
LOG = logging.getLogger(__name__)
CONF = config.CONF
CONF_FLAVORS = None
CONF_IMAGES = None
CONF_NETWORKS = []
CONF_PRIV_NETWORK_NAME = None
CONF_PUB_NETWORK = None
CONF_PUB_ROUTER = None
CONF_TENANTS = None
CONF_USERS = None
IS_CEILOMETER = None
IS_CINDER = None
IS_GLANCE = None
IS_HEAT = None
IS_NEUTRON = None
IS_NOVA = None
def init_conf():
    """Populate the module-level IS_* / CONF_* globals from tempest config.

    Must run before any cleanup service consults these globals.  The private
    network id is only resolved when neutron is enabled, since it requires a
    live API call.
    """
    global CONF_FLAVORS
    global CONF_IMAGES
    global CONF_NETWORKS
    # NOTE(review): CONF_PRIV_NETWORK has no module-level default above, so it
    # remains undefined when neutron is disabled.
    global CONF_PRIV_NETWORK
    global CONF_PRIV_NETWORK_NAME
    global CONF_PUB_NETWORK
    global CONF_PUB_ROUTER
    global CONF_TENANTS
    global CONF_USERS
    global IS_CEILOMETER
    global IS_CINDER
    global IS_GLANCE
    global IS_HEAT
    global IS_NEUTRON
    global IS_NOVA
    # Service availability flags.
    IS_CEILOMETER = CONF.service_available.ceilometer
    IS_CINDER = CONF.service_available.cinder
    IS_GLANCE = CONF.service_available.glance
    IS_HEAT = CONF.service_available.heat
    IS_NEUTRON = CONF.service_available.neutron
    IS_NOVA = CONF.service_available.nova
    # Resources declared in tempest.conf that cleanup must preserve.
    CONF_FLAVORS = [CONF.compute.flavor_ref, CONF.compute.flavor_ref_alt]
    CONF_IMAGES = [CONF.compute.image_ref, CONF.compute.image_ref_alt]
    CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
    CONF_PUB_NETWORK = CONF.network.public_network_id
    CONF_PUB_ROUTER = CONF.network.public_router_id
    CONF_TENANTS = [CONF.identity.admin_tenant_name,
                    CONF.identity.tenant_name,
                    CONF.identity.alt_tenant_name]
    CONF_USERS = [CONF.identity.admin_username, CONF.identity.username,
                  CONF.identity.alt_username]
    # Resolving the private network id requires the neutron API.
    if IS_NEUTRON:
        CONF_PRIV_NETWORK = _get_network_id(CONF.compute.fixed_network_name,
                                            CONF.identity.tenant_name)
        CONF_NETWORKS = [CONF_PUB_NETWORK, CONF_PRIV_NETWORK]
def _get_network_id(net_name, tenant_name):
    """Return the id of the named network owned by `tenant_name`, or None."""
    admin_mgr = clients.AdminManager()
    networks = admin_mgr.network_client.list_networks()['networks']
    tenant_id = admin_mgr.identity_client.get_tenant_by_name(tenant_name)['id']
    return next((net['id'] for net in networks
                 if net['tenant_id'] == tenant_id and net['name'] == net_name),
                None)
class BaseService(object):
    """Common plumbing for all cleanup services.

    Subclasses override list/delete/dry_run/save_state; run() dispatches to
    one of them based on the is_dry_run / is_save_state flags copied from
    `kwargs` onto the instance.
    """

    def __init__(self, kwargs):
        self.client = None
        # Copy every supplied option (tenant_id, data, flags, ...) onto self.
        for attr, value in kwargs.items():
            setattr(self, attr, value)
        self.tenant_filter = {}
        if hasattr(self, 'tenant_id'):
            self.tenant_filter['tenant_id'] = self.tenant_id

    def _filter_by_tenant_id(self, item_list):
        """Keep only items owned by self.tenant_id, when filtering applies."""
        if (not item_list
                or getattr(self, 'tenant_id', None) is None
                or 'tenant_id' not in item_list[0]):
            return item_list
        return [entry for entry in item_list
                if entry['tenant_id'] == self.tenant_id]

    def list(self):
        pass

    def delete(self):
        pass

    def dry_run(self):
        pass

    def save_state(self):
        pass

    def run(self):
        if self.is_dry_run:
            self.dry_run()
        elif self.is_save_state:
            self.save_state()
        else:
            self.delete()
class SnapshotService(BaseService):
    """Cleans up volume snapshots."""

    def __init__(self, manager, **kwargs):
        super(SnapshotService, self).__init__(kwargs)
        self.client = manager.snapshots_client

    def list(self):
        snapshots = self.client.list_snapshots()
        LOG.debug("List count, %s Snapshots" % len(snapshots))
        return snapshots

    def delete(self):
        for snapshot in self.list():
            try:
                self.client.delete_snapshot(snapshot['id'])
            except Exception:
                LOG.exception("Delete Snapshot exception.")

    def dry_run(self):
        self.data['snapshots'] = self.list()
class ServerService(BaseService):
    """Cleans up nova servers."""

    def __init__(self, manager, **kwargs):
        super(ServerService, self).__init__(kwargs)
        self.client = manager.servers_client

    def list(self):
        body = self.client.list_servers()
        servers = body['servers']
        LOG.debug("List count, %s Servers" % len(servers))
        return servers

    def delete(self):
        for server in self.list():
            try:
                self.client.delete_server(server['id'])
            except Exception:
                LOG.exception("Delete Server exception.")

    def dry_run(self):
        self.data['servers'] = self.list()
class ServerGroupService(ServerService):
    """Cleans up nova server groups (shares ServerService's client)."""

    def list(self):
        groups = self.client.list_server_groups()
        LOG.debug("List count, %s Server Groups" % len(groups))
        return groups

    def delete(self):
        for group in self.list():
            try:
                self.client.delete_server_group(group['id'])
            except Exception:
                LOG.exception("Delete Server Group exception.")

    def dry_run(self):
        self.data['server_groups'] = self.list()
class StackService(BaseService):
    """Cleans up heat stacks."""

    def __init__(self, manager, **kwargs):
        super(StackService, self).__init__(kwargs)
        self.client = manager.orchestration_client

    def list(self):
        stacks = self.client.list_stacks()
        LOG.debug("List count, %s Stacks" % len(stacks))
        return stacks

    def delete(self):
        for stack in self.list():
            try:
                self.client.delete_stack(stack['id'])
            except Exception:
                LOG.exception("Delete Stack exception.")

    def dry_run(self):
        self.data['stacks'] = self.list()
class KeyPairService(BaseService):
    """Cleans up nova keypairs."""

    def __init__(self, manager, **kwargs):
        super(KeyPairService, self).__init__(kwargs)
        self.client = manager.keypairs_client

    def list(self):
        keypairs = self.client.list_keypairs()
        LOG.debug("List count, %s Keypairs" % len(keypairs))
        return keypairs

    def delete(self):
        for entry in self.list():
            try:
                # Keypairs are deleted by name, nested under the 'keypair' key.
                self.client.delete_keypair(entry['keypair']['name'])
            except Exception:
                LOG.exception("Delete Keypairs exception.")

    def dry_run(self):
        self.data['keypairs'] = self.list()
class SecurityGroupService(BaseService):
    """Cleans up security groups, sparing each tenant's 'default' group."""

    def __init__(self, manager, **kwargs):
        super(SecurityGroupService, self).__init__(kwargs)
        self.client = manager.security_groups_client

    def list(self):
        groups = self.client.list_security_groups()
        # The 'default' group cannot be deleted; keep it out of the listing.
        deletable = [grp for grp in groups if grp['name'] != 'default']
        LOG.debug("List count, %s Security Groups" % len(deletable))
        return deletable

    def delete(self):
        for group in self.list():
            try:
                self.client.delete_security_group(group['id'])
            except Exception:
                LOG.exception("Delete Security Groups exception.")

    def dry_run(self):
        self.data['security_groups'] = self.list()
class FloatingIpService(BaseService):
    """Cleans up nova-network floating IPs."""

    def __init__(self, manager, **kwargs):
        super(FloatingIpService, self).__init__(kwargs)
        self.client = manager.floating_ips_client

    def list(self):
        floating_ips = self.client.list_floating_ips()
        LOG.debug("List count, %s Floating IPs" % len(floating_ips))
        return floating_ips

    def delete(self):
        for floating_ip in self.list():
            try:
                self.client.delete_floating_ip(floating_ip['id'])
            except Exception:
                LOG.exception("Delete Floating IPs exception.")

    def dry_run(self):
        self.data['floating_ips'] = self.list()
class VolumeService(BaseService):
    """Cleans up cinder volumes."""

    def __init__(self, manager, **kwargs):
        super(VolumeService, self).__init__(kwargs)
        self.client = manager.volumes_client

    def list(self):
        volumes = self.client.list_volumes()
        LOG.debug("List count, %s Volumes" % len(volumes))
        return volumes

    def delete(self):
        for volume in self.list():
            try:
                self.client.delete_volume(volume['id'])
            except Exception:
                LOG.exception("Delete Volume exception.")

    def dry_run(self):
        self.data['volumes'] = self.list()
class VolumeQuotaService(BaseService):
    """Resets a tenant's cinder quota set."""

    def __init__(self, manager, **kwargs):
        super(VolumeQuotaService, self).__init__(kwargs)
        self.client = manager.volume_quotas_client

    def delete(self):
        try:
            self.client.delete_quota_set(self.tenant_id)
        except Exception:
            LOG.exception("Delete Volume Quotas exception.")

    def dry_run(self):
        self.data['volume_quotas'] = self.client.show_quota_usage(self.tenant_id)
class NovaQuotaService(BaseService):
    """Resets a tenant's nova quota set; dry run reports absolute limits."""

    def __init__(self, manager, **kwargs):
        super(NovaQuotaService, self).__init__(kwargs)
        self.client = manager.quotas_client
        self.limits_client = manager.limits_client

    def delete(self):
        try:
            self.client.delete_quota_set(self.tenant_id)
        except Exception:
            LOG.exception("Delete Quotas exception.")

    def dry_run(self):
        limits = self.limits_client.show_limits()
        self.data['compute_quotas'] = limits['absolute']
# Begin network service classes
class NetworkService(BaseService):
    """Cleans up neutron networks; base class for other neutron services."""

    def __init__(self, manager, **kwargs):
        super(NetworkService, self).__init__(kwargs)
        self.client = manager.network_client

    def _filter_by_conf_networks(self, item_list):
        """Drop items whose network_id is a tempest.conf-declared network."""
        # Only filter when every entry actually carries a 'network_id' key.
        if not item_list or any('network_id' not in entry
                                for entry in item_list):
            return item_list
        return [entry for entry in item_list
                if entry['network_id'] not in CONF_NETWORKS]

    def list(self):
        networks = self.client.list_networks(**self.tenant_filter)['networks']
        if self.is_preserve:
            # Networks declared in tempest.conf must survive cleanup.
            networks = [net for net in networks
                        if net['id'] not in CONF_NETWORKS]
        LOG.debug("List count, %s Networks" % networks)
        return networks

    def delete(self):
        for network in self.list():
            try:
                self.client.delete_network(network['id'])
            except Exception:
                LOG.exception("Delete Network exception.")

    def dry_run(self):
        self.data['networks'] = self.list()
class NetworkFloatingIpService(NetworkService):
    """Cleans up neutron floating IPs."""

    def list(self):
        flips = self.client.list_floatingips(**self.tenant_filter)['floatingips']
        LOG.debug("List count, %s Network Floating IPs" % len(flips))
        return flips

    def delete(self):
        for flip in self.list():
            try:
                self.client.delete_floatingip(flip['id'])
            except Exception:
                LOG.exception("Delete Network Floating IP exception.")

    def dry_run(self):
        self.data['floating_ips'] = self.list()
class NetworkRouterService(NetworkService):
    """Cleanup service for Neutron routers and their interfaces."""

    def list(self):
        """Return the tenant's routers, never including the conf public router."""
        response = self.client.list_routers(**self.tenant_filter)
        routers = response['routers']
        if self.is_preserve:
            routers = [r for r in routers if r['id'] != CONF_PUB_ROUTER]
        LOG.debug("List count, %s Routers" % len(routers))
        return routers

    def delete(self):
        """Detach each router's interfaces, then delete the router.

        Failures are logged per router and cleanup continues.
        """
        client = self.client
        for router in self.list():
            try:
                rid = router['id']
                # Only internal interfaces must be detached before deletion.
                interfaces = client.list_router_interfaces(rid)['ports']
                for port in interfaces:
                    if port["device_owner"] == "network:router_interface":
                        client.remove_router_interface_with_port_id(
                            rid, port['id'])
                client.delete_router(rid)
            except Exception:
                LOG.exception("Delete Router exception.")

    def dry_run(self):
        """Record the routers that would be deleted."""
        self.data['routers'] = self.list()
class NetworkHealthMonitorService(NetworkService):
    """Cleanup service for LBaaS health monitors."""

    def list(self):
        """Return the tenant's health monitors (API lists all; filter locally)."""
        response = self.client.list_health_monitors()
        hms = self._filter_by_tenant_id(response['health_monitors'])
        LOG.debug("List count, %s Health Monitors" % len(hms))
        return hms

    def delete(self):
        """Delete every listed health monitor; log and continue on error."""
        for hm in self.list():
            try:
                self.client.delete_health_monitor(hm['id'])
            except Exception:
                LOG.exception("Delete Health Monitor exception.")

    def dry_run(self):
        """Record the health monitors that would be deleted."""
        self.data['health_monitors'] = self.list()
class NetworkMemberService(NetworkService):
    """Cleanup service for LBaaS pool members."""

    def list(self):
        """Return the tenant's members (API lists all; filter locally)."""
        response = self.client.list_members()
        members = self._filter_by_tenant_id(response['members'])
        LOG.debug("List count, %s Members" % len(members))
        return members

    def delete(self):
        """Delete every listed member; log and continue on error."""
        for member in self.list():
            try:
                self.client.delete_member(member['id'])
            except Exception:
                LOG.exception("Delete Member exception.")

    def dry_run(self):
        """Record the members that would be deleted."""
        self.data['members'] = self.list()
class NetworkVipService(NetworkService):
    """Cleanup service for LBaaS VIPs."""

    def list(self):
        """Return the tenant's VIPs (API lists all; filter locally)."""
        response = self.client.list_vips()
        vips = self._filter_by_tenant_id(response['vips'])
        LOG.debug("List count, %s VIPs" % len(vips))
        return vips

    def delete(self):
        """Delete every listed VIP; log and continue on error."""
        for vip in self.list():
            try:
                self.client.delete_vip(vip['id'])
            except Exception:
                LOG.exception("Delete VIP exception.")

    def dry_run(self):
        """Record the VIPs that would be deleted."""
        self.data['vips'] = self.list()
class NetworkPoolService(NetworkService):
    """Cleanup service for LBaaS pools."""

    def list(self):
        """Return the tenant's pools (API lists all; filter locally)."""
        response = self.client.list_pools()
        pools = self._filter_by_tenant_id(response['pools'])
        LOG.debug("List count, %s Pools" % len(pools))
        return pools

    def delete(self):
        """Delete every listed pool; log and continue on error."""
        for pool in self.list():
            try:
                self.client.delete_pool(pool['id'])
            except Exception:
                LOG.exception("Delete Pool exception.")

    def dry_run(self):
        """Record the pools that would be deleted."""
        self.data['pools'] = self.list()
class NetworkMeteringLabelRuleService(NetworkService):
    """Cleanup service for Neutron metering label rules."""

    def list(self):
        """Return the tenant's metering label rules (filter locally)."""
        response = self.client.list_metering_label_rules()
        rules = self._filter_by_tenant_id(response['metering_label_rules'])
        LOG.debug("List count, %s Metering Label Rules" % len(rules))
        return rules

    def delete(self):
        """Delete every listed rule; log and continue on error."""
        for rule in self.list():
            try:
                self.client.delete_metering_label_rule(rule['id'])
            except Exception:
                LOG.exception("Delete Metering Label Rule exception.")

    def dry_run(self):
        """Record the rules that would be deleted."""
        self.data['rules'] = self.list()
class NetworkMeteringLabelService(NetworkService):
    """Cleanup service for Neutron metering labels."""

    def list(self):
        """Return the tenant's metering labels (filter locally)."""
        response = self.client.list_metering_labels()
        labels = self._filter_by_tenant_id(response['metering_labels'])
        LOG.debug("List count, %s Metering Labels" % len(labels))
        return labels

    def delete(self):
        """Delete every listed label; log and continue on error."""
        for label in self.list():
            try:
                self.client.delete_metering_label(label['id'])
            except Exception:
                LOG.exception("Delete Metering Label exception.")

    def dry_run(self):
        """Record the labels that would be deleted."""
        self.data['labels'] = self.list()
class NetworkPortService(NetworkService):
    """Cleanup service for Neutron ports not owned by network infrastructure."""

    def list(self):
        """Return the tenant's unowned or compute-owned ports."""
        all_ports = self.client.list_ports(**self.tenant_filter)['ports']
        # Keep only ports with no device owner or owned by a compute host;
        # infrastructure-owned ports (routers, DHCP) are handled elsewhere.
        ports = [p for p in all_ports
                 if p["device_owner"] == "" or
                 p["device_owner"].startswith("compute:")]
        if self.is_preserve:
            ports = self._filter_by_conf_networks(ports)
        LOG.debug("List count, %s Ports" % len(ports))
        return ports

    def delete(self):
        """Delete every listed port; log and continue on error."""
        for port in self.list():
            try:
                self.client.delete_port(port['id'])
            except Exception:
                LOG.exception("Delete Port exception.")

    def dry_run(self):
        """Record the ports that would be deleted."""
        self.data['ports'] = self.list()
class NetworkSecGroupService(NetworkService):
    """Cleanup service for Neutron security groups (except 'default')."""

    def list(self):
        """Return the tenant's non-default security groups."""
        client = self.client
        # Renamed from `filter` to avoid shadowing the builtin.
        sec_filter = self.tenant_filter
        # cannot delete default sec group so never show it.
        secgroups = [secgroup for secgroup in
                     client.list_security_groups(**sec_filter)['security_groups']
                     if secgroup['name'] != 'default']
        if self.is_preserve:
            secgroups = self._filter_by_conf_networks(secgroups)
        # Bug fix: corrected "securtiy_groups" typo in the debug message.
        LOG.debug("List count, %s security_groups" % len(secgroups))
        return secgroups

    def delete(self):
        """Delete every listed security group; log and continue on error."""
        client = self.client
        secgroups = self.list()
        for secgroup in secgroups:
            try:
                # NOTE(review): other services call delete_<resource>; verify
                # the network client really exposes `delete_secgroup` and not
                # `delete_security_group`.
                client.delete_secgroup(secgroup['id'])
            except Exception:
                LOG.exception("Delete security_group exception.")

    def dry_run(self):
        """Record the security groups that would be deleted."""
        secgroups = self.list()
        self.data['secgroups'] = secgroups
class NetworkSubnetService(NetworkService):
    """Cleanup service for Neutron subnets."""

    def list(self):
        """Return the tenant's subnets, excluding preserved conf networks."""
        response = self.client.list_subnets(**self.tenant_filter)
        subnets = response['subnets']
        if self.is_preserve:
            subnets = self._filter_by_conf_networks(subnets)
        LOG.debug("List count, %s Subnets" % len(subnets))
        return subnets

    def delete(self):
        """Delete every listed subnet; log and continue on error."""
        for subnet in self.list():
            try:
                self.client.delete_subnet(subnet['id'])
            except Exception:
                LOG.exception("Delete Subnet exception.")

    def dry_run(self):
        """Record the subnets that would be deleted."""
        self.data['subnets'] = self.list()
# Telemetry services
class TelemetryAlarmService(BaseService):
    """Cleanup service for Ceilometer alarms."""

    def __init__(self, manager, **kwargs):
        super(TelemetryAlarmService, self).__init__(kwargs)
        self.client = manager.telemetry_client

    def list(self):
        """Return all alarms visible to the client."""
        alarms = self.client.list_alarms()
        LOG.debug("List count, %s Alarms" % len(alarms))
        return alarms

    def delete(self):
        """Delete every listed alarm; log and continue on error."""
        for alarm in self.list():
            try:
                # NOTE(review): telemetry alarm payloads sometimes key the
                # identifier as 'alarm_id' rather than 'id' — confirm against
                # the telemetry client's response schema.
                self.client.delete_alarm(alarm['id'])
            except Exception:
                LOG.exception("Delete Alarms exception.")

    def dry_run(self):
        """Record the alarms that would be deleted."""
        self.data['alarms'] = self.list()
# begin global services
class FlavorService(BaseService):
    """Global cleanup service for Nova flavors."""

    def __init__(self, manager, **kwargs):
        super(FlavorService, self).__init__(kwargs)
        self.client = manager.flavors_client

    def list(self):
        """Return flavors, reconciled against saved state and conf preserves."""
        flavors = self.client.list_flavors({"is_public": None})
        if not self.is_save_state:
            # recreate list removing saved flavors
            saved = self.saved_state_json['flavors'].keys()
            flavors = [f for f in flavors if f['id'] not in saved]
        if self.is_preserve:
            flavors = [f for f in flavors if f['id'] not in CONF_FLAVORS]
        LOG.debug("List count, %s Flavors after reconcile" % len(flavors))
        return flavors

    def delete(self):
        """Delete every listed flavor; log and continue on error."""
        for flavor in self.list():
            try:
                self.client.delete_flavor(flavor['id'])
            except Exception:
                LOG.exception("Delete Flavor exception.")

    def dry_run(self):
        """Record the flavors that would be deleted."""
        self.data['flavors'] = self.list()

    def save_state(self):
        """Snapshot current flavors as an id -> name map."""
        self.data['flavors'] = {}
        for flavor in self.list():
            self.data['flavors'][flavor['id']] = flavor['name']
class ImageService(BaseService):
    """Global cleanup service for Glance images."""

    def __init__(self, manager, **kwargs):
        super(ImageService, self).__init__(kwargs)
        self.client = manager.images_client

    def list(self):
        """Return images, reconciled against saved state and conf preserves."""
        images = self.client.list_images({"all_tenants": True})
        if not self.is_save_state:
            saved = self.saved_state_json['images'].keys()
            images = [img for img in images if img['id'] not in saved]
        if self.is_preserve:
            images = [img for img in images if img['id'] not in CONF_IMAGES]
        LOG.debug("List count, %s Images after reconcile" % len(images))
        return images

    def delete(self):
        """Delete every listed image; log and continue on error."""
        for image in self.list():
            try:
                self.client.delete_image(image['id'])
            except Exception:
                LOG.exception("Delete Image exception.")

    def dry_run(self):
        """Record the images that would be deleted."""
        self.data['images'] = self.list()

    def save_state(self):
        """Snapshot current images as an id -> name map."""
        self.data['images'] = {}
        for image in self.list():
            self.data['images'][image['id']] = image['name']
class IdentityService(BaseService):
    """Base for cleanup services that use the Keystone identity client."""

    def __init__(self, manager, **kwargs):
        super(IdentityService, self).__init__(kwargs)
        self.client = manager.identity_client
class UserService(IdentityService):
    """Global cleanup service for Keystone users."""

    def list(self):
        """Return users, reconciled against saved state and conf preserves."""
        users = self.client.get_users()
        if not self.is_save_state:
            saved = self.saved_state_json['users'].keys()
            users = [u for u in users if u['id'] not in saved]
        if self.is_preserve:
            users = [u for u in users if u['name'] not in CONF_USERS]
        elif not self.is_save_state:  # Never delete admin user
            users = [u for u in users
                     if u['name'] != CONF.identity.admin_username]
        LOG.debug("List count, %s Users after reconcile" % len(users))
        return users

    def delete(self):
        """Delete every listed user; log and continue on error."""
        for user in self.list():
            try:
                self.client.delete_user(user['id'])
            except Exception:
                LOG.exception("Delete User exception.")

    def dry_run(self):
        """Record the users that would be deleted."""
        self.data['users'] = self.list()

    def save_state(self):
        """Snapshot current users as an id -> name map."""
        self.data['users'] = {}
        for user in self.list():
            self.data['users'][user['id']] = user['name']
class RoleService(IdentityService):
    """Global cleanup service for Keystone roles."""

    def list(self):
        """Return roles, reconciled against saved state; never the admin role.

        Returns an empty list if the role listing itself fails.
        """
        try:
            roles = self.client.list_roles()
            # reconcile roles with saved state and never list admin role
            if not self.is_save_state:
                saved = self.saved_state_json['roles'].keys()
                roles = [r for r in roles
                         if (r['id'] not in saved
                             and r['name'] != CONF.identity.admin_role)]
            LOG.debug("List count, %s Roles after reconcile" % len(roles))
            return roles
        except Exception:
            LOG.exception("Cannot retrieve Roles.")
            return []

    def delete(self):
        """Delete every listed role; log and continue on error."""
        for role in self.list():
            try:
                self.client.delete_role(role['id'])
            except Exception:
                LOG.exception("Delete Role exception.")

    def dry_run(self):
        """Record the roles that would be deleted."""
        self.data['roles'] = self.list()

    def save_state(self):
        """Snapshot current roles as an id -> name map."""
        self.data['roles'] = {}
        for role in self.list():
            self.data['roles'][role['id']] = role['name']
class TenantService(IdentityService):
    """Global cleanup service for Keystone tenants."""

    def list(self):
        """Return tenants, reconciled against saved state; never the admin tenant."""
        tenants = self.client.list_tenants()
        if not self.is_save_state:
            saved = self.saved_state_json['tenants'].keys()
            tenants = [t for t in tenants
                       if (t['id'] not in saved
                           and t['name'] != CONF.identity.admin_tenant_name)]
        if self.is_preserve:
            tenants = [t for t in tenants if t['name'] not in CONF_TENANTS]
        LOG.debug("List count, %s Tenants after reconcile" % len(tenants))
        return tenants

    def delete(self):
        """Delete every listed tenant; log and continue on error."""
        for tenant in self.list():
            try:
                self.client.delete_tenant(tenant['id'])
            except Exception:
                LOG.exception("Delete Tenant exception.")

    def dry_run(self):
        """Record the tenants that would be deleted."""
        self.data['tenants'] = self.list()

    def save_state(self):
        """Snapshot current tenants as an id -> name map."""
        self.data['tenants'] = {}
        for tenant in self.list():
            self.data['tenants'][tenant['id']] = tenant['name']
class DomainService(BaseService):
    """Global cleanup service for Keystone v3 domains."""

    def __init__(self, manager, **kwargs):
        super(DomainService, self).__init__(kwargs)
        self.client = manager.identity_v3_client

    def list(self):
        """Return domains, reconciled against saved state."""
        domains = self.client.list_domains()
        if not self.is_save_state:
            saved = self.saved_state_json['domains'].keys()
            domains = [d for d in domains if d['id'] not in saved]
        LOG.debug("List count, %s Domains after reconcile" % len(domains))
        return domains

    def delete(self):
        """Disable then delete each domain; log and continue on error.

        A domain must be disabled before Keystone will allow deletion.
        """
        for domain in self.list():
            try:
                self.client.update_domain(domain['id'], enabled=False)
                self.client.delete_domain(domain['id'])
            except Exception:
                LOG.exception("Delete Domain exception.")

    def dry_run(self):
        """Record the domains that would be deleted."""
        self.data['domains'] = self.list()

    def save_state(self):
        """Snapshot current domains as an id -> name map."""
        self.data['domains'] = {}
        for domain in self.list():
            self.data['domains'][domain['id']] = domain['name']
def get_tenant_cleanup_services():
    """Return the ordered list of per-tenant cleanup service classes.

    Ordering matters: dependent resources (e.g. ports, subnets) are listed
    before the networks that contain them.
    """
    services = []
    if IS_CEILOMETER:
        services.append(TelemetryAlarmService)
    if IS_NOVA:
        services.extend([ServerService,
                         KeyPairService,
                         SecurityGroupService,
                         ServerGroupService])
        if not IS_NEUTRON:
            services.append(FloatingIpService)
        services.append(NovaQuotaService)
    if IS_HEAT:
        services.append(StackService)
    if IS_NEUTRON:
        services.append(NetworkFloatingIpService)
        if test.is_extension_enabled('metering', 'network'):
            services.extend([NetworkMeteringLabelRuleService,
                             NetworkMeteringLabelService])
        services.extend([NetworkRouterService,
                         NetworkPortService,
                         NetworkSubnetService,
                         NetworkService,
                         NetworkSecGroupService])
    if IS_CINDER:
        services.extend([SnapshotService,
                         VolumeService,
                         VolumeQuotaService])
    return services
def get_global_cleanup_services():
    """Return the ordered list of deployment-wide cleanup service classes."""
    services = []
    if IS_NOVA:
        services.append(FlavorService)
    if IS_GLANCE:
        services.append(ImageService)
    services.extend([UserService,
                     TenantService,
                     DomainService,
                     RoleService])
    return services
|
apache-2.0
|
crs4/ProMort
|
promort/shared_datasets_manager/migrations/0001_initial.py
|
2
|
2500
|
# Generated by Django 3.1.8 on 2021-05-09 16:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the shared_datasets_manager app.

    Creates SharedDataset (a named, optionally expiring collection owned by
    a user) and SharedDatasetItem (an ordered entry pairing one or two
    slides sets from the slides_manager app).
    """

    initial = True

    # Requires the user model and the slides_manager app's SlidesSet model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('slides_manager', '0019_auto_20210424_1321'),
    ]

    operations = [
        migrations.CreateModel(
            name='SharedDataset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('label', models.CharField(max_length=50, unique=True)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('description', models.TextField(blank=True, default=None, null=True)),
                ('expiry_date', models.DateField(blank=True, default=None, null=True)),
                ('hidden', models.BooleanField(default=False)),
                # PROTECT: a user with datasets cannot be deleted.
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='SharedDatasetItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('dataset_index', models.IntegerField()),
                ('slides_set_a_label', models.CharField(max_length=50)),
                # Second slides set is optional.
                ('slides_set_b_label', models.CharField(blank=True, default=None, max_length=50, null=True)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('notes', models.TextField(blank=True, default=None, null=True)),
                ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='items', to='shared_datasets_manager.shareddataset')),
                ('slides_set_a', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='shared_dataset_item_a', to='slides_manager.slidesset')),
                ('slides_set_b', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='shared_dataset_item_b', to='slides_manager.slidesset')),
            ],
            options={
                # Labels, index and the (a, b) pairing are each unique per dataset.
                'unique_together': {('dataset', 'slides_set_a_label'), ('slides_set_a', 'slides_set_b'), ('dataset', 'slides_set_b_label'), ('dataset', 'dataset_index')},
            },
        ),
    ]
|
mit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.